author    morpheus65535 <[email protected]>    2023-10-14 09:56:21 -0400
committer GitHub <[email protected]>    2023-10-14 09:56:21 -0400
commit    c89da3e6192a519ccefa6bb7d9f9c9eaa280d373 (patch)
tree      6343200ac81b8c7e8eb089ebfb828768f4d94c4f
parent    d6579417ba3b3555bff3af446bef8a56e2d7a7c6 (diff)
download  bazarr-1.3.2-beta.0.tar.gz / bazarr-1.3.2-beta.0.zip
Changing from config.ini to config.yaml (tag: v1.3.2-beta.0)
-rw-r--r--  bazarr/api/episodes/episodes_subtitles.py | 2
-rw-r--r--  bazarr/api/movies/movies_subtitles.py | 2
-rw-r--r--  bazarr/api/providers/providers_episodes.py | 2
-rw-r--r--  bazarr/api/providers/providers_movies.py | 2
-rw-r--r--  bazarr/api/subtitles/subtitles.py | 2
-rw-r--r--  bazarr/api/system/account.py | 4
-rw-r--r--  bazarr/api/system/searches.py | 4
-rw-r--r--  bazarr/api/system/settings.py | 16
-rw-r--r--  bazarr/api/utils.py | 2
-rw-r--r--  bazarr/app/config.py | 868
-rw-r--r--  bazarr/app/database.py | 20
-rw-r--r--  bazarr/app/get_providers.py | 51
-rw-r--r--  bazarr/app/scheduler.py | 22
-rw-r--r--  bazarr/app/signalr_client.py | 10
-rw-r--r--  bazarr/init.py | 55
-rw-r--r--  bazarr/main.py | 6
-rw-r--r--  bazarr/radarr/info.py | 4
-rw-r--r--  bazarr/radarr/sync/movies.py | 4
-rw-r--r--  bazarr/radarr/sync/parser.py | 2
-rw-r--r--  bazarr/sonarr/info.py | 4
-rw-r--r--  bazarr/sonarr/sync/parser.py | 4
-rw-r--r--  bazarr/sonarr/sync/series.py | 4
-rw-r--r--  bazarr/subtitles/adaptive_searching.py | 2
-rw-r--r--  bazarr/subtitles/download.py | 10
-rw-r--r--  bazarr/subtitles/indexer/movies.py | 14
-rw-r--r--  bazarr/subtitles/indexer/series.py | 14
-rw-r--r--  bazarr/subtitles/manual.py | 10
-rw-r--r--  bazarr/subtitles/processing.py | 6
-rw-r--r--  bazarr/subtitles/sync.py | 6
-rw-r--r--  bazarr/subtitles/tools/mods.py | 2
-rw-r--r--  bazarr/subtitles/tools/subsyncer.py | 8
-rw-r--r--  bazarr/subtitles/upgrade.py | 10
-rw-r--r--  bazarr/subtitles/upload.py | 12
-rw-r--r--  bazarr/subtitles/utils.py | 2
-rw-r--r--  bazarr/utilities/analytics.py | 2
-rw-r--r--  bazarr/utilities/backup.py | 28
-rw-r--r--  bazarr/utilities/health.py | 8
-rw-r--r--  bazarr/utilities/path_mappings.py | 6
-rw-r--r--  bazarr/utilities/post_processing.py | 2
-rw-r--r--  frontend/.env.development | 2
-rw-r--r--  frontend/config/configReader.ts | 36
-rw-r--r--  frontend/package-lock.json | 20
-rw-r--r--  frontend/package.json | 5
-rw-r--r--  libs/dynaconf/__init__.py | 31
-rw-r--r--  libs/dynaconf/base.py | 1285
-rw-r--r--  libs/dynaconf/cli.py | 773
-rw-r--r--  libs/dynaconf/constants.py | 52
-rw-r--r--  libs/dynaconf/contrib/__init__.py | 5
-rw-r--r--  libs/dynaconf/contrib/django_dynaconf_v2.py | 142
-rw-r--r--  libs/dynaconf/contrib/flask_dynaconf.py | 230
-rw-r--r--  libs/dynaconf/default_settings.py | 252
-rw-r--r--  libs/dynaconf/loaders/__init__.py | 277
-rw-r--r--  libs/dynaconf/loaders/base.py | 195
-rw-r--r--  libs/dynaconf/loaders/env_loader.py | 108
-rw-r--r--  libs/dynaconf/loaders/ini_loader.py | 62
-rw-r--r--  libs/dynaconf/loaders/json_loader.py | 80
-rw-r--r--  libs/dynaconf/loaders/py_loader.py | 148
-rw-r--r--  libs/dynaconf/loaders/redis_loader.py | 108
-rw-r--r--  libs/dynaconf/loaders/toml_loader.py | 122
-rw-r--r--  libs/dynaconf/loaders/vault_loader.py | 186
-rw-r--r--  libs/dynaconf/loaders/yaml_loader.py | 87
-rw-r--r--  libs/dynaconf/strategies/__init__.py | 0
-rw-r--r--  libs/dynaconf/strategies/filtering.py | 19
-rw-r--r--  libs/dynaconf/test_settings.py | 8
-rw-r--r--  libs/dynaconf/utils/__init__.py | 461
-rw-r--r--  libs/dynaconf/utils/boxing.py | 81
-rw-r--r--  libs/dynaconf/utils/files.py | 112
-rw-r--r--  libs/dynaconf/utils/functional.py | 136
-rw-r--r--  libs/dynaconf/utils/parse_conf.py | 401
-rw-r--r--  libs/dynaconf/validator.py | 498
-rw-r--r--  libs/dynaconf/validator_conditions.py | 90
-rw-r--r--  libs/dynaconf/vendor/__init__.py | 0
-rw-r--r--  libs/dynaconf/vendor/box/__init__.py | 15
-rw-r--r--  libs/dynaconf/vendor/box/box.py | 689
-rw-r--r--  libs/dynaconf/vendor/box/box_list.py | 276
-rw-r--r--  libs/dynaconf/vendor/box/config_box.py | 133
-rw-r--r--  libs/dynaconf/vendor/box/converters.py | 129
-rw-r--r--  libs/dynaconf/vendor/box/exceptions.py | 22
-rw-r--r--  libs/dynaconf/vendor/box/from_file.py | 73
-rw-r--r--  libs/dynaconf/vendor/box/shorthand_box.py | 38
-rw-r--r--  libs/dynaconf/vendor/click/__init__.py | 75
-rw-r--r--  libs/dynaconf/vendor/click/_bashcomplete.py | 371
-rw-r--r--  libs/dynaconf/vendor/click/_compat.py | 611
-rw-r--r--  libs/dynaconf/vendor/click/_termui_impl.py | 667
-rw-r--r--  libs/dynaconf/vendor/click/_textwrap.py | 37
-rw-r--r--  libs/dynaconf/vendor/click/_unicodefun.py | 82
-rw-r--r--  libs/dynaconf/vendor/click/_winconsole.py | 308
-rw-r--r--  libs/dynaconf/vendor/click/core.py | 2070
-rw-r--r--  libs/dynaconf/vendor/click/decorators.py | 331
-rw-r--r--  libs/dynaconf/vendor/click/exceptions.py | 233
-rw-r--r--  libs/dynaconf/vendor/click/formatting.py | 279
-rw-r--r--  libs/dynaconf/vendor/click/globals.py | 47
-rw-r--r--  libs/dynaconf/vendor/click/parser.py | 431
-rw-r--r--  libs/dynaconf/vendor/click/termui.py | 688
-rw-r--r--  libs/dynaconf/vendor/click/testing.py | 362
-rw-r--r--  libs/dynaconf/vendor/click/types.py | 726
-rw-r--r--  libs/dynaconf/vendor/click/utils.py | 440
-rw-r--r--  libs/dynaconf/vendor/dotenv/__init__.py | 46
-rw-r--r--  libs/dynaconf/vendor/dotenv/cli.py | 145
-rw-r--r--  libs/dynaconf/vendor/dotenv/compat.py | 49
-rw-r--r--  libs/dynaconf/vendor/dotenv/ipython.py | 41
-rw-r--r--  libs/dynaconf/vendor/dotenv/main.py | 323
-rw-r--r--  libs/dynaconf/vendor/dotenv/parser.py | 237
-rw-r--r--  libs/dynaconf/vendor/dotenv/py.typed | 1
-rw-r--r--  libs/dynaconf/vendor/dotenv/version.py | 1
-rw-r--r--  libs/dynaconf/vendor/ruamel/__init__.py | 0
-rw-r--r--  libs/dynaconf/vendor/ruamel/yaml/CHANGES | 957
-rw-r--r--  libs/dynaconf/vendor/ruamel/yaml/LICENSE | 21
-rw-r--r--  libs/dynaconf/vendor/ruamel/yaml/MANIFEST.in | 3
-rw-r--r--  libs/dynaconf/vendor/ruamel/yaml/PKG-INFO | 782
-rw-r--r--  libs/dynaconf/vendor/ruamel/yaml/README.rst | 752
-rw-r--r--  libs/dynaconf/vendor/ruamel/yaml/__init__.py | 60
-rw-r--r--  libs/dynaconf/vendor/ruamel/yaml/anchor.py | 20
-rw-r--r--  libs/dynaconf/vendor/ruamel/yaml/comments.py | 1149
-rw-r--r--  libs/dynaconf/vendor/ruamel/yaml/compat.py | 324
-rw-r--r--  libs/dynaconf/vendor/ruamel/yaml/composer.py | 238
-rw-r--r--  libs/dynaconf/vendor/ruamel/yaml/configobjwalker.py | 14
-rw-r--r--  libs/dynaconf/vendor/ruamel/yaml/constructor.py | 1805
-rw-r--r--  libs/dynaconf/vendor/ruamel/yaml/cyaml.py | 185
-rw-r--r--  libs/dynaconf/vendor/ruamel/yaml/dumper.py | 221
-rw-r--r--  libs/dynaconf/vendor/ruamel/yaml/emitter.py | 1688
-rw-r--r--  libs/dynaconf/vendor/ruamel/yaml/error.py | 311
-rw-r--r--  libs/dynaconf/vendor/ruamel/yaml/events.py | 157
-rw-r--r--  libs/dynaconf/vendor/ruamel/yaml/loader.py | 74
-rw-r--r--  libs/dynaconf/vendor/ruamel/yaml/main.py | 1534
-rw-r--r--  libs/dynaconf/vendor/ruamel/yaml/nodes.py | 131
-rw-r--r--  libs/dynaconf/vendor/ruamel/yaml/parser.py | 802
-rw-r--r--  libs/dynaconf/vendor/ruamel/yaml/py.typed | 0
-rw-r--r--  libs/dynaconf/vendor/ruamel/yaml/reader.py | 311
-rw-r--r--  libs/dynaconf/vendor/ruamel/yaml/representer.py | 1283
-rw-r--r--  libs/dynaconf/vendor/ruamel/yaml/resolver.py | 399
-rw-r--r--  libs/dynaconf/vendor/ruamel/yaml/scalarbool.py | 51
-rw-r--r--  libs/dynaconf/vendor/ruamel/yaml/scalarfloat.py | 127
-rw-r--r--  libs/dynaconf/vendor/ruamel/yaml/scalarint.py | 130
-rw-r--r--  libs/dynaconf/vendor/ruamel/yaml/scalarstring.py | 156
-rw-r--r--  libs/dynaconf/vendor/ruamel/yaml/scanner.py | 1980
-rw-r--r--  libs/dynaconf/vendor/ruamel/yaml/serializer.py | 240
-rw-r--r--  libs/dynaconf/vendor/ruamel/yaml/setup.cfg | 4
-rw-r--r--  libs/dynaconf/vendor/ruamel/yaml/setup.py | 962
-rw-r--r--  libs/dynaconf/vendor/ruamel/yaml/timestamp.py | 28
-rw-r--r--  libs/dynaconf/vendor/ruamel/yaml/tokens.py | 286
-rw-r--r--  libs/dynaconf/vendor/ruamel/yaml/util.py | 190
-rw-r--r--  libs/dynaconf/vendor/source | 4
-rw-r--r--  libs/dynaconf/vendor/toml/DEPRECATION.txt | 3
-rw-r--r--  libs/dynaconf/vendor/toml/__init__.py | 25
-rw-r--r--  libs/dynaconf/vendor/toml/decoder.py | 1052
-rw-r--r--  libs/dynaconf/vendor/toml/encoder.py | 304
-rw-r--r--  libs/dynaconf/vendor/toml/ordered.py | 15
-rw-r--r--  libs/dynaconf/vendor/toml/tz.py | 21
-rw-r--r--  libs/dynaconf/vendor/tomllib/__init__.py | 16
-rw-r--r--  libs/dynaconf/vendor/tomllib/_parser.py | 690
-rw-r--r--  libs/dynaconf/vendor/tomllib/_re.py | 106
-rw-r--r--  libs/dynaconf/vendor/tomllib/_types.py | 9
-rw-r--r--  libs/dynaconf/vendor/tomllib/_writer.py | 202
-rw-r--r--  libs/dynaconf/vendor/vendor.txt | 6
-rw-r--r--  libs/dynaconf/vendor/vendor_history | 26
-rw-r--r--  libs/simpleconfigparser/__init__.py | 131
-rw-r--r--  libs/version.txt | 2
158 files changed, 36462 insertions(+), 745 deletions(-)
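
The heart of the change: config.ini values were all strings, so boolean options went through configparser's getboolean(); with dynaconf reading config.yaml, values keep their native YAML types and are read by plain attribute access. That is why nearly every hunk below replaces settings.<section>.getboolean('<key>') with settings.<section>.<key>. A minimal sketch of the two access styles (not Bazarr's code; contents and paths are illustrative, and it assumes the dynaconf package is importable):

    import configparser
    import tempfile

    from dynaconf import Dynaconf

    # Old style: everything read from config.ini is a string, so booleans
    # needed getboolean() and numbers needed int().
    ini = configparser.ConfigParser()
    ini.read_string("[general]\nuse_sonarr = True\nport = 6767\n")
    assert ini.getboolean("general", "use_sonarr") is True
    assert ini.get("general", "port") == "6767"            # still a string

    # New style: YAML keeps native types, so plain attribute access returns
    # a real bool/int and getboolean() disappears from the call sites.
    with tempfile.NamedTemporaryFile("w", suffix=".yaml", delete=False) as f:
        f.write("general:\n  use_sonarr: true\n  port: 6767\n")
    settings = Dynaconf(settings_file=f.name, core_loaders=["YAML"])
    assert settings.general.use_sonarr is True             # bool, not "True"
    assert settings.general.port == 6767                   # int, not "6767"
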
diff --git a/bazarr/api/episodes/episodes_subtitles.py b/bazarr/api/episodes/episodes_subtitles.py
index cdb66992d..4795bc98c 100644
--- a/bazarr/api/episodes/episodes_subtitles.py
+++ b/bazarr/api/episodes/episodes_subtitles.py
@@ -162,7 +162,7 @@ class EpisodesSubtitles(Resource):
provider = "manual"
score = 360
history_log(4, sonarrSeriesId, sonarrEpisodeId, result, fake_provider=provider, fake_score=score)
- if not settings.general.getboolean('dont_notify_manual_actions'):
+ if not settings.general.dont_notify_manual_actions:
send_notifications(sonarrSeriesId, sonarrEpisodeId, result.message)
store_subtitles(result.path, episodePath)
except OSError:
diff --git a/bazarr/api/movies/movies_subtitles.py b/bazarr/api/movies/movies_subtitles.py
index f544cd8cb..db9385b71 100644
--- a/bazarr/api/movies/movies_subtitles.py
+++ b/bazarr/api/movies/movies_subtitles.py
@@ -158,7 +158,7 @@ class MoviesSubtitles(Resource):
provider = "manual"
score = 120
history_log_movie(4, radarrId, result, fake_provider=provider, fake_score=score)
- if not settings.general.getboolean('dont_notify_manual_actions'):
+ if not settings.general.dont_notify_manual_actions:
send_notifications_movie(radarrId, result.message)
store_subtitles_movie(result.path, moviePath)
except OSError:
diff --git a/bazarr/api/providers/providers_episodes.py b/bazarr/api/providers/providers_episodes.py
index c7a20e151..9d880717e 100644
--- a/bazarr/api/providers/providers_episodes.py
+++ b/bazarr/api/providers/providers_episodes.py
@@ -141,7 +141,7 @@ class ProviderEpisodes(Resource):
result = result[0]
if isinstance(result, ProcessSubtitlesResult):
history_log(2, sonarrSeriesId, sonarrEpisodeId, result)
- if not settings.general.getboolean('dont_notify_manual_actions'):
+ if not settings.general.dont_notify_manual_actions:
send_notifications(sonarrSeriesId, sonarrEpisodeId, result.message)
store_subtitles(result.path, episodePath)
elif isinstance(result, str):
diff --git a/bazarr/api/providers/providers_movies.py b/bazarr/api/providers/providers_movies.py
index 0df6a5f08..92b6f9995 100644
--- a/bazarr/api/providers/providers_movies.py
+++ b/bazarr/api/providers/providers_movies.py
@@ -135,7 +135,7 @@ class ProviderMovies(Resource):
result = result[0]
if isinstance(result, ProcessSubtitlesResult):
history_log_movie(2, radarrId, result)
- if not settings.general.getboolean('dont_notify_manual_actions'):
+ if not settings.general.dont_notify_manual_actions:
send_notifications_movie(radarrId, result.message)
store_subtitles_movie(result.path, moviePath)
elif isinstance(result, str):
diff --git a/bazarr/api/subtitles/subtitles.py b/bazarr/api/subtitles/subtitles.py
index 4822ae644..eb021613e 100644
--- a/bazarr/api/subtitles/subtitles.py
+++ b/bazarr/api/subtitles/subtitles.py
@@ -116,7 +116,7 @@ class Subtitles(Resource):
# apply chmod if required
chmod = int(settings.general.chmod, 8) if not sys.platform.startswith(
- 'win') and settings.general.getboolean('chmod_enabled') else None
+ 'win') and settings.general.chmod_enabled else None
if chmod:
os.chmod(subtitles_path, chmod)
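
Note that chmod itself deliberately stays a string (it is listed in str_keys further down in config.py) so the leading zero of the octal notation survives; the call site parses it with int(value, 8). A small sketch of that parsing, using a throwaway temp file:

    import os
    import sys
    import tempfile

    chmod_value = "0640"         # kept as a string so the octal leading zero survives
    mode = int(chmod_value, 8)   # parse base-8 text -> 0o640 (decimal 416, rw-r-----)

    if not sys.platform.startswith("win"):
        with tempfile.NamedTemporaryFile(suffix=".srt", delete=False) as f:
            path = f.name
        os.chmod(path, mode)     # apply the permissions to the throwaway file
        os.remove(path)
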
diff --git a/bazarr/api/system/account.py b/bazarr/api/system/account.py
index 536e14f45..fa2231aac 100644
--- a/bazarr/api/system/account.py
+++ b/bazarr/api/system/account.py
@@ -24,12 +24,12 @@ class SystemAccount(Resource):
@api_ns_system_account.response(400, 'Unknown action')
@api_ns_system_account.response(403, 'Authentication failed')
@api_ns_system_account.response(406, 'Browser must be closed to invalidate basic authentication')
- @api_ns_system_account.response(500, 'Unknown authentication type define in config.ini')
+ @api_ns_system_account.response(500, 'Unknown authentication type defined in config')
def post(self):
"""Login or logout from Bazarr UI when using form login"""
args = self.post_request_parser.parse_args()
if settings.auth.type != 'form':
- return 'Unknown authentication type define in config.ini', 500
+ return 'Unknown authentication type defined in config', 500
action = args.get('action')
if action == 'login':
diff --git a/bazarr/api/system/searches.py b/bazarr/api/system/searches.py
index 5560e1101..755711446 100644
--- a/bazarr/api/system/searches.py
+++ b/bazarr/api/system/searches.py
@@ -27,7 +27,7 @@ class Searches(Resource):
search_list = []
if query:
- if settings.general.getboolean('use_sonarr'):
+ if settings.general.use_sonarr:
# Get matching series
search_list += database.execute(
select(TableShows.title,
@@ -36,7 +36,7 @@ class Searches(Resource):
.order_by(TableShows.title)) \
.all()
- if settings.general.getboolean('use_radarr'):
+ if settings.general.use_radarr:
# Get matching movies
search_list += database.execute(
select(TableMovies.title,
diff --git a/bazarr/api/system/settings.py b/bazarr/api/system/settings.py
index bad7e9dc8..1dcfbd48b 100644
--- a/bazarr/api/system/settings.py
+++ b/bazarr/api/system/settings.py
@@ -4,6 +4,7 @@ import json
from flask import request, jsonify
from flask_restx import Resource, Namespace
+from dynaconf.validator import ValidationError
from app.database import TableLanguagesProfiles, TableSettingsLanguages, TableSettingsNotifier, \
update_profile_id_list, database, insert, update, delete, select
@@ -97,9 +98,9 @@ class SystemSettings(Resource):
event_stream("languages")
- if settings.general.getboolean('use_sonarr'):
+ if settings.general.use_sonarr:
scheduler.add_job(list_missing_subtitles, kwargs={'send_event': True})
- if settings.general.getboolean('use_radarr'):
+ if settings.general.use_radarr:
scheduler.add_job(list_missing_subtitles_movies, kwargs={'send_event': True})
# Update Notification
@@ -112,6 +113,11 @@ class SystemSettings(Resource):
url=item['url'])
.where(TableSettingsNotifier.name == item['name']))
- save_settings(zip(request.form.keys(), request.form.listvalues()))
- event_stream("settings")
- return '', 204
+ try:
+ save_settings(zip(request.form.keys(), request.form.listvalues()))
+ except ValidationError as e:
+ event_stream("settings")
+ return e.message, 406
+ else:
+ event_stream("settings")
+ return '', 204
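
save_settings() (see the config.py hunks below) now re-validates after applying the submitted values and re-raises dynaconf's ValidationError, which this endpoint converts into an HTTP 406 carrying the validator's message. A standalone sketch of that failure mode (assumed setup, not Bazarr's code):

    from dynaconf import Dynaconf, Validator
    from dynaconf.validator import ValidationError

    settings = Dynaconf()
    settings.validators.register(
        Validator("general.port", is_type_of=int, gte=1, lte=65535)
    )

    settings.set("general.port", 99999)   # out of range, as a bad form post might be
    try:
        settings.validators.validate()
    except ValidationError as e:
        print(e.message)                  # the text the endpoint returns with 406
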
diff --git a/bazarr/api/utils.py b/bazarr/api/utils.py
index a61d50527..534b4b3e8 100644
--- a/bazarr/api/utils.py
+++ b/bazarr/api/utils.py
@@ -77,7 +77,7 @@ def postprocess(item):
"hi": language[1] == 'hi',
}
)
- if settings.general.getboolean('embedded_subs_show_desired') and item.get('profileId'):
+ if settings.general.embedded_subs_show_desired and item.get('profileId'):
desired_lang_list = get_desired_languages(item['profileId'])
item['subtitles'] = [x for x in item['subtitles'] if x['code2'] in desired_lang_list or x['path']]
item['subtitles'] = sorted(item['subtitles'], key=itemgetter('name', 'forced'))
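
The intent of the hunk above, with made-up data: keep a subtitle entry either because its language is in the profile's desired list or because a file already exists on disk, then sort for stable display:

    from operator import itemgetter

    desired_lang_list = ["en", "fr"]
    subtitles = [
        {"name": "English", "code2": "en", "forced": False, "path": None},
        {"name": "German",  "code2": "de", "forced": False, "path": None},           # dropped: not desired, no file
        {"name": "Spanish", "code2": "es", "forced": False, "path": "/sub/es.srt"},  # kept: already on disk
    ]
    subtitles = [x for x in subtitles if x["code2"] in desired_lang_list or x["path"]]
    subtitles = sorted(subtitles, key=itemgetter("name", "forced"))
    print([x["code2"] for x in subtitles])   # ['en', 'es']
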
diff --git a/bazarr/app/config.py b/bazarr/app/config.py
index 6a759c89c..f6244179b 100644
--- a/bazarr/app/config.py
+++ b/bazarr/app/config.py
@@ -3,21 +3,21 @@
import hashlib
import os
import ast
+import logging
from urllib.parse import quote_plus
from subliminal.cache import region
-from simpleconfigparser import simpleconfigparser, configparser, NoOptionError
+from dynaconf import Dynaconf, Validator as OriginalValidator
+from dynaconf.loaders.yaml_loader import write
+from dynaconf.validator import ValidationError
+from dynaconf.utils.functional import empty
+from ipaddress import ip_address
+from binascii import hexlify
+from types import MappingProxyType
from .get_args import args
-
-class SimpleConfigParser(simpleconfigparser):
-
- def get(self, section, option, raw=False, vars=None):
- try:
- return configparser.get(self, section, option, raw=raw, vars=vars)
- except NoOptionError:
- return None
+NoneType = type(None)
def base_url_slash_cleaner(uri):
@@ -26,275 +26,371 @@ def base_url_slash_cleaner(uri):
return uri
-defaults = {
- 'general': {
- 'ip': '0.0.0.0',
- 'port': '6767',
- 'base_url': '',
- 'path_mappings': '[]',
- 'debug': 'False',
- 'branch': 'master',
- 'auto_update': 'True',
- 'single_language': 'False',
- 'minimum_score': '90',
- 'use_scenename': 'True',
- 'use_postprocessing': 'False',
- 'postprocessing_cmd': '',
- 'postprocessing_threshold': '90',
- 'use_postprocessing_threshold': 'False',
- 'postprocessing_threshold_movie': '70',
- 'use_postprocessing_threshold_movie': 'False',
- 'use_sonarr': 'False',
- 'use_radarr': 'False',
- 'path_mappings_movie': '[]',
- 'serie_default_enabled': 'False',
- 'serie_default_profile': '',
- 'movie_default_enabled': 'False',
- 'movie_default_profile': '',
- 'page_size': '25',
- 'theme': 'auto',
- 'page_size_manual_search': '10',
- 'minimum_score_movie': '70',
- 'use_embedded_subs': 'True',
- 'embedded_subs_show_desired': 'True',
- 'utf8_encode': 'True',
- 'ignore_pgs_subs': 'False',
- 'ignore_vobsub_subs': 'False',
- 'ignore_ass_subs': 'False',
- 'adaptive_searching': 'True',
- 'adaptive_searching_delay': '3w',
- 'adaptive_searching_delta': '1w',
- 'enabled_providers': '[]',
- 'multithreading': 'True',
- 'chmod_enabled': 'False',
- 'chmod': '0640',
- 'subfolder': 'current',
- 'subfolder_custom': '',
- 'upgrade_subs': 'True',
- 'upgrade_frequency': '12',
- 'days_to_upgrade_subs': '7',
- 'upgrade_manual': 'True',
- 'anti_captcha_provider': 'None',
- 'wanted_search_frequency': '6',
- 'wanted_search_frequency_movie': '6',
- 'subzero_mods': '[]',
- 'dont_notify_manual_actions': 'False',
- 'hi_extension': 'hi',
- 'embedded_subtitles_parser': 'ffprobe',
- 'default_und_audio_lang': '',
- 'default_und_embedded_subtitles_lang': '',
- 'parse_embedded_audio_track': 'False',
- 'skip_hashing': 'False',
- 'language_equals': '[]',
- },
- 'auth': {
- 'type': 'None',
- 'username': '',
- 'password': ''
- },
- 'cors': {
- 'enabled': 'False'
- },
- 'backup': {
- 'folder': os.path.join(args.config_dir, 'backup'),
- 'retention': '31',
- 'frequency': 'Weekly',
- 'day': '6',
- 'hour': '3'
- },
- 'sonarr': {
- 'ip': '127.0.0.1',
- 'port': '8989',
- 'base_url': '/',
- 'ssl': 'False',
- 'http_timeout': '60',
- 'apikey': '',
- 'full_update': 'Daily',
- 'full_update_day': '6',
- 'full_update_hour': '4',
- 'only_monitored': 'False',
- 'series_sync': '60',
- 'episodes_sync': '60',
- 'excluded_tags': '[]',
- 'excluded_series_types': '[]',
- 'use_ffprobe_cache': 'True',
- 'exclude_season_zero': 'False',
- 'defer_search_signalr': 'False'
- },
- 'radarr': {
- 'ip': '127.0.0.1',
- 'port': '7878',
- 'base_url': '/',
- 'ssl': 'False',
- 'http_timeout': '60',
- 'apikey': '',
- 'full_update': 'Daily',
- 'full_update_day': '6',
- 'full_update_hour': '5',
- 'only_monitored': 'False',
- 'movies_sync': '60',
- 'excluded_tags': '[]',
- 'use_ffprobe_cache': 'True',
- 'defer_search_signalr': 'False'
- },
- 'proxy': {
- 'type': 'None',
- 'url': '',
- 'port': '',
- 'username': '',
- 'password': '',
- 'exclude': '["localhost","127.0.0.1"]'
- },
- 'opensubtitles': {
- 'username': '',
- 'password': '',
- 'use_tag_search': 'False',
- 'vip': 'False',
- 'ssl': 'False',
- 'timeout': '15',
- 'skip_wrong_fps': 'False'
- },
- 'opensubtitlescom': {
- 'username': '',
- 'password': '',
- 'use_hash': 'True'
- },
- 'addic7ed': {
- 'username': '',
- 'password': '',
- 'cookies': '',
- 'user_agent': '',
- 'vip': 'False'
- },
- 'podnapisi': {
- 'verify_ssl': 'True'
- },
- 'subf2m': {
- 'verify_ssl': 'True',
- 'user_agent': ''
- },
- 'whisperai': {
- 'endpoint': 'http://127.0.0.1:9000',
- 'timeout': '3600'
- },
- 'legendasdivx': {
- 'username': '',
- 'password': '',
- 'skip_wrong_fps': 'False'
- },
- 'ktuvit': {
- 'email': '',
- 'hashed_password': ''
- },
- 'xsubs': {
- 'username': '',
- 'password': ''
- },
- 'assrt': {
- 'token': ''
- },
- 'anticaptcha': {
- 'anti_captcha_key': ''
- },
- 'deathbycaptcha': {
- 'username': '',
- 'password': ''
- },
- 'napisy24': {
- 'username': '',
- 'password': ''
- },
- 'subscene': {
- 'username': '',
- 'password': ''
- },
- 'betaseries': {
- 'token': ''
- },
- 'analytics': {
- 'enabled': 'True'
- },
- 'titlovi': {
- 'username': '',
- 'password': ''
- },
- 'titulky': {
- 'username': '',
- 'password': '',
- 'approved_only': 'False'
- },
- 'embeddedsubtitles': {
- 'included_codecs': '[]',
- 'hi_fallback': 'False',
- 'timeout': '600',
- 'unknown_as_english': 'False',
- },
- 'hdbits': {
- 'username': '',
- 'passkey': '',
- },
- 'karagarga': {
- 'username': '',
- 'password': '',
- 'f_username': '',
- 'f_password': '',
- },
- 'subsync': {
- 'use_subsync': 'False',
- 'use_subsync_threshold': 'False',
- 'subsync_threshold': '90',
- 'use_subsync_movie_threshold': 'False',
- 'subsync_movie_threshold': '70',
- 'debug': 'False',
- 'force_audio': 'False'
- },
- 'series_scores': {
- "hash": 359,
- "series": 180,
- "year": 90,
- "season": 30,
- "episode": 30,
- "release_group": 14,
- "source": 7,
- "audio_codec": 3,
- "resolution": 2,
- "video_codec": 2,
- "streaming_service": 1,
- "hearing_impaired": 1,
- },
- 'movie_scores': {
- "hash": 119,
- "title": 60,
- "year": 30,
- "release_group": 13,
- "source": 7,
- "audio_codec": 3,
- "resolution": 2,
- "video_codec": 2,
- "streaming_service": 1,
- "edition": 1,
- "hearing_impaired": 1,
- },
- 'postgresql': {
- 'enabled': 'False',
- 'host': 'localhost',
- 'port': '5432',
- 'database': '',
- 'username': '',
- 'password': '',
- },
-}
-
-settings = SimpleConfigParser(defaults=defaults, interpolation=None)
-settings.read(os.path.join(args.config_dir, 'config', 'config.ini'))
-
-settings.general.base_url = settings.general.base_url if settings.general.base_url else '/'
+def validate_ip_address(ip_string):
+ try:
+ ip_address(ip_string)
+ return True
+ except ValueError:
+ return False
+
+
+class Validator(OriginalValidator):
+ # Give the ability to personalize messages sent by the original dynaconf Validator class.
+ default_messages = MappingProxyType(
+ {
+ "must_exist_true": "{name} is required",
+ "must_exist_false": "{name} cannot exists",
+ "condition": "{name} invalid for {function}({value})",
+ "operations": "{name} must {operation} {op_value} but it is {value}",
+ "combined": "combined validators failed {errors}",
+ }
+ )
+
+
+validators = [
+ # general section
+ Validator('general.flask_secret_key', must_exist=True, default=hexlify(os.urandom(16)).decode(),
+ is_type_of=str),
+ Validator('general.ip', must_exist=True, default='0.0.0.0', is_type_of=str, condition=validate_ip_address),
+ Validator('general.port', must_exist=True, default=6767, is_type_of=int, gte=1, lte=65535),
+ Validator('general.base_url', must_exist=True, default='', is_type_of=str),
+ Validator('general.path_mappings', must_exist=True, default=[], is_type_of=list),
+ Validator('general.debug', must_exist=True, default=False, is_type_of=bool),
+ Validator('general.branch', must_exist=True, default='master', is_type_of=str,
+ is_in=['master', 'development']),
+ Validator('general.auto_update', must_exist=True, default=True, is_type_of=bool),
+ Validator('general.single_language', must_exist=True, default=False, is_type_of=bool),
+ Validator('general.minimum_score', must_exist=True, default=90, is_type_of=int, gte=0, lte=100),
+ Validator('general.use_scenename', must_exist=True, default=True, is_type_of=bool),
+ Validator('general.use_postprocessing', must_exist=True, default=False, is_type_of=bool),
+ Validator('general.postprocessing_cmd', must_exist=True, default='', is_type_of=str),
+ Validator('general.postprocessing_threshold', must_exist=True, default=90, is_type_of=int, gte=0, lte=100),
+ Validator('general.use_postprocessing_threshold', must_exist=True, default=False, is_type_of=bool),
+ Validator('general.postprocessing_threshold_movie', must_exist=True, default=70, is_type_of=int, gte=0,
+ lte=100),
+ Validator('general.use_postprocessing_threshold_movie', must_exist=True, default=False, is_type_of=bool),
+ Validator('general.use_sonarr', must_exist=True, default=False, is_type_of=bool),
+ Validator('general.use_radarr', must_exist=True, default=False, is_type_of=bool),
+ Validator('general.path_mappings_movie', must_exist=True, default=[], is_type_of=list),
+ Validator('general.serie_default_enabled', must_exist=True, default=False, is_type_of=bool),
+ Validator('general.serie_default_profile', must_exist=True, default='', is_type_of=(int, str)),
+ Validator('general.movie_default_enabled', must_exist=True, default=False, is_type_of=bool),
+ Validator('general.movie_default_profile', must_exist=True, default='', is_type_of=(int, str)),
+ Validator('general.page_size', must_exist=True, default=25, is_type_of=int,
+ is_in=[25, 50, 100, 250, 500, 1000]),
+ Validator('general.theme', must_exist=True, default='auto', is_type_of=str,
+ is_in=['auto', 'light', 'dark']),
+ Validator('general.minimum_score_movie', must_exist=True, default=70, is_type_of=int, gte=0, lte=100),
+ Validator('general.use_embedded_subs', must_exist=True, default=True, is_type_of=bool),
+ Validator('general.embedded_subs_show_desired', must_exist=True, default=True, is_type_of=bool),
+ Validator('general.utf8_encode', must_exist=True, default=True, is_type_of=bool),
+ Validator('general.ignore_pgs_subs', must_exist=True, default=False, is_type_of=bool),
+ Validator('general.ignore_vobsub_subs', must_exist=True, default=False, is_type_of=bool),
+ Validator('general.ignore_ass_subs', must_exist=True, default=False, is_type_of=bool),
+ Validator('general.adaptive_searching', must_exist=True, default=True, is_type_of=bool),
+ Validator('general.adaptive_searching_delay', must_exist=True, default='3w', is_type_of=str,
+ is_in=['1w', '2w', '3w', '4w']),
+ Validator('general.adaptive_searching_delta', must_exist=True, default='1w', is_type_of=str,
+ is_in=['3d', '1w', '2w', '3w', '4w']),
+ Validator('general.enabled_providers', must_exist=True, default=[], is_type_of=list),
+ Validator('general.multithreading', must_exist=True, default=True, is_type_of=bool),
+ Validator('general.chmod_enabled', must_exist=True, default=False, is_type_of=bool),
+ Validator('general.chmod', must_exist=True, default='0640', is_type_of=str),
+ Validator('general.subfolder', must_exist=True, default='current', is_type_of=str),
+ Validator('general.subfolder_custom', must_exist=True, default='', is_type_of=str),
+ Validator('general.upgrade_subs', must_exist=True, default=True, is_type_of=bool),
+ Validator('general.upgrade_frequency', must_exist=True, default=12, is_type_of=int, is_in=[6, 12, 24]),
+ Validator('general.days_to_upgrade_subs', must_exist=True, default=7, is_type_of=int, gte=0, lte=30),
+ Validator('general.upgrade_manual', must_exist=True, default=True, is_type_of=bool),
+ Validator('general.anti_captcha_provider', must_exist=True, default=None, is_type_of=(NoneType, str),
+ is_in=[None, 'anti-captcha', 'death-by-captcha']),
+ Validator('general.wanted_search_frequency', must_exist=True, default=6, is_type_of=int, is_in=[6, 12, 24]),
+ Validator('general.wanted_search_frequency_movie', must_exist=True, default=6, is_type_of=int,
+ is_in=[6, 12, 24]),
+ Validator('general.subzero_mods', must_exist=True, default='', is_type_of=str),
+ Validator('general.dont_notify_manual_actions', must_exist=True, default=False, is_type_of=bool),
+ Validator('general.hi_extension', must_exist=True, default='hi', is_type_of=str, is_in=['hi', 'cc', 'sdh']),
+ Validator('general.embedded_subtitles_parser', must_exist=True, default='ffprobe', is_type_of=str,
+ is_in=['ffprobe', 'mediainfo']),
+ Validator('general.default_und_audio_lang', must_exist=True, default='', is_type_of=str),
+ Validator('general.default_und_embedded_subtitles_lang', must_exist=True, default='', is_type_of=str),
+ Validator('general.parse_embedded_audio_track', must_exist=True, default=False, is_type_of=bool),
+ Validator('general.skip_hashing', must_exist=True, default=False, is_type_of=bool),
+ Validator('general.language_equals', must_exist=True, default=[], is_type_of=list),
+
+ # auth section
+ Validator('auth.apikey', must_exist=True, default=hexlify(os.urandom(16)).decode(), is_type_of=str),
+ Validator('auth.type', must_exist=True, default=None, is_type_of=(NoneType, str),
+ is_in=[None, 'basic', 'form']),
+ Validator('auth.username', must_exist=True, default='', is_type_of=str),
+ Validator('auth.password', must_exist=True, default='', is_type_of=str),
+
+ # cors section
+ Validator('cors.enabled', must_exist=True, default=False, is_type_of=bool),
+
+ # backup section
+ Validator('backup.folder', must_exist=True, default=os.path.join(args.config_dir, 'backup'),
+ is_type_of=str),
+ Validator('backup.retention', must_exist=True, default=31, is_type_of=int, gte=0),
+ Validator('backup.frequency', must_exist=True, default='Weekly', is_type_of=str,
+ is_in=['Manually', 'Daily', 'Weekly']),
+ Validator('backup.day', must_exist=True, default=6, is_type_of=int, gte=0, lte=6),
+ Validator('backup.hour', must_exist=True, default=3, is_type_of=int, gte=0, lte=23),
+
+ # sonarr section
+ Validator('sonarr.ip', must_exist=True, default='127.0.0.1', is_type_of=str),
+ Validator('sonarr.port', must_exist=True, default=8989, is_type_of=int, gte=1, lte=65535),
+ Validator('sonarr.base_url', must_exist=True, default='/', is_type_of=str),
+ Validator('sonarr.ssl', must_exist=True, default=False, is_type_of=bool),
+ Validator('sonarr.http_timeout', must_exist=True, default=60, is_type_of=int,
+ is_in=[60, 120, 180, 240, 300, 600]),
+ Validator('sonarr.apikey', must_exist=True, default='', is_type_of=str),
+ Validator('sonarr.full_update', must_exist=True, default='Daily', is_type_of=str,
+ is_in=['Manually', 'Daily', 'Weekly']),
+ Validator('sonarr.full_update_day', must_exist=True, default=6, is_type_of=int, gte=0, lte=6),
+ Validator('sonarr.full_update_hour', must_exist=True, default=4, is_type_of=int, gte=0, lte=23),
+ Validator('sonarr.only_monitored', must_exist=True, default=False, is_type_of=bool),
+ Validator('sonarr.series_sync', must_exist=True, default=60, is_type_of=int,
+ is_in=[15, 60, 180, 360, 720, 1440]),
+ Validator('sonarr.episodes_sync', must_exist=True, default=60, is_type_of=int,
+ is_in=[15, 60, 180, 360, 720, 1440]),
+ Validator('sonarr.excluded_tags', must_exist=True, default=[], is_type_of=list),
+ Validator('sonarr.excluded_series_types', must_exist=True, default=[], is_type_of=list),
+ Validator('sonarr.use_ffprobe_cache', must_exist=True, default=True, is_type_of=bool),
+ Validator('sonarr.exclude_season_zero', must_exist=True, default=False, is_type_of=bool),
+ Validator('sonarr.defer_search_signalr', must_exist=True, default=False, is_type_of=bool),
+
+ # radarr section
+ Validator('radarr.ip', must_exist=True, default='127.0.0.1', is_type_of=str),
+ Validator('radarr.port', must_exist=True, default=7878, is_type_of=int, gte=1, lte=65535),
+ Validator('radarr.base_url', must_exist=True, default='/', is_type_of=str),
+ Validator('radarr.ssl', must_exist=True, default=False, is_type_of=bool),
+ Validator('radarr.http_timeout', must_exist=True, default=60, is_type_of=int,
+ is_in=[60, 120, 180, 240, 300, 600]),
+ Validator('radarr.apikey', must_exist=True, default='', is_type_of=str),
+ Validator('radarr.full_update', must_exist=True, default='Daily', is_type_of=str,
+ is_in=['Manually', 'Daily', 'Weekly']),
+ Validator('radarr.full_update_day', must_exist=True, default=6, is_type_of=int, gte=0, lte=6),
+ Validator('radarr.full_update_hour', must_exist=True, default=4, is_type_of=int, gte=0, lte=23),
+ Validator('radarr.only_monitored', must_exist=True, default=False, is_type_of=bool),
+ Validator('radarr.movies_sync', must_exist=True, default=60, is_type_of=int,
+ is_in=[15, 60, 180, 360, 720, 1440]),
+ Validator('radarr.excluded_tags', must_exist=True, default=[], is_type_of=list),
+ Validator('radarr.use_ffprobe_cache', must_exist=True, default=True, is_type_of=bool),
+ Validator('radarr.defer_search_signalr', must_exist=True, default=False, is_type_of=bool),
+
+ # proxy section
+ Validator('proxy.type', must_exist=True, default=None, is_type_of=(NoneType, str),
+ is_in=[None, 'socks5', 'http']),
+ Validator('proxy.url', must_exist=True, default='', is_type_of=str),
+ Validator('proxy.port', must_exist=True, default='', is_type_of=(str, int)),
+ Validator('proxy.username', must_exist=True, default='', is_type_of=str),
+ Validator('proxy.password', must_exist=True, default='', is_type_of=str),
+ Validator('proxy.exclude', must_exist=True, default=["localhost", "127.0.0.1"], is_type_of=list),
+
+ # opensubtitles.org section
+ Validator('opensubtitles.username', must_exist=True, default='', is_type_of=str),
+ Validator('opensubtitles.password', must_exist=True, default='', is_type_of=str),
+ Validator('opensubtitles.use_tag_search', must_exist=True, default=False, is_type_of=bool),
+ Validator('opensubtitles.vip', must_exist=True, default=False, is_type_of=bool),
+ Validator('opensubtitles.ssl', must_exist=True, default=False, is_type_of=bool),
+ Validator('opensubtitles.timeout', must_exist=True, default=15, is_type_of=int, gte=1),
+ Validator('opensubtitles.skip_wrong_fps', must_exist=True, default=False, is_type_of=bool),
+
+ # opensubtitles.com section
+ Validator('opensubtitlescom.username', must_exist=True, default='', is_type_of=str),
+ Validator('opensubtitlescom.password', must_exist=True, default='', is_type_of=str),
+ Validator('opensubtitlescom.use_hash', must_exist=True, default=True, is_type_of=bool),
+
+ # addic7ed section
+ Validator('addic7ed.username', must_exist=True, default='', is_type_of=str),
+ Validator('addic7ed.password', must_exist=True, default='', is_type_of=str),
+ Validator('addic7ed.cookies', must_exist=True, default='', is_type_of=str),
+ Validator('addic7ed.user_agent', must_exist=True, default='', is_type_of=str),
+ Validator('addic7ed.vip', must_exist=True, default=False, is_type_of=bool),
+
+ # podnapisi section
+ Validator('podnapisi.verify_ssl', must_exist=True, default=True, is_type_of=bool),
+
+ # subf2m section
+ Validator('subf2m.verify_ssl', must_exist=True, default=True, is_type_of=bool),
+ Validator('subf2m.user_agent', must_exist=True, default='', is_type_of=str),
+
+ # hdbits section
+ Validator('hdbits.username', must_exist=True, default='', is_type_of=str),
+ Validator('hdbits.passkey', must_exist=True, default='', is_type_of=str),
+
+ # whisperai section
+ Validator('whisperai.endpoint', must_exist=True, default='http://127.0.0.1:9000', is_type_of=str),
+ Validator('whisperai.timeout', must_exist=True, default=3600, is_type_of=int, gte=1),
+
+ # legendasdivx section
+ Validator('legendasdivx.username', must_exist=True, default='', is_type_of=str),
+ Validator('legendasdivx.password', must_exist=True, default='', is_type_of=str),
+ Validator('legendasdivx.skip_wrong_fps', must_exist=True, default=False, is_type_of=bool),
+
+ # ktuvit section
+ Validator('ktuvit.email', must_exist=True, default='', is_type_of=str),
+ Validator('ktuvit.hashed_password', must_exist=True, default='', is_type_of=str),
+
+ # xsubs section
+ Validator('xsubs.username', must_exist=True, default='', is_type_of=str),
+ Validator('xsubs.password', must_exist=True, default='', is_type_of=str),
+
+ # assrt section
+ Validator('assrt.token', must_exist=True, default='', is_type_of=str),
+
+ # anticaptcha section
+ Validator('anticaptcha.anti_captcha_key', must_exist=True, default='', is_type_of=str),
+
+ # deathbycaptcha section
+ Validator('deathbycaptcha.username', must_exist=True, default='', is_type_of=str),
+ Validator('deathbycaptcha.password', must_exist=True, default='', is_type_of=str),
+
+ # napisy24 section
+ Validator('napisy24.username', must_exist=True, default='', is_type_of=str),
+ Validator('napisy24.password', must_exist=True, default='', is_type_of=str),
+
+ # subscene section
+ Validator('subscene.username', must_exist=True, default='', is_type_of=str),
+ Validator('subscene.password', must_exist=True, default='', is_type_of=str),
+
+ # betaseries section
+ Validator('betaseries.token', must_exist=True, default='', is_type_of=str),
+
+ # analytics section
+ Validator('analytics.enabled', must_exist=True, default=True, is_type_of=bool),
+
+ # titlovi section
+ Validator('titlovi.username', must_exist=True, default='', is_type_of=str),
+ Validator('titlovi.password', must_exist=True, default='', is_type_of=str),
+
+ # titulky section
+ Validator('titulky.username', must_exist=True, default='', is_type_of=str),
+ Validator('titulky.password', must_exist=True, default='', is_type_of=str),
+ Validator('titulky.approved_only', must_exist=True, default=False, is_type_of=bool),
+
+ # embeddedsubtitles section
+ Validator('embeddedsubtitles.included_codecs', must_exist=True, default=[], is_type_of=list),
+ Validator('embeddedsubtitles.hi_fallback', must_exist=True, default=False, is_type_of=bool),
+ Validator('embeddedsubtitles.timeout', must_exist=True, default=600, is_type_of=int, gte=1),
+ Validator('embeddedsubtitles.unknown_as_english', must_exist=True, default=False, is_type_of=bool),
+
+ # karagarga section
+ Validator('karagarga.username', must_exist=True, default='', is_type_of=str),
+ Validator('karagarga.password', must_exist=True, default='', is_type_of=str),
+ Validator('karagarga.f_username', must_exist=True, default='', is_type_of=str),
+ Validator('karagarga.f_password', must_exist=True, default='', is_type_of=str),
+
+ # subsync section
+ Validator('subsync.use_subsync', must_exist=True, default=False, is_type_of=bool),
+ Validator('subsync.use_subsync_threshold', must_exist=True, default=False, is_type_of=bool),
+ Validator('subsync.subsync_threshold', must_exist=True, default=90, is_type_of=int, gte=0, lte=100),
+ Validator('subsync.use_subsync_movie_threshold', must_exist=True, default=False, is_type_of=bool),
+ Validator('subsync.subsync_movie_threshold', must_exist=True, default=70, is_type_of=int, gte=0, lte=100),
+ Validator('subsync.debug', must_exist=True, default=False, is_type_of=bool),
+ Validator('subsync.force_audio', must_exist=True, default=False, is_type_of=bool),
+
+ # series_scores section
+ Validator('series_scores.hash', must_exist=True, default=359, is_type_of=int),
+ Validator('series_scores.series', must_exist=True, default=180, is_type_of=int),
+ Validator('series_scores.year', must_exist=True, default=90, is_type_of=int),
+ Validator('series_scores.season', must_exist=True, default=30, is_type_of=int),
+ Validator('series_scores.episode', must_exist=True, default=30, is_type_of=int),
+ Validator('series_scores.release_group', must_exist=True, default=14, is_type_of=int),
+ Validator('series_scores.source', must_exist=True, default=7, is_type_of=int),
+ Validator('series_scores.audio_codec', must_exist=True, default=3, is_type_of=int),
+ Validator('series_scores.resolution', must_exist=True, default=2, is_type_of=int),
+ Validator('series_scores.video_codec', must_exist=True, default=2, is_type_of=int),
+ Validator('series_scores.streaming_service', must_exist=True, default=1, is_type_of=int),
+ Validator('series_scores.hearing_impaired', must_exist=True, default=1, is_type_of=int),
+
+ # movie_scores section
+ Validator('movie_scores.hash', must_exist=True, default=119, is_type_of=int),
+ Validator('movie_scores.title', must_exist=True, default=60, is_type_of=int),
+ Validator('movie_scores.year', must_exist=True, default=30, is_type_of=int),
+ Validator('movie_scores.release_group', must_exist=True, default=13, is_type_of=int),
+ Validator('movie_scores.source', must_exist=True, default=7, is_type_of=int),
+ Validator('movie_scores.audio_codec', must_exist=True, default=3, is_type_of=int),
+ Validator('movie_scores.resolution', must_exist=True, default=2, is_type_of=int),
+ Validator('movie_scores.video_codec', must_exist=True, default=2, is_type_of=int),
+ Validator('movie_scores.streaming_service', must_exist=True, default=1, is_type_of=int),
+ Validator('movie_scores.edition', must_exist=True, default=1, is_type_of=int),
+ Validator('movie_scores.hearing_impaired', must_exist=True, default=1, is_type_of=int),
+
+ # postgresql section
+ Validator('postgresql.enabled', must_exist=True, default=False, is_type_of=bool),
+ Validator('postgresql.host', must_exist=True, default='localhost', is_type_of=str),
+ Validator('postgresql.port', must_exist=True, default=5432, is_type_of=int, gte=1, lte=65535),
+ Validator('postgresql.database', must_exist=True, default='', is_type_of=str),
+ Validator('postgresql.username', must_exist=True, default='', is_type_of=str),
+ Validator('postgresql.password', must_exist=True, default='', is_type_of=str),
+]
+
+
+def convert_ini_to_yaml(config_file):
+ import configparser
+ import yaml
+ config_object = configparser.ConfigParser()
+ file = open(config_file, "r")
+ config_object.read_file(file)
+ output_dict = dict()
+ sections = config_object.sections()
+ for section in sections:
+ items = config_object.items(section)
+ output_dict[section] = dict()
+ for item in items:
+ try:
+ output_dict[section].update({item[0]: ast.literal_eval(item[1])})
+ except (ValueError, TypeError, SyntaxError, MemoryError, RecursionError):
+ output_dict[section].update({item[0]: item[1]})
+ with open(os.path.join(os.path.dirname(config_file), 'config.yaml'), 'w') as file:
+ yaml.dump(output_dict, file)
+ os.rename(config_file, config_file + '.old')
+
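
The one-time migration above leans on ast.literal_eval to recover native types from the stringly-typed ini values, keeping the raw string whenever the value is not a valid Python literal. A sketch of what that yields for typical config.ini values (inputs are illustrative):

    import ast

    for raw in ("True", "6767", '["localhost","127.0.0.1"]', "master", "0640"):
        try:
            value = ast.literal_eval(raw)
        except (ValueError, TypeError, SyntaxError, MemoryError, RecursionError):
            value = raw                  # not a literal: keep the string
        print(f"{raw!r} -> {value!r}")

    # 'True' -> True, '6767' -> 6767,
    # '["localhost","127.0.0.1"]' -> ['localhost', '127.0.0.1'],
    # 'master' -> 'master' (a bare name is not a literal),
    # '0640' -> '0640' (leading zero raises SyntaxError, so chmod stays a string)
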
+
+config_yaml_file = os.path.join(args.config_dir, 'config', 'config.yaml')
+config_ini_file = os.path.join(args.config_dir, 'config', 'config.ini')
+if os.path.exists(config_ini_file) and not os.path.exists(config_yaml_file):
+ convert_ini_to_yaml(config_ini_file)
+elif not os.path.exists(config_yaml_file):
+ if not os.path.isdir(os.path.dirname(config_yaml_file)):
+ os.makedirs(os.path.dirname(config_yaml_file))
+ open(config_yaml_file, mode='w').close()
+
+settings = Dynaconf(
+ settings_file=config_yaml_file,
+ core_loaders=['YAML'],
+ apply_default_on_none=True,
+)
+
+settings.validators.register(*validators)
+
+failed_validator = True
+while failed_validator:
+ try:
+ settings.validators.validate_all()
+ failed_validator = False
+ except ValidationError as e:
+ current_validator_details = e.details[0][0]
+ if hasattr(current_validator_details, 'default') and current_validator_details.default is not empty:
+ settings[current_validator_details.names[0]] = current_validator_details.default
+ else:
+ logging.critical(f"Value for {current_validator_details.names[0]} doesn't pass validation and there's no "
+ f"default value. This issue must be reported. Bazarr won't works until it's been fixed.")
+ os._exit(0)
+
+
+def write_config():
+ write(settings_path=config_yaml_file,
+ settings_data={k.lower(): v for k, v in settings.as_dict().items()},
+ merge=False)
+
+
base_url = settings.general.base_url.rstrip('/')
ignore_keys = ['flask_secret_key']
-raw_keys = ['movie_default_forced', 'serie_default_forced']
-
array_keys = ['excluded_tags',
'exclude',
'included_codecs',
@@ -305,79 +401,50 @@ array_keys = ['excluded_tags',
'path_mappings_movie',
'language_equals']
-str_keys = ['chmod']
-
empty_values = ['', 'None', 'null', 'undefined', None, []]
+str_keys = ['chmod']
+
# Increase Sonarr and Radarr sync interval since we now use SignalR feed to update in real time
-if int(settings.sonarr.series_sync) < 15:
- settings.sonarr.series_sync = "60"
-if int(settings.sonarr.episodes_sync) < 15:
- settings.sonarr.episodes_sync = "60"
-if int(settings.radarr.movies_sync) < 15:
- settings.radarr.movies_sync = "60"
+if settings.sonarr.series_sync < 15:
+ settings.sonarr.series_sync = 60
+if settings.sonarr.episodes_sync < 15:
+ settings.sonarr.episodes_sync = 60
+if settings.radarr.movies_sync < 15:
+ settings.radarr.movies_sync = 60
# Make sure to get of double slashes in base_url
settings.general.base_url = base_url_slash_cleaner(uri=settings.general.base_url)
settings.sonarr.base_url = base_url_slash_cleaner(uri=settings.sonarr.base_url)
settings.radarr.base_url = base_url_slash_cleaner(uri=settings.radarr.base_url)
-# fixing issue with improper page_size value
-if settings.general.page_size not in ['25', '50', '100', '250', '500', '1000']:
- settings.general.page_size = defaults['general']['page_size']
-
# increase delay between searches to reduce impact on providers
-if settings.general.wanted_search_frequency == '3':
- settings.general.wanted_search_frequency = '6'
-if settings.general.wanted_search_frequency_movie == '3':
- settings.general.wanted_search_frequency_movie = '6'
+if settings.general.wanted_search_frequency == 3:
+ settings.general.wanted_search_frequency = 6
+if settings.general.wanted_search_frequency_movie == 3:
+ settings.general.wanted_search_frequency_movie = 6
# save updated settings to file
-if os.path.exists(os.path.join(args.config_dir, 'config', 'config.ini')):
- with open(os.path.join(args.config_dir, 'config', 'config.ini'), 'w+') as handle:
- settings.write(handle)
+write_config()
def get_settings():
- result = dict()
- sections = settings.sections()
-
- for sec in sections:
- sec_values = settings.items(sec, False)
- values_dict = dict()
-
- for sec_val in sec_values:
- key = sec_val[0]
- value = sec_val[1]
-
- if key in ignore_keys:
- continue
-
- if key not in raw_keys:
- # Do some postprocessings
- if value in empty_values:
- if key in array_keys:
- value = []
- else:
- continue
- elif key in array_keys:
- value = get_array_from(value)
- elif value == 'True':
- value = True
- elif value == 'False':
- value = False
+ # return {k.lower(): v for k, v in settings.as_dict().items()}
+ settings_to_return = {}
+ for k, v in settings.as_dict().items():
+ if isinstance(v, dict):
+ k = k.lower()
+ settings_to_return[k] = dict()
+ for subk, subv in v.items():
+ if subk.lower() in ignore_keys:
+ continue
+ if subv in empty_values and subk.lower() in array_keys:
+ settings_to_return[k].update({subk: []})
+ elif subk == 'subzero_mods':
+ settings_to_return[k].update({subk: get_array_from(subv)})
else:
- if key not in str_keys:
- try:
- value = int(value)
- except ValueError:
- pass
-
- values_dict[key] = value
-
- result[sec] = values_dict
-
- return result
+ settings_to_return[k].update({subk: subv})
+ return settings_to_return
def save_settings(settings_items):
@@ -408,24 +475,31 @@ def save_settings(settings_items):
settings_keys = key.split('-')
- # Make sure that text based form values aren't pass as list
+ # Make sure that text based form values aren't passed as list
if isinstance(value, list) and len(value) == 1 and settings_keys[-1] not in array_keys:
value = value[0]
if value in empty_values and value != '':
value = None
+ # try to cast string as integer
+ if isinstance(value, str) and settings_keys[-1] not in str_keys:
+ try:
+ value = int(value)
+ except ValueError:
+ pass
+
# Make sure empty language list are stored correctly
if settings_keys[-1] in array_keys and value[0] in empty_values:
value = []
# Handle path mappings settings since they are array in array
if settings_keys[-1] in ['path_mappings', 'path_mappings_movie']:
- value = [v.split(',') for v in value]
+ value = [x.split(',') for x in value if isinstance(x, str)]
if value == 'true':
- value = 'True'
+ value = True
elif value == 'false':
- value = 'False'
+ value = False
if key in ['settings-general-use_embedded_subs', 'settings-general-ignore_pgs_subs',
'settings-general-ignore_vobsub_subs', 'settings-general-ignore_ass_subs']:
@@ -553,14 +627,13 @@ def save_settings(settings_items):
reset_throttled_providers(only_auth_or_conf_error=True)
if settings_keys[0] == 'settings':
- settings[settings_keys[1]][settings_keys[2]] = str(value)
+ settings[settings_keys[1]][settings_keys[2]] = value
if settings_keys[0] == 'subzero':
mod = settings_keys[1]
- enabled = value == 'True'
- if mod in subzero_mods and not enabled:
+ if mod in subzero_mods and not value:
subzero_mods.remove(mod)
- elif enabled:
+ elif value:
subzero_mods.append(mod)
# Handle color
@@ -581,77 +654,82 @@ def save_settings(settings_items):
from .scheduler import scheduler
from subtitles.indexer.series import list_missing_subtitles
from subtitles.indexer.movies import list_missing_subtitles_movies
- if settings.general.getboolean('use_sonarr'):
+ if settings.general.use_sonarr:
scheduler.add_job(list_missing_subtitles, kwargs={'send_event': True})
- if settings.general.getboolean('use_radarr'):
+ if settings.general.use_radarr:
scheduler.add_job(list_missing_subtitles_movies, kwargs={'send_event': True})
if undefined_subtitles_track_default_changed:
from .scheduler import scheduler
from subtitles.indexer.series import series_full_scan_subtitles
from subtitles.indexer.movies import movies_full_scan_subtitles
- if settings.general.getboolean('use_sonarr'):
+ if settings.general.use_sonarr:
scheduler.add_job(series_full_scan_subtitles, kwargs={'use_cache': True})
- if settings.general.getboolean('use_radarr'):
+ if settings.general.use_radarr:
scheduler.add_job(movies_full_scan_subtitles, kwargs={'use_cache': True})
if audio_tracks_parsing_changed:
from .scheduler import scheduler
- if settings.general.getboolean('use_sonarr'):
+ if settings.general.use_sonarr:
from sonarr.sync.series import update_series
scheduler.add_job(update_series, kwargs={'send_event': True}, max_instances=1)
- if settings.general.getboolean('use_radarr'):
+ if settings.general.use_radarr:
from radarr.sync.movies import update_movies
scheduler.add_job(update_movies, kwargs={'send_event': True}, max_instances=1)
if update_subzero:
- settings.set('general', 'subzero_mods', ','.join(subzero_mods))
+ settings.general.subzero_mods = ','.join(subzero_mods)
- with open(os.path.join(args.config_dir, 'config', 'config.ini'), 'w+') as handle:
- settings.write(handle)
-
- # Reconfigure Bazarr to reflect changes
- if configure_debug:
- from .logger import configure_logging
- configure_logging(settings.general.getboolean('debug') or args.debug)
-
- if configure_captcha:
- configure_captcha_func()
-
- if update_schedule:
- from .scheduler import scheduler
- from .event_handler import event_stream
- scheduler.update_configurable_tasks()
- event_stream(type='task')
-
- if sonarr_changed:
- from .signalr_client import sonarr_signalr_client
- try:
- sonarr_signalr_client.restart()
- except Exception:
- pass
-
- if radarr_changed:
- from .signalr_client import radarr_signalr_client
- try:
- radarr_signalr_client.restart()
- except Exception:
- pass
-
- if update_path_map:
- from utilities.path_mappings import path_mappings
- path_mappings.update()
-
- if configure_proxy:
- configure_proxy_func()
-
- if exclusion_updated:
- from .event_handler import event_stream
- event_stream(type='badges')
- if sonarr_exclusion_updated:
- event_stream(type='reset-episode-wanted')
- if radarr_exclusion_updated:
- event_stream(type='reset-movie-wanted')
+ try:
+ settings.validators.validate()
+ except ValidationError:
+ settings.reload()
+ raise
+ else:
+ write_config()
+
+ # Reconfigure Bazarr to reflect changes
+ if configure_debug:
+ from .logger import configure_logging
+ configure_logging(settings.general.debug or args.debug)
+
+ if configure_captcha:
+ configure_captcha_func()
+
+ if update_schedule:
+ from .scheduler import scheduler
+ from .event_handler import event_stream
+ scheduler.update_configurable_tasks()
+ event_stream(type='task')
+
+ if sonarr_changed:
+ from .signalr_client import sonarr_signalr_client
+ try:
+ sonarr_signalr_client.restart()
+ except Exception:
+ pass
+
+ if radarr_changed:
+ from .signalr_client import radarr_signalr_client
+ try:
+ radarr_signalr_client.restart()
+ except Exception:
+ pass
+
+ if update_path_map:
+ from utilities.path_mappings import path_mappings
+ path_mappings.update()
+
+ if configure_proxy:
+ configure_proxy_func()
+
+ if exclusion_updated:
+ from .event_handler import event_stream
+ event_stream(type='badges')
+ if sonarr_exclusion_updated:
+ event_stream(type='reset-episode-wanted')
+ if radarr_exclusion_updated:
+ event_stream(type='reset-movie-wanted')
def get_array_from(property):
@@ -681,15 +759,15 @@ def configure_captcha_func():
def configure_proxy_func():
- if settings.proxy.type != 'None':
+ if settings.proxy.type:
if settings.proxy.username != '' and settings.proxy.password != '':
proxy = settings.proxy.type + '://' + quote_plus(settings.proxy.username) + ':' + \
- quote_plus(settings.proxy.password) + '@' + settings.proxy.url + ':' + settings.proxy.port
+ quote_plus(settings.proxy.password) + '@' + settings.proxy.url + ':' + str(settings.proxy.port)
else:
- proxy = settings.proxy.type + '://' + settings.proxy.url + ':' + settings.proxy.port
+ proxy = settings.proxy.type + '://' + settings.proxy.url + ':' + str(settings.proxy.port)
os.environ['HTTP_PROXY'] = str(proxy)
os.environ['HTTPS_PROXY'] = str(proxy)
- exclude = ','.join(get_array_from(settings.proxy.exclude))
+ exclude = ','.join(settings.proxy.exclude)
os.environ['NO_PROXY'] = exclude
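
With config.yaml, proxy.port can come back as an int and proxy.exclude as a real list, hence the str(settings.proxy.port) casts and the dropped get_array_from() above. A sketch of the resulting URL assembly (host and credentials are placeholders):

    import os
    from urllib.parse import quote_plus

    ptype, url, port = "http", "proxy.local", 3128   # port may be an int now
    username, password = "user", "p@ss:word"         # characters that need escaping

    if username != '' and password != '':
        proxy = ptype + '://' + quote_plus(username) + ':' + \
                quote_plus(password) + '@' + url + ':' + str(port)
    else:
        proxy = ptype + '://' + url + ':' + str(port)

    os.environ['HTTP_PROXY'] = str(proxy)
    os.environ['HTTPS_PROXY'] = str(proxy)
    os.environ['NO_PROXY'] = ','.join(["localhost", "127.0.0.1"])   # exclude is a plain list now
    print(proxy)   # http://user:p%40ss%3Aword@proxy.local:3128
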
diff --git a/bazarr/app/database.py b/bazarr/app/database.py
index 08fcf7db4..880cc1f8b 100644
--- a/bazarr/app/database.py
+++ b/bazarr/app/database.py
@@ -18,12 +18,16 @@ from sqlalchemy.pool import NullPool
from flask_sqlalchemy import SQLAlchemy
-from .config import settings, get_array_from
+from .config import settings
from .get_args import args
logger = logging.getLogger(__name__)
-postgresql = (os.getenv("POSTGRES_ENABLED", settings.postgresql.enabled).lower() == 'true')
+POSTGRES_ENABLED_ENV = os.getenv("POSTGRES_ENABLED")
+if POSTGRES_ENABLED_ENV:
+ postgresql = POSTGRES_ENABLED_ENV.lower() == 'true'
+else:
+ postgresql = settings.postgresql.enabled
region = make_region().configure('dogpile.cache.memory')
@@ -324,30 +328,30 @@ def migrate_db(app):
def get_exclusion_clause(exclusion_type):
where_clause = []
if exclusion_type == 'series':
- tagsList = ast.literal_eval(settings.sonarr.excluded_tags)
+ tagsList = settings.sonarr.excluded_tags
for tag in tagsList:
where_clause.append(~(TableShows.tags.contains("\'" + tag + "\'")))
else:
- tagsList = ast.literal_eval(settings.radarr.excluded_tags)
+ tagsList = settings.radarr.excluded_tags
for tag in tagsList:
where_clause.append(~(TableMovies.tags.contains("\'" + tag + "\'")))
if exclusion_type == 'series':
- monitoredOnly = settings.sonarr.getboolean('only_monitored')
+ monitoredOnly = settings.sonarr.only_monitored
if monitoredOnly:
where_clause.append((TableEpisodes.monitored == 'True')) # noqa E712
where_clause.append((TableShows.monitored == 'True')) # noqa E712
else:
- monitoredOnly = settings.radarr.getboolean('only_monitored')
+ monitoredOnly = settings.radarr.only_monitored
if monitoredOnly:
where_clause.append((TableMovies.monitored == 'True')) # noqa E712
if exclusion_type == 'series':
- typesList = get_array_from(settings.sonarr.excluded_series_types)
+ typesList = settings.sonarr.excluded_series_types
for item in typesList:
where_clause.append((TableShows.seriesType != item))
- exclude_season_zero = settings.sonarr.getboolean('exclude_season_zero')
+ exclude_season_zero = settings.sonarr.exclude_season_zero
if exclude_season_zero:
where_clause.append((TableEpisodes.season != 0))
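
The old one-liner passed settings.postgresql.enabled (then the string 'False') as the default to os.getenv() and lower-cased the result; now that the setting is a real boolean that call would crash, so the environment override is unfolded. A sketch of the equivalent logic (the local name stands in for the real settings object):

    import os

    postgresql_enabled_setting = False            # stands in for settings.postgresql.enabled (now a bool)

    env = os.getenv("POSTGRES_ENABLED")           # e.g. "true", "True", "false", or unset
    if env:
        postgresql = env.lower() == "true"        # environment wins when present
    else:
        postgresql = postgresql_enabled_setting   # otherwise fall back to config.yaml
    print(postgresql)
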
diff --git a/bazarr/app/get_providers.py b/bazarr/app/get_providers.py
index f0595f83a..e4e55fcd1 100644
--- a/bazarr/app/get_providers.py
+++ b/bazarr/app/get_providers.py
@@ -1,6 +1,5 @@
# coding=utf-8
-import ast
import os
import datetime
import pytz
@@ -21,7 +20,7 @@ from subliminal import region as subliminal_cache_region
from subliminal_patch.extensions import provider_registry
from app.get_args import args
-from app.config import settings, get_array_from
+from app.config import settings
from languages.get_languages import CustomLanguage
from app.event_handler import event_stream
from utilities.binaries import get_binary
@@ -126,7 +125,7 @@ throttle_count = {}
def provider_pool():
- if settings.general.getboolean('multithreading'):
+ if settings.general.multithreading:
return subliminal_patch.core.SZAsyncProviderPool
return subliminal_patch.core.SZProviderPool
@@ -157,7 +156,7 @@ def _lang_from_str(content: str):
def get_language_equals(settings_=None):
settings_ = settings_ or settings
- equals = get_array_from(settings_.general.language_equals)
+ equals = settings_.general.language_equals
if not equals:
return []
@@ -177,7 +176,7 @@ def get_language_equals(settings_=None):
def get_providers():
providers_list = []
existing_providers = provider_registry.names()
- providers = [x for x in get_array_from(settings.general.enabled_providers) if x in existing_providers]
+ providers = [x for x in settings.general.enabled_providers if x in existing_providers]
for provider in providers:
reason, until, throttle_desc = tp.get(provider, (None, None, None))
providers_list.append(provider)
@@ -205,9 +204,9 @@ def get_providers():
def get_enabled_providers():
# return enabled provider including those who can be throttled
- try:
- return ast.literal_eval(settings.general.enabled_providers)
- except (ValueError, TypeError, SyntaxError, MemoryError, RecursionError):
+ if isinstance(settings.general.enabled_providers, list):
+ return settings.general.enabled_providers
+ else:
return []
@@ -222,32 +221,28 @@ def get_providers_auth():
'password': settings.addic7ed.password,
'cookies': settings.addic7ed.cookies,
'user_agent': settings.addic7ed.user_agent,
- 'is_vip': settings.addic7ed.getboolean('vip'),
+ 'is_vip': settings.addic7ed.vip,
},
'opensubtitles': {
'username': settings.opensubtitles.username,
'password': settings.opensubtitles.password,
- 'use_tag_search': settings.opensubtitles.getboolean(
- 'use_tag_search'
- ),
+ 'use_tag_search': settings.opensubtitles.use_tag_search,
'only_foreign': False, # fixme
'also_foreign': False, # fixme
- 'is_vip': settings.opensubtitles.getboolean('vip'),
- 'use_ssl': settings.opensubtitles.getboolean('ssl'),
+ 'is_vip': settings.opensubtitles.vip,
+ 'use_ssl': settings.opensubtitles.ssl,
'timeout': int(settings.opensubtitles.timeout) or 15,
- 'skip_wrong_fps': settings.opensubtitles.getboolean(
- 'skip_wrong_fps'
- ),
+ 'skip_wrong_fps': settings.opensubtitles.skip_wrong_fps,
},
'opensubtitlescom': {'username': settings.opensubtitlescom.username,
'password': settings.opensubtitlescom.password,
- 'use_hash': settings.opensubtitlescom.getboolean('use_hash'),
+ 'use_hash': settings.opensubtitlescom.use_hash,
'api_key': 's38zmzVlW7IlYruWi7mHwDYl2SfMQoC1'
},
'podnapisi': {
'only_foreign': False, # fixme
'also_foreign': False, # fixme
- 'verify_ssl': settings.podnapisi.getboolean('verify_ssl')
+ 'verify_ssl': settings.podnapisi.verify_ssl
},
'subscene': {
'username': settings.subscene.username,
@@ -257,9 +252,7 @@ def get_providers_auth():
'legendasdivx': {
'username': settings.legendasdivx.username,
'password': settings.legendasdivx.password,
- 'skip_wrong_fps': settings.legendasdivx.getboolean(
- 'skip_wrong_fps'
- ),
+ 'skip_wrong_fps': settings.legendasdivx.skip_wrong_fps,
},
'xsubs': {
'username': settings.xsubs.username,
@@ -276,7 +269,7 @@ def get_providers_auth():
'titulky': {
'username': settings.titulky.username,
'password': settings.titulky.password,
- 'approved_only': settings.titulky.getboolean('approved_only'),
+ 'approved_only': settings.titulky.approved_only,
},
'titlovi': {
'username': settings.titlovi.username,
@@ -287,13 +280,13 @@ def get_providers_auth():
'hashed_password': settings.ktuvit.hashed_password,
},
'embeddedsubtitles': {
- 'included_codecs': get_array_from(settings.embeddedsubtitles.included_codecs),
- 'hi_fallback': settings.embeddedsubtitles.getboolean('hi_fallback'),
+ 'included_codecs': settings.embeddedsubtitles.included_codecs,
+ 'hi_fallback': settings.embeddedsubtitles.hi_fallback,
'cache_dir': os.path.join(args.config_dir, "cache"),
'ffprobe_path': _FFPROBE_BINARY,
'ffmpeg_path': _FFMPEG_BINARY,
'timeout': settings.embeddedsubtitles.timeout,
- 'unknown_as_english': settings.embeddedsubtitles.getboolean('unknown_as_english'),
+ 'unknown_as_english': settings.embeddedsubtitles.unknown_as_english,
},
'karagarga': {
'username': settings.karagarga.username,
@@ -306,7 +299,7 @@ def get_providers_auth():
'passkey': settings.hdbits.passkey,
},
'subf2m': {
- 'verify_ssl': settings.subf2m.getboolean('verify_ssl'),
+ 'verify_ssl': settings.subf2m.verify_ssl,
'user_agent': settings.subf2m.user_agent,
},
'whisperai': {
@@ -414,7 +407,7 @@ def throttled_count(name):
def update_throttled_provider():
existing_providers = provider_registry.names()
- providers_list = [x for x in get_array_from(settings.general.enabled_providers) if x in existing_providers]
+ providers_list = [x for x in settings.general.enabled_providers if x in existing_providers]
for provider in list(tp):
if provider not in providers_list:
@@ -448,7 +441,7 @@ def list_throttled_providers():
update_throttled_provider()
throttled_providers = []
existing_providers = provider_registry.names()
- providers = [x for x in get_array_from(settings.general.enabled_providers) if x in existing_providers]
+ providers = [x for x in settings.general.enabled_providers if x in existing_providers]
for provider in providers:
reason, until, throttle_desc = tp.get(provider, (None, None, None))
throttled_providers.append([provider, reason, pretty.date(until)])
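
The same isinstance guard and registry filter recur in get_providers(), update_throttled_provider() and list_throttled_providers() above. A compact sketch of the idiom, with hypothetical names:

    def filter_enabled(enabled_setting, registry_names):
        # enabled_providers is now a real YAML list; anything else means "none"
        enabled = enabled_setting if isinstance(enabled_setting, list) else []
        return [name for name in enabled if name in registry_names]

    print(filter_enabled(['opensubtitles', 'bogus'], {'opensubtitles', 'podnapisi'}))
    # -> ['opensubtitles']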
diff --git a/bazarr/app/scheduler.py b/bazarr/app/scheduler.py
index cac3b9c33..054ad6f3d 100644
--- a/bazarr/app/scheduler.py
+++ b/bazarr/app/scheduler.py
@@ -160,14 +160,14 @@ class Scheduler:
return task_list
def __sonarr_update_task(self):
- if settings.general.getboolean('use_sonarr'):
+ if settings.general.use_sonarr:
self.aps_scheduler.add_job(
update_series, IntervalTrigger(minutes=int(settings.sonarr.series_sync)), max_instances=1,
coalesce=True, misfire_grace_time=15, id='update_series', name='Sync with Sonarr',
replace_existing=True)
def __radarr_update_task(self):
- if settings.general.getboolean('use_radarr'):
+ if settings.general.use_radarr:
self.aps_scheduler.add_job(
update_movies, IntervalTrigger(minutes=int(settings.radarr.movies_sync)), max_instances=1,
coalesce=True, misfire_grace_time=15, id='update_movies', name='Sync with Radarr',
@@ -200,7 +200,7 @@ class Scheduler:
pass
def __sonarr_full_update_task(self):
- if settings.general.getboolean('use_sonarr'):
+ if settings.general.use_sonarr:
full_update = settings.sonarr.full_update
if full_update == "Daily":
self.aps_scheduler.add_job(
@@ -220,7 +220,7 @@ class Scheduler:
name='Index all Episode Subtitles from disk', replace_existing=True)
def __radarr_full_update_task(self):
- if settings.general.getboolean('use_radarr'):
+ if settings.general.use_radarr:
full_update = settings.radarr.full_update
if full_update == "Daily":
self.aps_scheduler.add_job(
@@ -242,7 +242,7 @@ class Scheduler:
if not args.no_update and os.environ["BAZARR_VERSION"] != '':
task_name = 'Update Bazarr'
- if settings.general.getboolean('auto_update'):
+ if settings.general.auto_update:
self.aps_scheduler.add_job(
check_if_new_update, IntervalTrigger(hours=6), max_instances=1, coalesce=True,
misfire_grace_time=15, id='update_bazarr', name=task_name, replace_existing=True)
@@ -264,13 +264,13 @@ class Scheduler:
id='update_announcements', name='Update Announcements File', replace_existing=True)
def __search_wanted_subtitles_task(self):
- if settings.general.getboolean('use_sonarr'):
+ if settings.general.use_sonarr:
self.aps_scheduler.add_job(
wanted_search_missing_subtitles_series,
IntervalTrigger(hours=int(settings.general.wanted_search_frequency)), max_instances=1, coalesce=True,
misfire_grace_time=15, id='wanted_search_missing_subtitles_series', replace_existing=True,
name='Search for wanted Series Subtitles')
- if settings.general.getboolean('use_radarr'):
+ if settings.general.use_radarr:
self.aps_scheduler.add_job(
wanted_search_missing_subtitles_movies,
IntervalTrigger(hours=int(settings.general.wanted_search_frequency_movie)), max_instances=1,
@@ -278,8 +278,8 @@ class Scheduler:
name='Search for wanted Movies Subtitles', replace_existing=True)
def __upgrade_subtitles_task(self):
- if settings.general.getboolean('upgrade_subs') and \
- (settings.general.getboolean('use_sonarr') or settings.general.getboolean('use_radarr')):
+ if settings.general.upgrade_subs and \
+ (settings.general.use_sonarr or settings.general.use_radarr):
self.aps_scheduler.add_job(
upgrade_subtitles, IntervalTrigger(hours=int(settings.general.upgrade_frequency)), max_instances=1,
coalesce=True, misfire_grace_time=15, id='upgrade_subtitles',
@@ -303,9 +303,9 @@ scheduler = Scheduler()
# Force the execution of the sync process with Sonarr and Radarr after migration to v0.9.1
if 'BAZARR_AUDIO_PROFILES_MIGRATION' in os.environ:
- if settings.general.getboolean('use_sonarr'):
+ if settings.general.use_sonarr:
scheduler.aps_scheduler.modify_job('update_series', next_run_time=datetime.now())
scheduler.aps_scheduler.modify_job('sync_episodes', next_run_time=datetime.now())
- if settings.general.getboolean('use_radarr'):
+ if settings.general.use_radarr:
scheduler.aps_scheduler.modify_job('update_movies', next_run_time=datetime.now())
del os.environ['BAZARR_AUDIO_PROFILES_MIGRATION']
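
Every task method above funnels into the same APScheduler call shape; only the boolean gates changed in this patch, while interval values are still wrapped in int() since they may arrive as strings. A self-contained sketch of that call, using APScheduler's documented API with a hypothetical task body:

    from apscheduler.schedulers.background import BackgroundScheduler
    from apscheduler.triggers.interval import IntervalTrigger

    def sync_task():
        print("syncing...")

    scheduler = BackgroundScheduler()
    scheduler.add_job(sync_task, IntervalTrigger(minutes=1), max_instances=1,
                      coalesce=True, misfire_grace_time=15, id='sync_task',
                      name='Sync task', replace_existing=True)
    scheduler.start()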
diff --git a/bazarr/app/signalr_client.py b/bazarr/app/signalr_client.py
index 8031305f4..5e78a12ea 100644
--- a/bazarr/app/signalr_client.py
+++ b/bazarr/app/signalr_client.py
@@ -86,7 +86,7 @@ class SonarrSignalrClientLegacy:
if self.connection:
if self.connection.started:
self.stop(log=False)
- if settings.general.getboolean('use_sonarr'):
+ if settings.general.use_sonarr:
self.start()
def exception_handler(self):
@@ -133,7 +133,7 @@ class SonarrSignalrClient:
if self.connection:
if self.connection.transport.state.value in [0, 1, 2]:
self.stop()
- if settings.general.getboolean('use_sonarr'):
+ if settings.general.use_sonarr:
self.start()
def exception_handler(self):
@@ -200,7 +200,7 @@ class RadarrSignalrClient:
if self.connection:
if self.connection.transport.state.value in [0, 1, 2]:
self.stop()
- if settings.general.getboolean('use_radarr'):
+ if settings.general.use_radarr:
self.start()
def exception_handler(self):
@@ -300,11 +300,11 @@ def dispatcher(data):
elif topic == 'episode':
logging.debug(f'Event received from Sonarr for episode: {series_title} ({series_year}) - '
f'S{season_number:0>2}E{episode_number:0>2} - {episode_title}')
- sync_one_episode(episode_id=media_id, defer_search=settings.sonarr.getboolean('defer_search_signalr'))
+ sync_one_episode(episode_id=media_id, defer_search=settings.sonarr.defer_search_signalr)
elif topic == 'movie':
logging.debug(f'Event received from Radarr for movie: {movie_title} ({movie_year})')
update_one_movie(movie_id=media_id, action=action,
- defer_search=settings.radarr.getboolean('defer_search_signalr'))
+ defer_search=settings.radarr.defer_search_signalr)
except Exception as e:
logging.debug('BAZARR an exception occurred while parsing SignalR feed: {}'.format(repr(e)))
finally:
diff --git a/bazarr/init.py b/bazarr/init.py
index 0e91e4afb..2afbba48d 100644
--- a/bazarr/init.py
+++ b/bazarr/init.py
@@ -11,7 +11,7 @@ import rarfile
from dogpile.cache.region import register_backend as register_cache_backend
-from app.config import settings, configure_captcha_func, get_array_from
+from app.config import settings, configure_captcha_func, write_config
from app.get_args import args
from app.logger import configure_logging
from utilities.binaries import get_binary, BinaryNotFound
@@ -62,7 +62,7 @@ configure_captcha_func()
from ga4mp import GtagMP # noqa E402
# configure logging
-configure_logging(settings.general.getboolean('debug') or args.debug)
+configure_logging(settings.general.debug or args.debug)
import logging # noqa E402
@@ -111,30 +111,14 @@ if not args.no_update:
restart_file.close()
os._exit(0)
-# create random api_key if there's none in config.ini
-if not settings.auth.apikey or settings.auth.apikey.startswith("b'"):
- from binascii import hexlify
- settings.auth.apikey = hexlify(os.urandom(16)).decode()
- with open(os.path.join(args.config_dir, 'config', 'config.ini'), 'w+') as handle:
- settings.write(handle)
-
-# create random Flask secret_key if there's none in config.ini
-if not settings.general.flask_secret_key:
- from binascii import hexlify
- settings.general.flask_secret_key = hexlify(os.urandom(16)).decode()
- with open(os.path.join(args.config_dir, 'config', 'config.ini'), 'w+') as handle:
- settings.write(handle)
-
# change default base_url to ''
settings.general.base_url = settings.general.base_url.rstrip('/')
-with open(os.path.join(args.config_dir, 'config', 'config.ini'), 'w+') as handle:
- settings.write(handle)
+write_config()
# migrate enabled_providers from comma separated string to list
if isinstance(settings.general.enabled_providers, str) and not settings.general.enabled_providers.startswith('['):
settings.general.enabled_providers = str(settings.general.enabled_providers.split(","))
- with open(os.path.join(args.config_dir, 'config', 'config.ini'), 'w+') as handle:
- settings.write(handle)
+ write_config()
# Read package_info (if exists) to override some settings by package maintainers
# This file can also provide some info about the package version and author
@@ -166,8 +150,7 @@ if os.path.isfile(package_info_file):
except Exception:
pass
else:
- with open(os.path.join(args.config_dir, 'config', 'config.ini'), 'w+') as handle:
- settings.write(handle)
+ write_config()
# Configure dogpile file caching for Subliminal request
register_cache_backend("subzero.cache.file", "subzero.cache_backends.file", "SZFileBackend")
@@ -186,30 +169,24 @@ if not os.path.exists(os.path.join(args.config_dir, 'config', 'announcements.txt
get_announcements_to_file()
logging.debug("BAZARR Created announcements file")
-config_file = os.path.normpath(os.path.join(args.config_dir, 'config', 'config.ini'))
-
-# Move GA visitor from config.ini to dedicated file
-if settings.analytics.visitor:
+# Move GA visitor from config to dedicated file
+if 'visitor' in settings.analytics:
with open(os.path.normpath(os.path.join(args.config_dir, 'config', 'analytics.dat')), 'w+') as handle:
handle.write(settings.analytics.visitor)
- with open(os.path.normpath(os.path.join(args.config_dir, 'config', 'config.ini')), 'w+') as handle:
- settings.remove_option('analytics', 'visitor')
- settings.write(handle)
+ settings['analytics'].pop('visitor', None)
-# Clean unused settings from config.ini
-with open(os.path.normpath(os.path.join(args.config_dir, 'config', 'config.ini')), 'w+') as handle:
- settings.remove_option('general', 'throtteled_providers')
- settings.remove_option('general', 'update_restart')
- settings.write(handle)
+# Clean unused settings from config
+settings['general'].pop('throtteled_providers', None)
+settings['general'].pop('update_restart', None)
+write_config()
-# Remove deprecated providers from enabled providers in config.ini
+# Remove deprecated providers from enabled providers in config
from subliminal_patch.extensions import provider_registry # noqa E401
existing_providers = provider_registry.names()
-enabled_providers = get_array_from(settings.general.enabled_providers)
-settings.general.enabled_providers = str([x for x in enabled_providers if x in existing_providers])
-with open(os.path.join(args.config_dir, 'config', 'config.ini'), 'w+') as handle:
- settings.write(handle)
+enabled_providers = settings.general.enabled_providers
+settings.general.enabled_providers = [x for x in enabled_providers if x in existing_providers]
+write_config()
def init_binaries():
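
The enabled_providers migration above detects a legacy comma-separated string and converts it before write_config() persists the YAML file. A simplified sketch of the detection (helper name hypothetical; simplified from the code above):

    def migrate_enabled_providers(value):
        # A pre-YAML value looks like "prov1,prov2"; a migrated one is a list.
        if isinstance(value, str) and not value.startswith('['):
            return value.split(',')
        return value

    print(migrate_enabled_providers('opensubtitles,podnapisi'))
    # -> ['opensubtitles', 'podnapisi']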
diff --git a/bazarr/main.py b/bazarr/main.py
index c00817571..e6186848e 100644
--- a/bazarr/main.py
+++ b/bazarr/main.py
@@ -28,7 +28,7 @@ if bazarr_version != '':
apply_update()
# Check for new update and install latest
-if args.no_update or not settings.general.getboolean('auto_update'):
+if args.no_update or not settings.general.auto_update:
# user has explicitly requested that we do not update or is using some kind of package/docker that prevents it
check_releases()
else:
@@ -74,9 +74,9 @@ login_auth = settings.auth.type
update_notifier()
if not args.no_signalr:
- if settings.general.getboolean('use_sonarr'):
+ if settings.general.use_sonarr:
Thread(target=sonarr_signalr_client.start).start()
- if settings.general.getboolean('use_radarr'):
+ if settings.general.use_radarr:
Thread(target=radarr_signalr_client.start).start()
diff --git a/bazarr/radarr/info.py b/bazarr/radarr/info.py
index d0790e9f3..cdac0ee65 100644
--- a/bazarr/radarr/info.py
+++ b/bazarr/radarr/info.py
@@ -26,7 +26,7 @@ class GetRadarrInfo:
return radarr_version
else:
radarr_version = ''
- if settings.general.getboolean('use_radarr'):
+ if settings.general.use_radarr:
try:
rv = url_radarr() + "/api/system/status?apikey=" + settings.radarr.apikey
radarr_json = requests.get(rv, timeout=int(settings.radarr.http_timeout), verify=False, headers=headers).json()
@@ -75,7 +75,7 @@ get_radarr_info = GetRadarrInfo()
def url_radarr():
- if settings.radarr.getboolean('ssl'):
+ if settings.radarr.ssl:
protocol_radarr = "https"
else:
protocol_radarr = "http"
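
url_radarr() and url_sonarr() differ only in which settings section they read; with ssl now a native boolean, the protocol pick is a plain conditional. A condensed sketch (function and parameters hypothetical):

    def build_base_url(use_ssl, host, port, base_url=''):
        protocol = 'https' if use_ssl else 'http'
        return f"{protocol}://{host}:{port}{base_url}"

    print(build_base_url(False, 'localhost', 7878))  # -> http://localhost:7878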
diff --git a/bazarr/radarr/sync/movies.py b/bazarr/radarr/sync/movies.py
index 6741a823f..34e137139 100644
--- a/bazarr/radarr/sync/movies.py
+++ b/bazarr/radarr/sync/movies.py
@@ -68,7 +68,7 @@ def update_movies(send_event=True):
logging.debug('BAZARR Starting movie sync from Radarr.')
apikey_radarr = settings.radarr.apikey
- movie_default_enabled = settings.general.getboolean('movie_default_enabled')
+ movie_default_enabled = settings.general.movie_default_enabled
if movie_default_enabled is True:
movie_default_profile = settings.general.movie_default_profile
@@ -179,7 +179,7 @@ def update_one_movie(movie_id, action, defer_search=False):
existing_movie.path)))
return
- movie_default_enabled = settings.general.getboolean('movie_default_enabled')
+ movie_default_enabled = settings.general.movie_default_enabled
if movie_default_enabled is True:
movie_default_profile = settings.general.movie_default_profile
diff --git a/bazarr/radarr/sync/parser.py b/bazarr/radarr/sync/parser.py
index 60b4c7024..b70b5e682 100644
--- a/bazarr/radarr/sync/parser.py
+++ b/bazarr/radarr/sync/parser.py
@@ -92,7 +92,7 @@ def movieParser(movie, action, tags_dict, movie_default_profile, audio_profiles)
videoCodec = None
audioCodec = None
- if settings.general.getboolean('parse_embedded_audio_track'):
+ if settings.general.parse_embedded_audio_track:
audio_language = embedded_audio_reader(path_mappings.path_replace_movie(movie['movieFile']['path']),
file_size=movie['movieFile']['size'],
movie_file_id=movie['movieFile']['id'],
diff --git a/bazarr/sonarr/info.py b/bazarr/sonarr/info.py
index 3f059531e..706ea6de7 100644
--- a/bazarr/sonarr/info.py
+++ b/bazarr/sonarr/info.py
@@ -26,7 +26,7 @@ class GetSonarrInfo:
return sonarr_version
else:
sonarr_version = ''
- if settings.general.getboolean('use_sonarr'):
+ if settings.general.use_sonarr:
try:
sv = url_sonarr() + "/api/system/status?apikey=" + settings.sonarr.apikey
sonarr_json = requests.get(sv, timeout=int(settings.sonarr.http_timeout), verify=False, headers=headers).json()
@@ -75,7 +75,7 @@ get_sonarr_info = GetSonarrInfo()
def url_sonarr():
- if settings.sonarr.getboolean('ssl'):
+ if settings.sonarr.ssl:
protocol_sonarr = "https"
else:
protocol_sonarr = "http"
diff --git a/bazarr/sonarr/sync/parser.py b/bazarr/sonarr/sync/parser.py
index 63a693ab2..222a21500 100644
--- a/bazarr/sonarr/sync/parser.py
+++ b/bazarr/sonarr/sync/parser.py
@@ -32,7 +32,7 @@ def seriesParser(show, action, tags_dict, serie_default_profile, audio_profiles)
imdbId = show['imdbId'] if 'imdbId' in show else None
audio_language = []
- if not settings.general.getboolean('parse_embedded_audio_track'):
+ if not settings.general.parse_embedded_audio_track:
if get_sonarr_info.is_legacy():
audio_language = profile_id_to_language(show['qualityProfileId'], audio_profiles)
else:
@@ -98,7 +98,7 @@ def episodeParser(episode):
else:
sceneName = None
- if settings.general.getboolean('parse_embedded_audio_track'):
+ if settings.general.parse_embedded_audio_track:
audio_language = embedded_audio_reader(path_mappings.path_replace(episode['episodeFile']
['path']),
file_size=episode['episodeFile']['size'],
diff --git a/bazarr/sonarr/sync/series.py b/bazarr/sonarr/sync/series.py
index 6ed298913..cb346a178 100644
--- a/bazarr/sonarr/sync/series.py
+++ b/bazarr/sonarr/sync/series.py
@@ -23,7 +23,7 @@ def update_series(send_event=True):
if apikey_sonarr is None:
return
- serie_default_enabled = settings.general.getboolean('serie_default_enabled')
+ serie_default_enabled = settings.general.serie_default_enabled
if serie_default_enabled is True:
serie_default_profile = settings.general.serie_default_profile
@@ -134,7 +134,7 @@ def update_one_series(series_id, action):
event_stream(type='series', action='delete', payload=int(series_id))
return
- serie_default_enabled = settings.general.getboolean('serie_default_enabled')
+ serie_default_enabled = settings.general.serie_default_enabled
if serie_default_enabled is True:
serie_default_profile = settings.general.serie_default_profile
diff --git a/bazarr/subtitles/adaptive_searching.py b/bazarr/subtitles/adaptive_searching.py
index 6b04f7242..ebb92fadf 100644
--- a/bazarr/subtitles/adaptive_searching.py
+++ b/bazarr/subtitles/adaptive_searching.py
@@ -23,7 +23,7 @@ def is_search_active(desired_language, attempt_string):
@rtype: bool
"""
- if settings.general.getboolean('adaptive_searching'):
+ if settings.general.adaptive_searching:
logging.debug("Adaptive searching is enabled, we'll see if it's time to search again...")
try:
# let's try to get a list of lists from the string representation in database
diff --git a/bazarr/subtitles/download.py b/bazarr/subtitles/download.py
index 28c46611b..1767be2c4 100644
--- a/bazarr/subtitles/download.py
+++ b/bazarr/subtitles/download.py
@@ -12,7 +12,7 @@ from subliminal_patch.core import save_subtitles
from subliminal_patch.core_persistent import download_best_subtitles
from subliminal_patch.score import ComputeScore
-from app.config import settings, get_array_from, get_scores
+from app.config import settings, get_scores
from app.database import TableEpisodes, TableMovies, database, select
from utilities.path_mappings import path_mappings
from utilities.helper import get_target_folder, force_unicode
@@ -31,7 +31,7 @@ def generate_subtitles(path, languages, audio_language, sceneName, title, media_
logging.debug('BAZARR Searching subtitles for this file: ' + path)
- if settings.general.getboolean('utf8_encode'):
+ if settings.general.utf8_encode:
os.environ["SZ_KEEP_ENCODING"] = ""
else:
os.environ["SZ_KEEP_ENCODING"] = "True"
@@ -52,7 +52,7 @@ def generate_subtitles(path, languages, audio_language, sceneName, title, media_
minimum_score_movie = settings.general.minimum_score_movie
min_score, max_score, scores = _get_scores(media_type, minimum_score_movie, minimum_score)
- subz_mods = get_array_from(settings.general.subzero_mods)
+ subz_mods = settings.general.subzero_mods
saved_any = False
if providers:
@@ -86,9 +86,9 @@ def generate_subtitles(path, languages, audio_language, sceneName, title, media_
try:
fld = get_target_folder(path)
chmod = int(settings.general.chmod, 8) if not sys.platform.startswith(
- 'win') and settings.general.getboolean('chmod_enabled') else None
+ 'win') and settings.general.chmod_enabled else None
saved_subtitles = save_subtitles(video.original_path, subtitles,
- single=settings.general.getboolean('single_language'),
+ single=settings.general.single_language,
tags=None, # fixme
directory=fld,
chmod=chmod,
diff --git a/bazarr/subtitles/indexer/movies.py b/bazarr/subtitles/indexer/movies.py
index 4f6bbc4b0..1a90afcb2 100644
--- a/bazarr/subtitles/indexer/movies.py
+++ b/bazarr/subtitles/indexer/movies.py
@@ -25,7 +25,7 @@ def store_subtitles_movie(original_path, reversed_path, use_cache=True):
logging.debug('BAZARR started subtitles indexing for this file: ' + reversed_path)
actual_subtitles = []
if os.path.exists(reversed_path):
- if settings.general.getboolean('use_embedded_subs'):
+ if settings.general.use_embedded_subs:
logging.debug("BAZARR is trying to index embedded subtitles.")
item = database.execute(
select(TableMovies.movie_file_id, TableMovies.file_size)
@@ -41,10 +41,10 @@ def store_subtitles_movie(original_path, reversed_path, use_cache=True):
use_cache=use_cache)
for subtitle_language, subtitle_forced, subtitle_hi, subtitle_codec in subtitle_languages:
try:
- if (settings.general.getboolean("ignore_pgs_subs") and subtitle_codec.lower() == "pgs") or \
- (settings.general.getboolean("ignore_vobsub_subs") and subtitle_codec.lower() ==
+ if (settings.general.ignore_pgs_subs and subtitle_codec.lower() == "pgs") or \
+ (settings.general.ignore_vobsub_subs and subtitle_codec.lower() ==
"vobsub") or \
- (settings.general.getboolean("ignore_ass_subs") and subtitle_codec.lower() ==
+ (settings.general.ignore_ass_subs and subtitle_codec.lower() ==
"ass"):
logging.debug("BAZARR skipping %s sub for language: %s" % (subtitle_codec, alpha2_from_alpha3(subtitle_language)))
continue
@@ -85,7 +85,7 @@ def store_subtitles_movie(original_path, reversed_path, use_cache=True):
os.stat(path_mappings.path_replace(x[1])).st_size == x[2]]
subtitles = search_external_subtitles(reversed_path, languages=get_language_set(),
- only_one=settings.general.getboolean('single_language'))
+ only_one=settings.general.single_language)
full_dest_folder_path = os.path.dirname(reversed_path)
if dest_folder:
if settings.general.subfolder == "absolute":
@@ -168,7 +168,7 @@ def list_missing_subtitles_movies(no=None, send_event=True):
TableMovies.audio_language)) \
.all()
- use_embedded_subs = settings.general.getboolean('use_embedded_subs')
+ use_embedded_subs = settings.general.use_embedded_subs
for movie_subtitles in movies_subtitles:
missing_subtitles_text = '[]'
@@ -264,7 +264,7 @@ def list_missing_subtitles_movies(no=None, send_event=True):
event_stream(type='badges')
-def movies_full_scan_subtitles(use_cache=settings.radarr.getboolean('use_ffprobe_cache')):
+def movies_full_scan_subtitles(use_cache=settings.radarr.use_ffprobe_cache):
movies = database.execute(
select(TableMovies.path))\
.all()
diff --git a/bazarr/subtitles/indexer/series.py b/bazarr/subtitles/indexer/series.py
index f325c0d86..9add0f6b2 100644
--- a/bazarr/subtitles/indexer/series.py
+++ b/bazarr/subtitles/indexer/series.py
@@ -25,7 +25,7 @@ def store_subtitles(original_path, reversed_path, use_cache=True):
logging.debug('BAZARR started subtitles indexing for this file: ' + reversed_path)
actual_subtitles = []
if os.path.exists(reversed_path):
- if settings.general.getboolean('use_embedded_subs'):
+ if settings.general.use_embedded_subs:
logging.debug("BAZARR is trying to index embedded subtitles.")
item = database.execute(
select(TableEpisodes.episode_file_id, TableEpisodes.file_size)
@@ -41,10 +41,10 @@ def store_subtitles(original_path, reversed_path, use_cache=True):
use_cache=use_cache)
for subtitle_language, subtitle_forced, subtitle_hi, subtitle_codec in subtitle_languages:
try:
- if (settings.general.getboolean("ignore_pgs_subs") and subtitle_codec.lower() == "pgs") or \
- (settings.general.getboolean("ignore_vobsub_subs") and subtitle_codec.lower() ==
+ if (settings.general.ignore_pgs_subs and subtitle_codec.lower() == "pgs") or \
+ (settings.general.ignore_vobsub_subs and subtitle_codec.lower() ==
"vobsub") or \
- (settings.general.getboolean("ignore_ass_subs") and subtitle_codec.lower() ==
+ (settings.general.ignore_ass_subs and subtitle_codec.lower() ==
"ass"):
logging.debug("BAZARR skipping %s sub for language: %s" % (subtitle_codec, alpha2_from_alpha3(subtitle_language)))
continue
@@ -84,7 +84,7 @@ def store_subtitles(original_path, reversed_path, use_cache=True):
os.stat(path_mappings.path_replace(x[1])).st_size == x[2]]
subtitles = search_external_subtitles(reversed_path, languages=get_language_set(),
- only_one=settings.general.getboolean('single_language'))
+ only_one=settings.general.single_language)
full_dest_folder_path = os.path.dirname(reversed_path)
if dest_folder:
if settings.general.subfolder == "absolute":
@@ -168,7 +168,7 @@ def list_missing_subtitles(no=None, epno=None, send_event=True):
.where(episodes_subtitles_clause))\
.all()
- use_embedded_subs = settings.general.getboolean('use_embedded_subs')
+ use_embedded_subs = settings.general.use_embedded_subs
for episode_subtitles in episodes_subtitles:
missing_subtitles_text = '[]'
@@ -266,7 +266,7 @@ def list_missing_subtitles(no=None, epno=None, send_event=True):
event_stream(type='badges')
-def series_full_scan_subtitles(use_cache=settings.sonarr.getboolean('use_ffprobe_cache')):
+def series_full_scan_subtitles(use_cache=settings.sonarr.use_ffprobe_cache):
episodes = database.execute(
select(TableEpisodes.path))\
.all()
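
One subtlety shared by both indexer modules: a keyword default such as use_cache=settings.sonarr.use_ffprobe_cache is evaluated once, when the module is imported, not on each call. A short demonstration of that Python behavior (names hypothetical):

    DEFAULT = True  # stand-in for the use_ffprobe_cache setting at import time

    def full_scan(use_cache=DEFAULT):
        return use_cache

    DEFAULT = False     # rebinding the name later has no effect...
    print(full_scan())  # -> True: the default was captured at definition time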
diff --git a/bazarr/subtitles/manual.py b/bazarr/subtitles/manual.py
index 84d2f0c62..ca454e7ee 100644
--- a/bazarr/subtitles/manual.py
+++ b/bazarr/subtitles/manual.py
@@ -14,7 +14,7 @@ from subliminal_patch.core_persistent import list_all_subtitles, download_subtit
from subliminal_patch.score import ComputeScore
from languages.get_languages import alpha3_from_alpha2
-from app.config import get_scores, settings, get_array_from
+from app.config import get_scores, settings
from utilities.helper import get_target_folder, force_unicode
from app.database import get_profiles_list
@@ -158,7 +158,7 @@ def manual_download_subtitle(path, audio_language, hi, forced, subtitle, provide
use_original_format, profile_id):
logging.debug('BAZARR Manually downloading Subtitles for this file: ' + path)
- if settings.general.getboolean('utf8_encode'):
+ if settings.general.utf8_encode:
os.environ["SZ_KEEP_ENCODING"] = ""
else:
os.environ["SZ_KEEP_ENCODING"] = "True"
@@ -174,7 +174,7 @@ def manual_download_subtitle(path, audio_language, hi, forced, subtitle, provide
subtitle.language.forced = False
if use_original_format == 'True':
subtitle.use_original_format = use_original_format
- subtitle.mods = get_array_from(settings.general.subzero_mods)
+ subtitle.mods = settings.general.subzero_mods
video = get_video(force_unicode(path), title, sceneName, providers={provider}, media_type=media_type)
if video:
try:
@@ -193,9 +193,9 @@ def manual_download_subtitle(path, audio_language, hi, forced, subtitle, provide
return 'No valid Subtitles file found'
try:
chmod = int(settings.general.chmod, 8) if not sys.platform.startswith(
- 'win') and settings.general.getboolean('chmod_enabled') else None
+ 'win') and settings.general.chmod_enabled else None
saved_subtitles = save_subtitles(video.original_path, [subtitle],
- single=settings.general.getboolean('single_language'),
+ single=settings.general.single_language,
tags=None, # fixme
directory=get_target_folder(path),
chmod=chmod,
diff --git a/bazarr/subtitles/processing.py b/bazarr/subtitles/processing.py
index f085da236..bc0ada4e3 100644
--- a/bazarr/subtitles/processing.py
+++ b/bazarr/subtitles/processing.py
@@ -39,7 +39,7 @@ class ProcessSubtitlesResult:
def process_subtitle(subtitle, media_type, audio_language, path, max_score, is_upgrade=False, is_manual=False):
- use_postprocessing = settings.general.getboolean('use_postprocessing')
+ use_postprocessing = settings.general.use_postprocessing
postprocessing_cmd = settings.general.postprocessing_cmd
downloaded_provider = subtitle.provider_name
@@ -109,10 +109,10 @@ def process_subtitle(subtitle, media_type, audio_language, path, max_score, is_u
percent_score, subtitle_id, downloaded_provider, series_id, episode_id)
if media_type == 'series':
- use_pp_threshold = settings.general.getboolean('use_postprocessing_threshold')
+ use_pp_threshold = settings.general.use_postprocessing_threshold
pp_threshold = int(settings.general.postprocessing_threshold)
else:
- use_pp_threshold = settings.general.getboolean('use_postprocessing_threshold_movie')
+ use_pp_threshold = settings.general.use_postprocessing_threshold_movie
pp_threshold = int(settings.general.postprocessing_threshold_movie)
if not use_pp_threshold or (use_pp_threshold and percent_score < pp_threshold):
diff --git a/bazarr/subtitles/sync.py b/bazarr/subtitles/sync.py
index 06d466f27..21025fe79 100644
--- a/bazarr/subtitles/sync.py
+++ b/bazarr/subtitles/sync.py
@@ -12,16 +12,16 @@ def sync_subtitles(video_path, srt_path, srt_lang, forced, media_type, percent_s
sonarr_episode_id=None, radarr_id=None):
if forced:
logging.debug('BAZARR cannot sync forced subtitles. Skipping sync routine.')
- elif not settings.subsync.getboolean('use_subsync'):
+ elif not settings.subsync.use_subsync:
logging.debug('BAZARR automatic syncing is disabled in settings. Skipping sync routine.')
else:
logging.debug(f'BAZARR automatic syncing is enabled in settings. We\'ll try to sync these '
f'subtitles: {srt_path}.')
if media_type == 'series':
- use_subsync_threshold = settings.subsync.getboolean('use_subsync_threshold')
+ use_subsync_threshold = settings.subsync.use_subsync_threshold
subsync_threshold = settings.subsync.subsync_threshold
else:
- use_subsync_threshold = settings.subsync.getboolean('use_subsync_movie_threshold')
+ use_subsync_threshold = settings.subsync.use_subsync_movie_threshold
subsync_threshold = settings.subsync.subsync_movie_threshold
if not use_subsync_threshold or (use_subsync_threshold and percent_score < float(subsync_threshold)):
diff --git a/bazarr/subtitles/tools/mods.py b/bazarr/subtitles/tools/mods.py
index 126050b1b..12c9bc6b0 100644
--- a/bazarr/subtitles/tools/mods.py
+++ b/bazarr/subtitles/tools/mods.py
@@ -19,7 +19,7 @@ def subtitles_apply_mods(language, subtitle_path, mods, use_original_format, vid
lang_obj = Language(language)
else:
lang_obj = custom.subzero_language()
- single = settings.general.getboolean('single_language')
+ single = settings.general.single_language
sub = Subtitle(lang_obj, mods=mods, original_format=use_original_format)
with open(subtitle_path, 'rb') as f:
diff --git a/bazarr/subtitles/tools/subsyncer.py b/bazarr/subtitles/tools/subsyncer.py
index 8e815fa25..e3f8f6943 100644
--- a/bazarr/subtitles/tools/subsyncer.py
+++ b/bazarr/subtitles/tools/subsyncer.py
@@ -54,11 +54,11 @@ class SubSyncer:
self.ffmpeg_path = os.path.dirname(ffmpeg_exe)
unparsed_args = [self.reference, '-i', self.srtin, '-o', self.srtout, '--ffmpegpath', self.ffmpeg_path, '--vad',
self.vad, '--log-dir-path', self.log_dir_path]
- if settings.subsync.getboolean('force_audio'):
+ if settings.subsync.force_audio:
unparsed_args.append('--no-fix-framerate')
unparsed_args.append('--reference-stream')
unparsed_args.append('a:0')
- if settings.subsync.getboolean('debug'):
+ if settings.subsync.debug:
unparsed_args.append('--make-test-case')
parser = make_parser()
self.args = parser.parse_args(args=unparsed_args)
@@ -72,10 +72,10 @@ class SubSyncer:
'{0}'.format(self.srtin))
raise OSError
else:
- if settings.subsync.getboolean('debug'):
+ if settings.subsync.debug:
return result
if os.path.isfile(self.srtout):
- if not settings.subsync.getboolean('debug'):
+ if not settings.subsync.debug:
os.remove(self.srtin)
os.rename(self.srtout, self.srtin)
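
The booleans above only decide which extra flags land in ffsubsync's argument vector before parse_args() runs. A compact sketch of that assembly, using the flag strings from the code above (function name hypothetical):

    def build_sync_args(reference, srtin, srtout, force_audio=False, debug=False):
        args = [reference, '-i', srtin, '-o', srtout]
        if force_audio:
            # skip framerate fixing and sync against the first audio stream
            args += ['--no-fix-framerate', '--reference-stream', 'a:0']
        if debug:
            args.append('--make-test-case')
        return args

    print(build_sync_args('movie.mkv', 'in.srt', 'out.srt', force_audio=True))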
diff --git a/bazarr/subtitles/upgrade.py b/bazarr/subtitles/upgrade.py
index ffa734c06..b2aa6bd76 100644
--- a/bazarr/subtitles/upgrade.py
+++ b/bazarr/subtitles/upgrade.py
@@ -24,8 +24,8 @@ from .download import generate_subtitles
def upgrade_subtitles():
- use_sonarr = settings.general.getboolean('use_sonarr')
- use_radarr = settings.general.getboolean('use_radarr')
+ use_sonarr = settings.general.use_sonarr
+ use_radarr = settings.general.use_radarr
if use_sonarr:
episodes_to_upgrade = get_upgradable_episode_subtitles()
@@ -218,7 +218,7 @@ def get_queries_condition_parameters():
days_to_upgrade_subs = settings.general.days_to_upgrade_subs
minimum_timestamp = (datetime.now() - timedelta(days=int(days_to_upgrade_subs)))
- if settings.general.getboolean('upgrade_manual'):
+ if settings.general.upgrade_manual:
query_actions = [1, 2, 3, 4, 6]
else:
query_actions = [1, 3]
@@ -244,7 +244,7 @@ def parse_language_string(language_string):
def get_upgradable_episode_subtitles():
- if not settings.general.getboolean('upgrade_subs'):
+ if not settings.general.upgrade_subs:
# return an empty set of rows
return select(TableHistory.id) \
.where(TableHistory.id.is_(None)) \
@@ -277,7 +277,7 @@ def get_upgradable_episode_subtitles():
def get_upgradable_movies_subtitles():
- if not settings.general.getboolean('upgrade_subs'):
+ if not settings.general.upgrade_subs:
# return an empty set of rows
return select(TableHistoryMovie.id) \
.where(TableHistoryMovie.id.is_(None)) \
diff --git a/bazarr/subtitles/upload.py b/bazarr/subtitles/upload.py
index bdb70cd8a..f721ec6dd 100644
--- a/bazarr/subtitles/upload.py
+++ b/bazarr/subtitles/upload.py
@@ -11,7 +11,7 @@ from subliminal_patch.subtitle import Subtitle
from pysubs2.formats import get_format_identifier
from languages.get_languages import language_from_alpha3, alpha2_from_alpha3, alpha3_from_alpha2
-from app.config import settings, get_array_from
+from app.config import settings
from utilities.helper import get_target_folder, force_unicode
from utilities.post_processing import pp_replace, set_chmod
from utilities.path_mappings import path_mappings
@@ -29,13 +29,13 @@ from .post_processing import postprocessing
def manual_upload_subtitle(path, language, forced, hi, media_type, subtitle, audio_language):
logging.debug(f'BAZARR Manually uploading subtitles for this file: {path}')
- single = settings.general.getboolean('single_language')
+ single = settings.general.single_language
- use_postprocessing = settings.general.getboolean('use_postprocessing')
+ use_postprocessing = settings.general.use_postprocessing
postprocessing_cmd = settings.general.postprocessing_cmd
chmod = int(settings.general.chmod, 8) if not sys.platform.startswith(
- 'win') and settings.general.getboolean('chmod_enabled') else None
+ 'win') and settings.general.chmod_enabled else None
language = alpha3_from_alpha2(language)
@@ -78,7 +78,7 @@ def manual_upload_subtitle(path, language, forced, hi, media_type, subtitle, aud
sub = Subtitle(
lang_obj,
- mods=get_array_from(settings.general.subzero_mods),
+ mods=settings.general.subzero_mods,
original_format=use_original_format
)
@@ -87,7 +87,7 @@ def manual_upload_subtitle(path, language, forced, hi, media_type, subtitle, aud
logging.exception('BAZARR Invalid subtitle file: ' + subtitle.filename)
sub.mods = None
- if settings.general.getboolean('utf8_encode'):
+ if settings.general.utf8_encode:
sub.set_encoding("utf-8")
try:
diff --git a/bazarr/subtitles/utils.py b/bazarr/subtitles/utils.py
index 4f0ee2cd2..4fa0a8d27 100644
--- a/bazarr/subtitles/utils.py
+++ b/bazarr/subtitles/utils.py
@@ -37,7 +37,7 @@ def get_video(path, title, sceneName, providers=None, media_type="movie"):
hash_from = original_path
try:
- skip_hashing = settings.general.getboolean('skip_hashing')
+ skip_hashing = settings.general.skip_hashing
video = parse_video(path, hints=hints, skip_hashing=skip_hashing, dry_run=used_scene_name, providers=providers,
hash_from=hash_from)
video.used_scene_name = used_scene_name
diff --git a/bazarr/utilities/analytics.py b/bazarr/utilities/analytics.py
index 87ee46af0..9d9e06138 100644
--- a/bazarr/utilities/analytics.py
+++ b/bazarr/utilities/analytics.py
@@ -48,7 +48,7 @@ class EventTracker:
self.tracker.store.save()
def track_subtitles(self, provider, action, language):
- if not settings.analytics.getboolean('enabled'):
+ if not settings.analytics.enabled:
return
subtitles_event = self.tracker.create_new_event(name="subtitles")
diff --git a/bazarr/utilities/backup.py b/bazarr/utilities/backup.py
index 9697c2073..7080cf950 100644
--- a/bazarr/utilities/backup.py
+++ b/bazarr/utilities/backup.py
@@ -52,7 +52,7 @@ def backup_to_zip():
backup_filename = f"bazarr_backup_v{os.environ['BAZARR_VERSION']}_{now_string}.zip"
logging.debug(f'Backup filename will be: {backup_filename}')
- if not settings.postgresql.getboolean('enabled'):
+ if not settings.postgresql.enabled:
database_src_file = os.path.join(args.config_dir, 'db', 'bazarr.db')
logging.debug(f'Database file path to backup is: {database_src_file}')
@@ -71,7 +71,7 @@ def backup_to_zip():
database_backup_file = None
logging.exception('Unable to backup database file.')
- config_file = os.path.join(args.config_dir, 'config', 'config.ini')
+ config_file = os.path.join(args.config_dir, 'config', 'config.yaml')
logging.debug(f'Config file path to backup is: {config_file}')
with ZipFile(os.path.join(get_backup_path(), backup_filename), 'w') as backupZip:
@@ -83,12 +83,19 @@ def backup_to_zip():
logging.exception(f'Unable to delete temporary database backup file: {database_backup_file}')
else:
logging.debug('Database file is not included in backup. See previous exception')
- backupZip.write(config_file, 'config.ini')
+ backupZip.write(config_file, 'config.yaml')
def restore_from_backup():
- restore_config_path = os.path.join(get_restore_path(), 'config.ini')
- dest_config_path = os.path.join(args.config_dir, 'config', 'config.ini')
+ if os.path.isfile(os.path.join(get_restore_path(), 'config.yaml')):
+ restore_config_path = os.path.join(get_restore_path(), 'config.yaml')
+ dest_config_path = os.path.join(args.config_dir, 'config', 'config.yaml')
+ new_config = True
+ else:
+ restore_config_path = os.path.join(get_restore_path(), 'config.ini')
+ dest_config_path = os.path.join(args.config_dir, 'config', 'config.ini')
+ new_config = False
+
restore_database_path = os.path.join(get_restore_path(), 'bazarr.db')
dest_database_path = os.path.join(args.config_dir, 'db', 'bazarr.db')
@@ -97,8 +104,15 @@ def restore_from_backup():
shutil.copy(restore_config_path, dest_config_path)
os.remove(restore_config_path)
except OSError:
- logging.exception(f'Unable to restore or delete config.ini to {dest_config_path}')
- if not settings.postgresql.getboolean('enabled'):
+ logging.exception(f'Unable to restore or delete config file to {dest_config_path}')
+ else:
+ if new_config:
+ if os.path.isfile(os.path.join(get_restore_path(), 'config.ini')):
+ os.remove(os.path.join(get_restore_path(), 'config.ini'))
+ else:
+ if os.path.isfile(os.path.join(get_restore_path(), 'config.yaml')):
+ os.remove(os.path.join(get_restore_path(), 'config.yaml'))
+ if not settings.postgresql.enabled:
try:
shutil.copy(restore_database_path, dest_database_path)
os.remove(restore_database_path)
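
restore_from_backup() now has to cope with archives produced by either format: it prefers a config.yaml in the restore folder, falls back to a legacy config.ini, and remembers which one it restored so the stale counterpart can be deleted. A sketch of just that selection step (paths hypothetical):

    import os

    def pick_restore_config(restore_dir):
        yaml_path = os.path.join(restore_dir, 'config.yaml')
        if os.path.isfile(yaml_path):
            return yaml_path, True    # new-style backup
        return os.path.join(restore_dir, 'config.ini'), False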
diff --git a/bazarr/utilities/health.py b/bazarr/utilities/health.py
index 68e21b639..36b1625f1 100644
--- a/bazarr/utilities/health.py
+++ b/bazarr/utilities/health.py
@@ -9,9 +9,9 @@ from radarr.rootfolder import check_radarr_rootfolder
def check_health():
- if settings.general.getboolean('use_sonarr'):
+ if settings.general.use_sonarr:
check_sonarr_rootfolder()
- if settings.general.getboolean('use_radarr'):
+ if settings.general.use_radarr:
check_radarr_rootfolder()
event_stream(type='badges')
@@ -24,7 +24,7 @@ def get_health_issues():
health_issues = []
# get Sonarr rootfolder issues
- if settings.general.getboolean('use_sonarr'):
+ if settings.general.use_sonarr:
rootfolder = database.execute(
select(TableShowsRootfolder.path,
TableShowsRootfolder.accessible,
@@ -36,7 +36,7 @@ def get_health_issues():
'issue': item.error})
# get Radarr rootfolder issues
- if settings.general.getboolean('use_radarr'):
+ if settings.general.use_radarr:
rootfolder = database.execute(
select(TableMoviesRootfolder.path,
TableMoviesRootfolder.accessible,
diff --git a/bazarr/utilities/path_mappings.py b/bazarr/utilities/path_mappings.py
index 8297238e3..d9bf7609e 100644
--- a/bazarr/utilities/path_mappings.py
+++ b/bazarr/utilities/path_mappings.py
@@ -2,7 +2,7 @@
import re
-from app.config import settings, get_array_from
+from app.config import settings
class PathMappings:
@@ -11,8 +11,8 @@ class PathMappings:
self.path_mapping_movies = []
def update(self):
- self.path_mapping_series = [x for x in get_array_from(settings.general.path_mappings) if x[0] != x[1]]
- self.path_mapping_movies = [x for x in get_array_from(settings.general.path_mappings_movie) if x[0] != x[1]]
+ self.path_mapping_series = [x for x in settings.general.path_mappings if x[0] != x[1]]
+ self.path_mapping_movies = [x for x in settings.general.path_mappings_movie if x[0] != x[1]]
def path_replace(self, path):
if path is None:
diff --git a/bazarr/utilities/post_processing.py b/bazarr/utilities/post_processing.py
index 581071256..673810241 100644
--- a/bazarr/utilities/post_processing.py
+++ b/bazarr/utilities/post_processing.py
@@ -43,7 +43,7 @@ def pp_replace(pp_command, episode, subtitles, language, language_code2, languag
def set_chmod(subtitles_path):
# apply chmod if required
chmod = int(settings.general.chmod, 8) if not sys.platform.startswith(
- 'win') and settings.general.getboolean('chmod_enabled') else None
+ 'win') and settings.general.chmod_enabled else None
if chmod:
logging.debug(f"BAZARR setting permission to {chmod} on {subtitles_path} after custom post-processing.")
os.chmod(subtitles_path, chmod)
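
The chmod setting stays a string (e.g. "0640") and is parsed base-8 at each call site, as in set_chmod() above. A quick worked example of that conversion:

    mode = int('0640', 8)   # -> 416 decimal
    print(oct(mode))        # -> 0o640, i.e. rw-r----- once applied
    # os.chmod(path, mode) would then apply it on non-Windows platforms.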
diff --git a/frontend/.env.development b/frontend/.env.development
index e4f4ff67d..f2294ccb6 100644
--- a/frontend/.env.development
+++ b/frontend/.env.development
@@ -9,7 +9,7 @@
# Bazarr configuration path, must be an absolute path
# Vite will use this variable to find your Bazarr configuration file
-VITE_BAZARR_CONFIG_FILE="../data/config/config.ini"
+VITE_BAZARR_CONFIG_FILE="../data/config/config.yaml"
# Display update section in settings
VITE_CAN_UPDATE=true
diff --git a/frontend/config/configReader.ts b/frontend/config/configReader.ts
index a3d1597f5..78d682b51 100644
--- a/frontend/config/configReader.ts
+++ b/frontend/config/configReader.ts
@@ -2,48 +2,34 @@
/// <reference types="node" />
import { readFile } from "fs/promises";
+import { get } from "lodash";
+import YAML from "yaml";
class ConfigReader {
- config?: string;
+ config: object;
constructor() {
- this.config = undefined;
+ this.config = {};
}
async open(path: string) {
try {
- this.config = await readFile(path, "utf8");
+ const rawConfig = await readFile(path, "utf8");
+ this.config = YAML.parse(rawConfig);
} catch (err) {
// We don't want to catch the error here; handle it in the getValue method
}
}
getValue(sectionName: string, fieldName: string) {
- if (!this.config) {
- throw new Error("Cannot find config to read");
- }
- const targetSection = this.config
- .split("\n\n")
- .filter((section) => section.includes(`[${sectionName}]`));
-
- if (targetSection.length === 0) {
- throw new Error(`Cannot find [${sectionName}] section in config`);
- }
+ const path = `${sectionName}.${fieldName}`;
+ const result = get(this.config, path);
- const section = targetSection[0];
-
- for (const line of section.split("\n")) {
- const matched = line.startsWith(fieldName);
- if (matched) {
- const results = line.split("=");
- if (results.length === 2) {
- const key = results[1].trim();
- return key;
- }
- }
+ if (result === undefined) {
+ throw new Error(`Failed to find ${path} in the local config file`);
}
- throw new Error(`Cannot find ${fieldName} in config`);
+ return result;
}
}
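
The rewritten reader swaps hand-rolled INI splitting for YAML.parse plus lodash's get. For readers following the Python side of this patch, an equivalent sketch of the same reader (the real code above is TypeScript; PyYAML assumed):

    import yaml

    class ConfigReader:
        def __init__(self):
            self.config = {}

        def open(self, path):
            try:
                with open(path, encoding='utf8') as handle:
                    self.config = yaml.safe_load(handle) or {}
            except (OSError, yaml.YAMLError):
                pass  # as in the TS version, errors surface from get_value()

        def get_value(self, section, field):
            result = self.config.get(section, {}).get(field)
            if result is None:
                raise KeyError(f"Failed to find {section}.{field} in the local config file")
            return result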
diff --git a/frontend/package-lock.json b/frontend/package-lock.json
index f4fd62275..4de17a101 100644
--- a/frontend/package-lock.json
+++ b/frontend/package-lock.json
@@ -58,7 +58,8 @@
"typescript": "^5",
"vite": "^4.3.0",
"vite-plugin-checker": "^0.5.5",
- "vitest": "^0.30.1"
+ "vitest": "^0.30.1",
+ "yaml": "^2.3.1"
}
},
"node_modules/@adobe/css-tools": {
@@ -4818,6 +4819,14 @@
"node": ">=10"
}
},
+ "node_modules/cosmiconfig/node_modules/yaml": {
+ "version": "1.10.2",
+ "resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz",
+ "integrity": "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==",
+ "engines": {
+ "node": ">= 6"
+ }
+ },
"node_modules/cross-spawn": {
"version": "7.0.3",
"resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz",
@@ -10439,11 +10448,12 @@
"dev": true
},
"node_modules/yaml": {
- "version": "1.10.2",
- "resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz",
- "integrity": "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==",
+ "version": "2.3.1",
+ "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.3.1.tgz",
+ "integrity": "sha512-2eHWfjaoXgTBC2jNM1LRef62VQa0umtvRiDSk6HSzW7RvS5YtkabJrwYLLEKWBc8a5U2PTSCs+dJjUTJdlHsWQ==",
+ "dev": true,
"engines": {
- "node": ">= 6"
+ "node": ">= 14"
}
},
"node_modules/yargs": {
diff --git a/frontend/package.json b/frontend/package.json
index 8b0c7c6df..2f5089f01 100644
--- a/frontend/package.json
+++ b/frontend/package.json
@@ -42,7 +42,6 @@
"@types/react-dom": "^18.2.0",
"@types/react-table": "^7.7.0",
"@vitejs/plugin-react": "^4.0.0",
- "vitest": "^0.30.1",
"@vitest/coverage-c8": "^0.30.0",
"@vitest/ui": "^0.30.0",
"clsx": "^1.2.0",
@@ -62,7 +61,9 @@
"sass": "^1.62.0",
"typescript": "^5",
"vite": "^4.3.0",
- "vite-plugin-checker": "^0.5.5"
+ "vite-plugin-checker": "^0.5.5",
+ "vitest": "^0.30.1",
+ "yaml": "^2.3.1"
},
"scripts": {
"start": "vite",
diff --git a/libs/dynaconf/__init__.py b/libs/dynaconf/__init__.py
new file mode 100644
index 000000000..a99ce5f95
--- /dev/null
+++ b/libs/dynaconf/__init__.py
@@ -0,0 +1,31 @@
+from __future__ import annotations
+
+from dynaconf.base import LazySettings # noqa
+from dynaconf.constants import DEFAULT_SETTINGS_FILES
+from dynaconf.contrib import DjangoDynaconf # noqa
+from dynaconf.contrib import FlaskDynaconf # noqa
+from dynaconf.validator import ValidationError # noqa
+from dynaconf.validator import Validator # noqa
+
+settings = LazySettings(
+ # This global `settings` is deprecated from v3.0.0+
+ # kept here for backwards compatibility
+ # To Be Removed in 4.0.x
+ warn_dynaconf_global_settings=True,
+ environments=True,
+ lowercase_read=False,
+ load_dotenv=True,
+ default_settings_paths=DEFAULT_SETTINGS_FILES,
+)
+
+# This is the new recommended base class alias
+Dynaconf = LazySettings # noqa
+
+__all__ = [
+ "Dynaconf",
+ "LazySettings",
+ "Validator",
+ "FlaskDynaconf",
+ "ValidationError",
+ "DjangoDynaconf",
+]
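
A minimal usage sketch for the vendored library (standard dynaconf 3.x API): instantiate your own Dynaconf object rather than importing the deprecated module-level settings:

    from dynaconf import Dynaconf

    settings = Dynaconf(settings_files=['config.yaml'])
    # Values keep their YAML types and support attribute, item and dotted
    # access (keys hypothetical):
    # settings.general.use_sonarr, settings['general'], settings.get('general.port')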
diff --git a/libs/dynaconf/base.py b/libs/dynaconf/base.py
new file mode 100644
index 000000000..15f3df468
--- /dev/null
+++ b/libs/dynaconf/base.py
@@ -0,0 +1,1285 @@
+from __future__ import annotations
+
+import copy
+import glob
+import importlib
+import inspect
+import os
+import warnings
+from collections import defaultdict
+from contextlib import contextmanager
+from contextlib import suppress
+from pathlib import Path
+
+from dynaconf import default_settings
+from dynaconf.loaders import default_loader
+from dynaconf.loaders import enable_external_loaders
+from dynaconf.loaders import env_loader
+from dynaconf.loaders import execute_hooks
+from dynaconf.loaders import py_loader
+from dynaconf.loaders import settings_loader
+from dynaconf.loaders import yaml_loader
+from dynaconf.utils import BANNER
+from dynaconf.utils import compat_kwargs
+from dynaconf.utils import ensure_a_list
+from dynaconf.utils import missing
+from dynaconf.utils import object_merge
+from dynaconf.utils import recursively_evaluate_lazy_format
+from dynaconf.utils import RENAMED_VARS
+from dynaconf.utils import upperfy
+from dynaconf.utils.boxing import DynaBox
+from dynaconf.utils.files import find_file
+from dynaconf.utils.functional import empty
+from dynaconf.utils.functional import LazyObject
+from dynaconf.utils.parse_conf import converters
+from dynaconf.utils.parse_conf import get_converter
+from dynaconf.utils.parse_conf import parse_conf_data
+from dynaconf.utils.parse_conf import true_values
+from dynaconf.validator import ValidatorList
+from dynaconf.vendor.box.box_list import BoxList
+
+
+class LazySettings(LazyObject):
+ """Loads settings lazily from multiple sources::
+
+ settings = Dynaconf(
+ settings_files=["settings.toml"], # path/glob
+ environments=True, # activate layered environments
+ envvar_prefix="MYAPP", # `export MYAPP_FOO=bar`
+ env_switcher="MYAPP_MODE", # `export MYAPP_MODE=production`
+ load_dotenv=True, # read a .env file
+ )
+
+ More options available on https://www.dynaconf.com/configuration/
+ """
+
+ def __init__(self, wrapped=None, **kwargs):
+ """
+ handle initialization for the customization cases
+
+ :param wrapped: a deepcopy of this object will be wrapped (issue #596)
+ :param kwargs: values that overrides default_settings
+ """
+
+ self._warn_dynaconf_global_settings = kwargs.pop(
+ "warn_dynaconf_global_settings", None
+ ) # in 3.0.0 global settings is deprecated
+
+ self.__resolve_config_aliases(kwargs)
+ compat_kwargs(kwargs)
+ self._kwargs = kwargs
+ super().__init__()
+
+ if wrapped:
+ if self._django_override:
+ # This fixes django issue #596
+ self._wrapped = copy.deepcopy(wrapped)
+ else:
+ self._wrapped = wrapped
+
+ def __resolve_config_aliases(self, kwargs):
+ """takes aliases for _FOR_DYNACONF configurations
+
+ e.g: ROOT_PATH='/' is transformed into `ROOT_PATH_FOR_DYNACONF`
+ """
+
+ mispells = {
+ "settings_files": "settings_file",
+ "SETTINGS_FILES": "SETTINGS_FILE",
+ "environment": "environments",
+ "ENVIRONMENT": "ENVIRONMENTS",
+ }
+ for misspell, correct in mispells.items():
+ if misspell in kwargs:
+ kwargs[correct] = kwargs.pop(misspell)
+
+ for_dynaconf_keys = {
+ key
+ for key in UPPER_DEFAULT_SETTINGS
+ if key.endswith("_FOR_DYNACONF")
+ }
+ aliases = {
+ key.upper()
+ for key in kwargs
+ if f"{key.upper()}_FOR_DYNACONF" in for_dynaconf_keys
+ }
+ for alias in aliases:
+ value = kwargs.pop(alias, empty)
+ if value is empty:
+ value = kwargs.pop(alias.lower())
+ kwargs[f"{alias}_FOR_DYNACONF"] = value
+
+ def __getattr__(self, name):
+ """Allow getting keys from self.store using dot notation"""
+ if self._wrapped is empty:
+ self._setup()
+ if name in self._wrapped._deleted: # noqa
+ raise AttributeError(
+ f"Attribute {name} was deleted, " "or belongs to different env"
+ )
+
+ if name not in RESERVED_ATTRS:
+ lowercase_mode = self._kwargs.get(
+ "LOWERCASE_READ_FOR_DYNACONF",
+ default_settings.LOWERCASE_READ_FOR_DYNACONF,
+ )
+ if lowercase_mode is True:
+ name = name.upper()
+
+ if (
+ name.isupper()
+ and (
+ self._wrapped._fresh
+ or name in self._wrapped.FRESH_VARS_FOR_DYNACONF
+ )
+ and name not in UPPER_DEFAULT_SETTINGS
+ ):
+ return self._wrapped.get_fresh(name)
+ value = getattr(self._wrapped, name)
+ if name not in RESERVED_ATTRS:
+ return recursively_evaluate_lazy_format(value, self)
+ return value
+
+ def __call__(self, *args, **kwargs):
+ """Allow direct call of settings('val')
+ in place of settings.get('val')
+ """
+ return self.get(*args, **kwargs)
+
+ @property
+ def _should_load_dotenv(self):
+ """Chicken and egg problem, we must manually check envvar
+ before deciding if we are loading envvars :)"""
+ _environ_load_dotenv = parse_conf_data(
+ os.environ.get("LOAD_DOTENV_FOR_DYNACONF"), tomlfy=True
+ )
+ return self._kwargs.get("load_dotenv", _environ_load_dotenv)
+
+ def _setup(self):
+ """Initial setup, run once."""
+
+ if self._warn_dynaconf_global_settings:
+ warnings.warn(
+ "Usage of `from dynaconf import settings` is now "
+ "DEPRECATED in 3.0.0+. You are encouraged to change it to "
+ "your own instance e.g: `settings = Dynaconf(*options)`",
+ DeprecationWarning,
+ )
+
+ default_settings.reload(self._should_load_dotenv)
+ environment_variable = self._kwargs.get(
+ "ENVVAR_FOR_DYNACONF", default_settings.ENVVAR_FOR_DYNACONF
+ )
+ settings_module = os.environ.get(environment_variable)
+ self._wrapped = Settings(
+ settings_module=settings_module, **self._kwargs
+ )
+
+ def configure(self, settings_module=None, **kwargs):
+ """
+ Allows user to reconfigure settings object passing a new settings
+ module or separated kwargs
+
+ :param settings_module: defines the settings file
+ :param kwargs: override default settings
+ """
+ default_settings.reload(self._should_load_dotenv)
+ environment_var = self._kwargs.get(
+ "ENVVAR_FOR_DYNACONF", default_settings.ENVVAR_FOR_DYNACONF
+ )
+ settings_module = settings_module or os.environ.get(environment_var)
+ compat_kwargs(kwargs)
+ kwargs.update(self._kwargs)
+ self._wrapped = Settings(settings_module=settings_module, **kwargs)
+
+ @property
+ def configured(self):
+ """If wrapped is configured"""
+ return self._wrapped is not empty
+
+
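# Usage sketch (standard dynaconf behavior, file name hypothetical): the class
# above defers all loading until the first attribute access, so construction is
# essentially free and import order stops mattering.
from dynaconf import LazySettings

settings = LazySettings(settings_files=['config.yaml'])
# Nothing has been read yet; the first access triggers _setup() and the loaders:
# settings.get('general', {})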
+class Settings:
+ """
+ Common logic for settings whether set by a module or by the user.
+ """
+
+ dynaconf_banner = BANNER
+ _store = DynaBox()
+
+ def __init__(self, settings_module=None, **kwargs): # pragma: no cover
+ """Execute loaders and custom initialization
+
+ :param settings_module: defines the settings file
+ :param kwargs: override default settings
+ """
+ self._fresh = False
+ self._loaded_envs = []
+ self._loaded_hooks = defaultdict(dict)
+ self._loaded_py_modules = []
+ self._loaded_files = []
+ self._deleted = set()
+ self._store = DynaBox(box_settings=self)
+ self._env_cache = {}
+ self._loaded_by_loaders = {}
+ self._loaders = []
+ self._defaults = DynaBox(box_settings=self)
+ self.environ = os.environ
+ self.SETTINGS_MODULE = None
+ self.filter_strategy = kwargs.get("filter_strategy", None)
+ self._not_installed_warnings = []
+ self._validate_only = kwargs.pop("validate_only", None)
+ self._validate_exclude = kwargs.pop("validate_exclude", None)
+ self._validate_only_current_env = kwargs.pop(
+ "validate_only_current_env", False
+ )
+
+ self.validators = ValidatorList(
+ self, validators=kwargs.pop("validators", None)
+ )
+
+ compat_kwargs(kwargs)
+ if settings_module:
+ self.set("SETTINGS_FILE_FOR_DYNACONF", settings_module)
+ for key, value in kwargs.items():
+ self.set(key, value)
+ # execute loaders only after setting defaults got from kwargs
+ self._defaults = kwargs
+
+ # The following flags are used for when copying of settings is done
+ skip_loaders = kwargs.get("dynaconf_skip_loaders", False)
+ skip_validators = kwargs.get("dynaconf_skip_validators", False)
+
+ if not skip_loaders:
+ self.execute_loaders()
+
+ if not skip_validators:
+ self.validators.validate(
+ only=self._validate_only,
+ exclude=self._validate_exclude,
+ only_current_env=self._validate_only_current_env,
+ )
+
+ def __call__(self, *args, **kwargs):
+ """Allow direct call of `settings('val')`
+ in place of `settings.get('val')`
+ """
+ return self.get(*args, **kwargs)
+
+ def __setattr__(self, name, value):
+ """Allow `settings.FOO = 'value'` while keeping internal attrs."""
+
+ if name in RESERVED_ATTRS:
+ super().__setattr__(name, value)
+ else:
+ self.set(name, value)
+
+ def __delattr__(self, name):
+ """stores reference in `_deleted` for proper error management"""
+ self._deleted.add(name)
+ if hasattr(self, name):
+ super().__delattr__(name)
+
+ def __contains__(self, item):
+ """Respond to `item in settings`"""
+ return item.upper() in self.store or item.lower() in self.store
+
+ def __getattribute__(self, name):
+ if name not in RESERVED_ATTRS and name not in UPPER_DEFAULT_SETTINGS:
+ with suppress(KeyError):
+ # self._store has Lazy values already evaluated
+ if (
+ name.islower()
+ and self._store.get("LOWERCASE_READ_FOR_DYNACONF", empty)
+ is False
+ ):
+ # only matches exact casing, first levels always upper
+ return self._store.to_dict()[name]
+ # perform lookups for upper, and casefold
+ return self._store[name]
+ # in case of RESERVED_ATTRS or KeyError above, keep default behaviour
+ return super().__getattribute__(name)
+
+ def __getitem__(self, item):
+ """Allow getting variables as dict keys `settings['KEY']`"""
+ value = self.get(item, default=empty)
+ if value is empty:
+ raise KeyError(f"{item} does not exist")
+ return value
+
+ def __setitem__(self, key, value):
+ """Allow `settings['KEY'] = 'value'`"""
+ self.set(key, value)
+
+ @property
+ def store(self):
+ """Gets internal storage"""
+ return self._store
+
+ def __dir__(self):
+ """Enable auto-complete for code editors"""
+ return (
+ RESERVED_ATTRS
+ + [k.lower() for k in self.keys()]
+ + list(self.keys())
+ )
+
+ def __iter__(self):
+ """Redirects to store object"""
+ yield from self._store
+
+ def items(self):
+ """Redirects to store object"""
+ return self._store.items()
+
+ def keys(self):
+ """Redirects to store object"""
+ return self.store.keys()
+
+ def values(self):
+ """Redirects to store object"""
+ return self.store.values()
+
+ def setdefault(self, item, default, apply_default_on_none=False):
+ """Returns value if exists or set it as the given default
+
+ apply_default_on_none: if True, default is set when value is None
+ """
+ value = self.get(item, empty)
+
+ # YAML loader reads empty values as None; should we apply defaults?
+ global_apply_default = (
+ self.get("APPLY_DEFAULT_ON_NONE_FOR_DYNACONF") is not None
+ )
+ apply_default = default is not empty and (
+ value is empty
+ or (
+ value is None
+ and (
+ apply_default_on_none is True
+ or global_apply_default is True
+ )
+ )
+ )
+
+ if apply_default:
+ self.set(
+ item,
+ default,
+ loader_identifier="setdefault",
+ tomlfy=True,
+ )
+ return default
+
+ return value
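+ # Usage sketch (illustrative key names): the default only wins when the
+ # current value is empty (or None, when the flags above say so):
+ # settings.setdefault("TIMEOUT", 30) # -> 30 when TIMEOUT was unset
+ # settings.setdefault("TIMEOUT", 60) # -> 30, existing value is kept
+ # settings.setdefault("RETRIES", 5, apply_default_on_none=True)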
+
+ def as_dict(self, env=None, internal=False):
+ """Returns a dictionary with set key and values.
+
+ :param env: Str env name, defaults to self.current_env (e.g: `DEVELOPMENT`)
+ :param internal: bool - should include dynaconf internal vars?
+ """
+ ctx_mgr = suppress() if env is None else self.using_env(env)
+ with ctx_mgr:
+ data = self.store.to_dict().copy()
+ # if not internal remove internal settings
+ if not internal:
+ for name in UPPER_DEFAULT_SETTINGS:
+ data.pop(name, None)
+ return data
+
+ to_dict = as_dict # backwards compatibility
+
+ def _dotted_get(
+ self, dotted_key, default=None, parent=None, cast=None, **kwargs
+ ):
+ """
+ Perform dotted key lookups and keep track of where we are.
+ :param dotted_key: The dotted path to the setting, first level always upper case
+ :param default: In case of not found it will be returned
+ :param parent: Is there a pre-loaded parent in a nested data?
+ """
+ split_key = dotted_key.split(".")
+ name, keys = split_key[0], split_key[1:]
+ result = self.get(name, default=default, parent=parent, **kwargs)
+
+ # If we've reached the end, or parent key not found, then return result
+ if not keys or result == default:
+ if cast and cast in converters:
+ return get_converter(cast, result, box_settings=self)
+ elif cast is True:
+ return parse_conf_data(result, tomlfy=True, box_settings=self)
+ return result
+
+ # If we've still got key elements to traverse, let's do that.
+ return self._dotted_get(
+ ".".join(keys), default=default, parent=result, cast=cast, **kwargs
+ )
+
+ def get(
+ self,
+ key,
+ default=None,
+ cast=None,
+ fresh=False,
+ dotted_lookup=empty,
+ parent=None,
+ ):
+ """
+ Get a value from the settings store; this is the preferred way to access::
+
+ >>> from dynaconf import settings
+ >>> settings.get('KEY')
+
+ :param key: The name of the setting value, will always be upper case
+ :param default: In case of not found it will be returned
+ :param cast: Should cast into @int, @float, @bool or @json?
+ :param fresh: Should reload from loaders store before access?
+ :param dotted_lookup: Should perform dotted-path lookup?
+ :param parent: Is there a pre-loaded parent in a nested data?
+ :return: The value if found, default or None
+ """
+ nested_sep = self._store.get("NESTED_SEPARATOR_FOR_DYNACONF")
+ if nested_sep and nested_sep in key:
+ # turn FOO__bar__ZAZ in `FOO.bar.ZAZ`
+ key = key.replace(nested_sep, ".")
+
+ if dotted_lookup is empty:
+ dotted_lookup = self._store.get("DOTTED_LOOKUP_FOR_DYNACONF")
+
+ if "." in key and dotted_lookup:
+ return self._dotted_get(
+ dotted_key=key,
+ default=default,
+ cast=cast,
+ fresh=fresh,
+ parent=parent,
+ )
+
+ if default is not None:
+ # default values should behave exactly like Dynaconf parsed values
+ if isinstance(default, list):
+ default = BoxList(default)
+ elif isinstance(default, dict):
+ default = DynaBox(default)
+
+ key = upperfy(key)
+ if key in self._deleted:
+ return default
+
+ if (
+ fresh
+ or self._fresh
+ or key in getattr(self, "FRESH_VARS_FOR_DYNACONF", ())
+ ) and key not in UPPER_DEFAULT_SETTINGS:
+ self.unset(key)
+ self.execute_loaders(key=key)
+
+ data = (parent or self.store).get(key, default)
+ if cast:
+ data = get_converter(cast, data, box_settings=self)
+ return data
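+ # Usage sketch (hypothetical keys) combining the features above:
+ # settings.get("database.host", default="localhost") # dotted lookup
+ # settings.get("PORT", cast="@int") # "8080" -> 8080
+ # settings.get("PORT", fresh=True) # re-runs loaders first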
+
+ def exists(self, key, fresh=False):
+ """Check if key exists
+
+ :param key: the name of setting variable
+ :param fresh: if key should be taken from source directly
+ :return: Boolean
+ """
+ key = upperfy(key)
+ if key in self._deleted:
+ return False
+ return self.get(key, fresh=fresh, default=missing) is not missing
+
+ def get_fresh(self, key, default=None, cast=None):
+ """This is a shortcut to `get(key, fresh=True)`. always reload from
+ loaders store before getting the var.
+
+ :param key: The name of the setting value, will always be upper case
+ :param default: In case of not found it will be returned
+ :param cast: Should cast into @int, @float, @bool or @json?
+ :return: The value if found, default or None
+ """
+ return self.get(key, default=default, cast=cast, fresh=True)
+
+ def get_environ(self, key, default=None, cast=None):
+ """Get value from environment variable using os.environ.get
+
+ :param key: The name of the setting value, will always be upper case
+ :param default: In case of not found it will be returned
+ :param cast: Should cast into @int, @float, @bool or @json?
+ or cast must be true to use cast inference
+ :return: The value if found, default or None
+ """
+ key = upperfy(key)
+ data = self.environ.get(key, default)
+ if data:
+ if cast in converters:
+ data = get_converter(cast, data, box_settings=self)
+ elif cast is True:
+ data = parse_conf_data(data, tomlfy=True, box_settings=self)
+ return data
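+ # Sketch: with `export FOO=42` exported (hypothetical variable),
+ # settings.get_environ("foo") # -> "42" (raw string)
+ # settings.get_environ("foo", cast=True) # -> 42 (toml type inference)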
+
+ def exists_in_environ(self, key):
+ """Return True if env variable is exported"""
+ return upperfy(key) in self.environ
+
+ def as_bool(self, key):
+ """Partial method for get with bool cast"""
+ return self.get(key, cast="@bool")
+
+ def as_int(self, key):
+ """Partial method for get with int cast"""
+ return self.get(key, cast="@int")
+
+ def as_float(self, key):
+ """Partial method for get with float cast"""
+ return self.get(key, cast="@float")
+
+ def as_json(self, key):
+ """Partial method for get with json cast"""
+ return self.get(key, cast="@json")
+
+ @property
+ def loaded_envs(self):
+ """Get or create internal loaded envs list"""
+ if not self._loaded_envs:
+ self._loaded_envs = []
+ return self._loaded_envs
+
+ @loaded_envs.setter
+ def loaded_envs(self, value):
+ """Setter for env list"""
+ self._loaded_envs = value
+
+ # compat
+ loaded_namespaces = loaded_envs
+
+ @property
+ def loaded_by_loaders(self):
+ """Gets the internal mapping of LOADER -> values"""
+ return self._loaded_by_loaders
+
+ def from_env(self, env="", keep=False, **kwargs):
+ """Return a new isolated settings object pointing to specified env.
+
+ Example of settings.toml::
+
+ [development]
+ message = 'This is in dev'
+ [other]
+ message = 'this is in other env'
+
+ Program::
+
+ >>> from dynaconf import settings
+ >>> print(settings.MESSAGE)
+ 'This is in dev'
+ >>> print(settings.from_env('other').MESSAGE)
+ 'This is in other env'
+ # The existing settings object remains the same.
+ >>> print(settings.MESSAGE)
+ 'This is in dev'
+
+ Arguments:
+ env {str} -- Env to load (development, production, custom)
+
+ Keyword Arguments:
+ keep {bool} -- Keep pre-existing values (default: {False})
+ kwargs {dict} -- Passed directly to new instance.
+ """
+ cache_key = f"{env}_{keep}_{kwargs}"
+ if cache_key in self._env_cache:
+ return self._env_cache[cache_key]
+
+ new_data = {
+ key: self.get(key)
+ for key in UPPER_DEFAULT_SETTINGS
+ if key not in RENAMED_VARS
+ }
+
+ if self.filter_strategy:
+ # Retain the filtering strategy when switching environments
+ new_data["filter_strategy"] = self.filter_strategy
+
+ # This is here for backwards compatibility
+ # To be removed on 4.x.x
+ default_settings_paths = self.get("default_settings_paths")
+ if default_settings_paths: # pragma: no cover
+ new_data["default_settings_paths"] = default_settings_paths
+
+ if keep:
+ # keep existing values from current env
+ new_data.update(
+ {
+ key: value
+ for key, value in self.store.to_dict().copy().items()
+ if key.isupper() and key not in RENAMED_VARS
+ }
+ )
+
+ new_data.update(kwargs)
+ new_data["FORCE_ENV_FOR_DYNACONF"] = env
+ new_settings = LazySettings(**new_data)
+ self._env_cache[cache_key] = new_settings
+ return new_settings
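+ # Sketch, assuming a [production] env exists in the settings files:
+ # prod = settings.from_env("production") # isolated copy
+ # both = settings.from_env("production", keep=True) # keeps current keys
+ # `settings` itself is unchanged and results are cached per cache_key.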
+
+ @contextmanager
+ def using_env(self, env, clean=True, silent=True, filename=None):
+ """
+ This context manager allows the contextual use of a different env
+ Example of settings.toml::
+
+ [development]
+ message = 'This is in dev'
+ [other]
+ message = 'this is in other env'
+
+ Program::
+
+ >>> from dynaconf import settings
+ >>> print(settings.MESSAGE)
+ 'This is in dev'
+ >>> with settings.using_env('OTHER'):
+ ... print(settings.MESSAGE)
+ 'this is in other env'
+
+ :param env: Upper case name of env without any _
+ :param clean: If preloaded vars should be cleaned
+ :param silent: Silence errors
+ :param filename: Custom filename to load (optional)
+ :return: context
+ """
+ try:
+ self.setenv(env, clean=clean, silent=silent, filename=filename)
+ yield
+ finally:
+ if env.lower() != self.ENV_FOR_DYNACONF.lower():
+ del self.loaded_envs[-1]
+ self.setenv(self.current_env, clean=clean, filename=filename)
+
+ # compat
+ using_namespace = using_env
+
+ @contextmanager
+ def fresh(self):
+ """
+ this context manager forces the load of a key directly from the store::
+
+ $ export DYNACONF_VALUE='Original'
+ >>> from dynaconf import settings
+ >>> print(settings.VALUE)
+ 'Original'
+ $ export DYNACONF_VALUE='Changed Value'
+ >>> print(settings.VALUE) # will not be reloaded from env vars
+ 'Original'
+ >>> with settings.fresh(): # inside this context all is reloaded
+ ... print(settings.VALUE)
+ 'Changed Value'
+
+ an alternative is using `settings.get_fresh(key)`
+
+ :return: context
+ """
+
+ self._fresh = True
+ yield
+ self._fresh = False
+
+ @property
+ def current_env(self):
+ """Return the current active env"""
+
+ if self.ENVIRONMENTS_FOR_DYNACONF is False:
+ return self.MAIN_ENV_FOR_DYNACONF.lower()
+
+ if self.FORCE_ENV_FOR_DYNACONF is not None:
+ self.ENV_FOR_DYNACONF = self.FORCE_ENV_FOR_DYNACONF
+ return self.FORCE_ENV_FOR_DYNACONF
+
+ try:
+ return self.loaded_envs[-1]
+ except IndexError:
+ return self.ENV_FOR_DYNACONF
+
+ # compat
+ current_namespace = current_env
+
+ @property
+ def settings_module(self):
+ """Gets SETTINGS_MODULE variable"""
+ settings_module = parse_conf_data(
+ os.environ.get(
+ self.ENVVAR_FOR_DYNACONF, self.SETTINGS_FILE_FOR_DYNACONF
+ ),
+ tomlfy=True,
+ box_settings=self,
+ )
+ if settings_module != getattr(self, "SETTINGS_MODULE", None):
+ self.set("SETTINGS_MODULE", settings_module)
+
+ # This is for backwards compatibility, to be removed on 4.x.x
+ if not self.SETTINGS_MODULE and self.get("default_settings_paths"):
+ self.SETTINGS_MODULE = self.get("default_settings_paths")
+
+ return self.SETTINGS_MODULE
+
+ # Backwards compatibility see #169
+ settings_file = settings_module
+
+ def setenv(self, env=None, clean=True, silent=True, filename=None):
+ """Used to interactively change the env
+ Example of settings.toml::
+
+ [development]
+ message = 'This is in dev'
+ [other]
+ message = 'this is in other env'
+
+ Program::
+
+ >>> from dynaconf import settings
+ >>> print(settings.MESSAGE)
+ 'This is in dev'
+ >>> with settings.using_env('OTHER'):
+ ... print(settings.MESSAGE)
+ 'this is in other env'
+
+ :param env: Upper case name of env without any _
+ :param clean: If preloaded vars should be cleaned
+ :param silent: Silence errors
+ :param filename: Custom filename to load (optional)
+ :return: context
+ """
+ env = env or self.ENV_FOR_DYNACONF
+
+ if not isinstance(env, str) or "_" in env or " " in env:
+ raise ValueError("env should be a string without _ or spaces")
+
+ env = env.upper()
+
+ if env != self.ENV_FOR_DYNACONF:
+ self.loaded_envs.append(env)
+ else:
+ self.loaded_envs = []
+
+ if clean:
+ self.clean(env=env)
+ self.execute_loaders(env=env, silent=silent, filename=filename)
+
+ # compat
+ namespace = setenv
+
+ def clean(self, *args, **kwargs):
+ """Clean all loaded values to reload when switching envs"""
+ for key in list(self.store.keys()):
+ self.unset(key)
+
+ def unset(self, key, force=False):
+ """Unset on all references
+
+ :param key: The key to be unset
+ :param force: Bypass default checks and force unset
+ """
+ key = upperfy(key.strip())
+ if (
+ key not in UPPER_DEFAULT_SETTINGS
+ and key not in self._defaults
+ or force
+ ):
+ with suppress(KeyError, AttributeError):
+ # AttributeError can happen when a LazyValue consumes
+ # a previously deleted key
+ delattr(self, key)
+ del self.store[key]
+
+ def unset_all(self, keys, force=False): # pragma: no cover
+ """Unset based on a list of keys
+
+ :param keys: a list of keys
+ :param force: Bypass default checks and force unset
+ """
+ for key in keys:
+ self.unset(key, force=force)
+
+ def _dotted_set(self, dotted_key, value, tomlfy=False, **kwargs):
+ """Sets dotted keys as nested dictionaries.
+
+ Dotted set will always reassign the value; to merge use the `@merge` token
+
+ Arguments:
+ dotted_key {str} -- A traversal name e.g: foo.bar.zaz
+ value {Any} -- The value to set to the nested value.
+
+ Keyword Arguments:
+ tomlfy {bool} -- Perform toml parsing (default: {False})
+ """
+
+ split_keys = dotted_key.split(".")
+ existing_data = self.get(split_keys[0], {})
+ new_data = tree = DynaBox(box_settings=self)
+
+ for k in split_keys[:-1]:
+ tree = tree.setdefault(k, {})
+
+ value = parse_conf_data(value, tomlfy=tomlfy, box_settings=self)
+ tree[split_keys[-1]] = value
+
+ if existing_data:
+ new_data = object_merge(
+ old=DynaBox({split_keys[0]: existing_data}),
+ new=new_data,
+ full_path=split_keys,
+ )
+ self.update(data=new_data, tomlfy=tomlfy, **kwargs)
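+ # Sketch (illustrative names): a dotted set builds nested mappings, so
+ # settings.set("DATABASE.HOST", "db1", dotted_lookup=True)
+ # leaves settings.DATABASE == {"HOST": "db1"}, merged over any existing
+ # DATABASE keys by object_merge above.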
+
+ def set(
+ self,
+ key,
+ value,
+ loader_identifier=None,
+ tomlfy=False,
+ dotted_lookup=empty,
+ is_secret="DeprecatedArgument", # noqa
+ merge=False,
+ ):
+ """Set a value storing references for the loader
+
+ :param key: The key to store
+ :param value: The value to store
+ :param loader_identifier: Optional loader name e.g: toml, yaml etc.
+ :param tomlfy: Bool define if value is parsed by toml (defaults False)
+ :param merge: Bool define if existing nested data will be merged.
+ """
+ if dotted_lookup is empty:
+ dotted_lookup = self.get("DOTTED_LOOKUP_FOR_DYNACONF")
+
+ nested_sep = self.get("NESTED_SEPARATOR_FOR_DYNACONF")
+ if nested_sep and nested_sep in key:
+ # turn FOO__bar__ZAZ in `FOO.bar.ZAZ`
+ key = key.replace(nested_sep, ".")
+
+ if "." in key and dotted_lookup is True:
+ return self._dotted_set(
+ key, value, loader_identifier=loader_identifier, tomlfy=tomlfy
+ )
+
+ value = parse_conf_data(value, tomlfy=tomlfy, box_settings=self)
+ key = upperfy(key.strip())
+ existing = getattr(self, key, None)
+
+ if getattr(value, "_dynaconf_del", None):
+ # just in case someone uses a `@del` in a first level var.
+ self.unset(key, force=True)
+ return
+
+ if getattr(value, "_dynaconf_reset", False): # pragma: no cover
+ # just in case someone uses a `@reset` in a first level var.
+ value = value.unwrap()
+
+ if getattr(value, "_dynaconf_merge_unique", False):
+ # just in case someone uses a `@merge_unique` in a first level var
+ if existing:
+ value = object_merge(existing, value.unwrap(), unique=True)
+ else:
+ value = value.unwrap()
+
+ if getattr(value, "_dynaconf_merge", False):
+ # just in case someone uses a `@merge` in a first level var
+ if existing:
+ value = object_merge(existing, value.unwrap())
+ else:
+ value = value.unwrap()
+
+ if existing is not None and existing != value:
+ # `dynaconf_merge` used in file root `merge=True`
+ if merge:
+ value = object_merge(existing, value)
+ else:
+ # `dynaconf_merge` may be used within the key structure
+ # Or merge_enabled is set to True
+ value = self._merge_before_set(existing, value)
+
+ if isinstance(value, dict):
+ value = DynaBox(value, box_settings=self)
+
+ self.store[key] = value
+ self._deleted.discard(key)
+ super().__setattr__(key, value)
+
+ # set loader identifiers so cleaners know which keys to clean
+ if loader_identifier and loader_identifier in self.loaded_by_loaders:
+ self.loaded_by_loaders[loader_identifier][key] = value
+ elif loader_identifier:
+ self.loaded_by_loaders[loader_identifier] = {key: value}
+ elif loader_identifier is None:
+ # if .set is called without loader identifier it becomes
+ # a default value and goes away only when explicitly unset
+ self._defaults[key] = value
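+ # Merge sketch (hypothetical values): given settings.DB == {"host": "x"},
+ # settings.set("DB", {"port": 5432}, merge=True) keeps both keys, while
+ # the default merge=False path still honors `dynaconf_merge` markers and
+ # MERGE_ENABLED_FOR_DYNACONF through _merge_before_set() above.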
+
+ def update(
+ self,
+ data=None,
+ loader_identifier=None,
+ tomlfy=False,
+ merge=False,
+ is_secret="DeprecatedArgument", # noqa
+ dotted_lookup=empty,
+ **kwargs,
+ ):
+ """
+ Update values in the current settings object without saving in stores::
+
+ >>> from dynaconf import settings
+ >>> print(settings.NAME)
+ 'Bruno'
+ >>> settings.update({'NAME': 'John'}, other_value=1)
+ >>> print(settings.NAME)
+ 'John'
+ >>> print(settings.OTHER_VALUE)
+ 1
+
+ :param data: Data to be updated
+ :param loader_identifier: Only to be used by custom loaders
+ :param tomlfy: Bool define if value is parsed by toml (defaults False)
+ :param merge: Bool define if existing nested data will be merged.
+ :param kwargs: extra values to update
+ :return: None
+ """
+ data = data or {}
+ data.update(kwargs)
+ for key, value in data.items():
+ self.set(
+ key,
+ value,
+ loader_identifier=loader_identifier,
+ tomlfy=tomlfy,
+ merge=merge,
+ dotted_lookup=dotted_lookup,
+ )
+
+ def _merge_before_set(self, existing, value):
+ """Merge the new value being set with the existing value before set"""
+ global_merge = getattr(self, "MERGE_ENABLED_FOR_DYNACONF", False)
+ if isinstance(value, dict):
+ local_merge = value.pop(
+ "dynaconf_merge", value.pop("dynaconf_merge_unique", None)
+ )
+ if local_merge not in (True, False, None) and not value:
+ # In case `dynaconf_merge:` holds a non-boolean value - ref #241
+ value = local_merge
+
+ if global_merge or local_merge:
+ value = object_merge(existing, value)
+
+ if isinstance(value, (list, tuple)):
+ local_merge = (
+ "dynaconf_merge" in value or "dynaconf_merge_unique" in value
+ )
+ if global_merge or local_merge:
+ value = list(value)
+ unique = False
+ if local_merge:
+ try:
+ value.remove("dynaconf_merge")
+ except ValueError: # EAFP
+ value.remove("dynaconf_merge_unique")
+ unique = True
+ value = object_merge(existing, value, unique=unique)
+ return value
+
+ @property
+ def loaders(self): # pragma: no cover
+ """Return available loaders"""
+ if self.LOADERS_FOR_DYNACONF in (None, 0, "0", "false", False):
+ return []
+
+ if not self._loaders:
+ self._loaders = self.LOADERS_FOR_DYNACONF
+
+ return [importlib.import_module(loader) for loader in self._loaders]
+
+ def reload(self, env=None, silent=None): # pragma: no cover
+ """Clean end Execute all loaders"""
+ self.clean()
+ self.execute_loaders(env, silent)
+
+ def execute_loaders(
+ self, env=None, silent=None, key=None, filename=None, loaders=None
+ ):
+ """Execute all internal and registered loaders
+
+ :param env: The environment to load
+ :param silent: If loading errors are silenced
+ :param key: if provided load a single key
+ :param filename: optional custom filename to load
+ :param loaders: optional list of loader modules
+ """
+ if key is None:
+ default_loader(self, self._defaults)
+
+ env = (env or self.current_env).upper()
+ silent = silent or self.SILENT_ERRORS_FOR_DYNACONF
+
+ if loaders is None:
+ self.pre_load(env, silent=silent, key=key)
+ settings_loader(
+ self, env=env, silent=silent, key=key, filename=filename
+ )
+ self.load_extra_yaml(env, silent, key) # DEPRECATED
+ enable_external_loaders(self)
+
+ loaders = self.loaders
+
+ for core_loader in loaders:
+ core_loader.load(self, env, silent=silent, key=key)
+
+ self.load_includes(env, silent=silent, key=key)
+ execute_hooks("post", self, env, silent=silent, key=key)
+
+ def pre_load(self, env, silent, key):
+ """Do we have any file to pre-load before main settings file?"""
+ preloads = self.get("PRELOAD_FOR_DYNACONF", [])
+ if preloads:
+ self.load_file(path=preloads, env=env, silent=silent, key=key)
+
+ def load_includes(self, env, silent, key):
+ """Do we have any nested includes we need to process?"""
+ includes = self.get("DYNACONF_INCLUDE", [])
+ includes.extend(ensure_a_list(self.get("INCLUDES_FOR_DYNACONF")))
+ if includes:
+ self.load_file(path=includes, env=env, silent=silent, key=key)
+ # ensure env vars are the last thing loaded after all includes
+ last_loader = self.loaders and self.loaders[-1]
+ if last_loader and last_loader == env_loader:
+ last_loader.load(self, env, silent, key)
+
+ def load_file(self, path=None, env=None, silent=True, key=None):
+ """Programmatically load files from ``path``.
+
+ :param path: A single filename or a file list
+ :param env: Which env to load from file (default current_env)
+ :param silent: Should errors be silenced?
+ :param key: Load a single key?
+ """
+ env = (env or self.current_env).upper()
+ files = ensure_a_list(path)
+ if files:
+ already_loaded = set()
+ for _filename in files:
+
+ if py_loader.try_to_load_from_py_module_name(
+ obj=self, name=_filename, silent=True
+ ):
+ # if it was possible to load from module name
+ # continue the loop.
+ continue
+
+ root_dir = str(self._root_path or os.getcwd())
+
+ # Issue #494
+ if (
+ isinstance(_filename, Path)
+ and str(_filename.parent) in root_dir
+ ): # pragma: no cover
+ filepath = str(_filename)
+ else:
+ filepath = os.path.join(root_dir, str(_filename))
+
+ paths = [
+ p
+ for p in sorted(glob.glob(filepath))
+ if ".local." not in p
+ ]
+ local_paths = [
+ p for p in sorted(glob.glob(filepath)) if ".local." in p
+ ]
+
+ # Handle possible glob matches, sorted alphanumerically
+ for path in paths + local_paths:
+ if path in already_loaded: # pragma: no cover
+ continue
+ settings_loader(
+ obj=self,
+ env=env,
+ silent=silent,
+ key=key,
+ filename=path,
+ )
+ already_loaded.add(path)
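+ # Sketch (hypothetical paths): globs are expanded and `.local.` files are
+ # loaded last so they override their base counterparts:
+ # settings.load_file(path="configs/*.toml")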
+
+ @property
+ def _root_path(self):
+ """ROOT_PATH_FOR_DYNACONF or the path of first loaded file or '.'"""
+
+ if self.ROOT_PATH_FOR_DYNACONF is not None:
+ return self.ROOT_PATH_FOR_DYNACONF
+
+ if self._loaded_files: # called once
+ root_path = os.path.dirname(self._loaded_files[0])
+ self.set("ROOT_PATH_FOR_DYNACONF", root_path)
+ return root_path
+
+ def load_extra_yaml(self, env, silent, key):
+ """This is deprecated, kept for compat
+
+ .. deprecated:: 1.0.0
+ Use multiple settings or INCLUDES_FOR_DYNACONF files instead.
+ """
+ if self.get("YAML") is not None:
+ warnings.warn(
+ "The use of YAML var is deprecated, please define multiple "
+ "filepaths instead: "
+ "e.g: SETTINGS_FILE_FOR_DYNACONF = "
+ "'settings.py,settings.yaml,settings.toml' or "
+ "INCLUDES_FOR_DYNACONF=['path.toml', 'folder/*']"
+ )
+ yaml_loader.load(
+ self,
+ env=env,
+ filename=self.find_file(self.get("YAML")),
+ silent=silent,
+ key=key,
+ )
+
+ def path_for(self, *args):
+ """Path containing _root_path"""
+ if args and args[0].startswith(os.path.sep):
+ return os.path.join(*args)
+ return os.path.join(self._root_path or os.getcwd(), *args)
+
+ def find_file(self, *args, **kwargs):
+ kwargs.setdefault("project_root", self._root_path)
+ kwargs.setdefault(
+ "skip_files", self.get("SKIP_FILES_FOR_DYNACONF", [])
+ )
+ return find_file(*args, **kwargs)
+
+ def flag(self, key, env=None):
+ """Feature flagging system
+ write flags to redis
+ $ dynaconf write redis -s DASHBOARD=1 -e premiumuser
+ meaning: Any premium user has DASHBOARD feature enabled
+
+ In your program do::
+
+ # premium user has access to dashboard?
+ >>> if settings.flag('dashboard', 'premiumuser'):
+ ... activate_dashboard()
+
+ The value is ensured to be loaded fresh from redis server
+
+ It also works with file settings, but redis is recommended
+ as the data can be reloaded as soon as it is updated.
+
+ :param key: The flag name
+ :param env: The env to look for
+ """
+ env = env or self.ENVVAR_PREFIX_FOR_DYNACONF or "DYNACONF"
+ with self.using_env(env):
+ value = self.get_fresh(key)
+ return value is True or value in true_values
+
+ def populate_obj(self, obj, keys=None, ignore=None):
+ """Given the `obj` populate it using self.store items.
+
+ :param obj: An object to be populated, a class instance.
+ :param keys: A list of keys to be included.
+ :param ignore: A list of keys to be excluded.
+ """
+ keys = keys or self.keys()
+ for key in keys:
+ key = upperfy(key)
+ if ignore and key in ignore:
+ continue
+ value = self.get(key, empty)
+ if value is not empty:
+ setattr(obj, key, value)
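+ # Sketch with an illustrative holder class:
+ # class Holder: pass
+ # settings.populate_obj(Holder, keys=["DEBUG"]) # Holder.DEBUG mirrors settings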
+
+ def dynaconf_clone(self):
+ """Clone the current settings object."""
+ try:
+ return copy.deepcopy(self)
+ except TypeError:
+ # can't deepcopy settings object because of module object
+ # being set as value in the settings dict
+ new_data = self.to_dict(internal=True)
+ new_data["dynaconf_skip_loaders"] = True
+ new_data["dynaconf_skip_validators"] = True
+ return Settings(**new_data)
+
+ @property
+ def dynaconf(self):
+ """A proxy to access internal methods and attributes
+
+ Starting in 3.0.0 Dynaconf allows first level lower case
+ keys that are not reserved keywords, so this is a proxy to
+ internal methods and attrs.
+ """
+
+ class AttrProxy:
+ def __init__(self, obj):
+ self.obj = obj
+
+ def __getattr__(self, name):
+ return getattr(self.obj, f"dynaconf_{name}")
+
+ return AttrProxy(self)
+
+ @property
+ def logger(self): # pragma: no cover
+ """backwards compatibility with pre 3.0 loaders
+ In dynaconf 3.0.0 logger and debug messages has been removed.
+ """
+ warnings.warn(
+ "logger and DEBUG messages has been removed on dynaconf 3.0.0"
+ )
+ import logging # noqa
+
+ return logging.getLogger("dynaconf")
+
+ def is_overridden(self, setting): # noqa
+ """This is to provide Django DJDT support: issue 382"""
+ return False
+
+
+"""Upper case default settings"""
+UPPER_DEFAULT_SETTINGS = [k for k in dir(default_settings) if k.isupper()]
+
+"""Attributes created on Settings before 3.0.0"""
+RESERVED_ATTRS = (
+ [
+ item[0]
+ for item in inspect.getmembers(LazySettings)
+ if not item[0].startswith("__")
+ ]
+ + [
+ item[0]
+ for item in inspect.getmembers(Settings)
+ if not item[0].startswith("__")
+ ]
+ + [
+ "_defaults",
+ "_deleted",
+ "_env_cache",
+ "_fresh",
+ "_kwargs",
+ "_loaded_by_loaders",
+ "_loaded_envs",
+ "_loaded_hooks",
+ "_loaded_py_modules",
+ "_loaded_files",
+ "_loaders",
+ "_not_installed_warnings",
+ "_store",
+ "_warn_dynaconf_global_settings",
+ "_should_load_dotenv",
+ "environ",
+ "SETTINGS_MODULE",
+ "filter_strategy",
+ "validators",
+ "_validate_only",
+ "_validate_exclude",
+ "_validate_only_current_env",
+ ]
+)
diff --git a/libs/dynaconf/cli.py b/libs/dynaconf/cli.py
new file mode 100644
index 000000000..8b8ab5d53
--- /dev/null
+++ b/libs/dynaconf/cli.py
@@ -0,0 +1,773 @@
+from __future__ import annotations
+
+import importlib
+import json
+import os
+import pprint
+import sys
+import warnings
+import webbrowser
+from contextlib import suppress
+from pathlib import Path
+
+from dynaconf import constants
+from dynaconf import default_settings
+from dynaconf import LazySettings
+from dynaconf import loaders
+from dynaconf import settings as legacy_settings
+from dynaconf.loaders.py_loader import get_module
+from dynaconf.utils import upperfy
+from dynaconf.utils.files import read_file
+from dynaconf.utils.functional import empty
+from dynaconf.utils.parse_conf import parse_conf_data
+from dynaconf.utils.parse_conf import unparse_conf_data
+from dynaconf.validator import ValidationError
+from dynaconf.validator import Validator
+from dynaconf.vendor import click
+from dynaconf.vendor import toml
+from dynaconf.vendor import tomllib
+
+os.environ["PYTHONIOENCODING"] = "utf-8"
+
+CWD = None
+try:
+ CWD = Path.cwd()
+except FileNotFoundError:
+ pass
+EXTS = ["ini", "toml", "yaml", "json", "py", "env"]
+WRITERS = ["ini", "toml", "yaml", "json", "py", "redis", "vault", "env"]
+
+ENC = default_settings.ENCODING_FOR_DYNACONF
+
+
+def set_settings(ctx, instance=None):
+ """Pick correct settings instance and set it to a global variable."""
+
+ global settings
+
+ settings = None
+
+ _echo_enabled = ctx.invoked_subcommand not in ["get", None]
+
+ if instance is not None:
+ if ctx.invoked_subcommand in ["init"]:
+ raise click.UsageError(
+ "-i/--instance option is not allowed for `init` command"
+ )
+ sys.path.insert(0, ".")
+ settings = import_settings(instance)
+ elif "FLASK_APP" in os.environ: # pragma: no cover
+ with suppress(ImportError, click.UsageError):
+ from flask.cli import ScriptInfo # noqa
+ from dynaconf import FlaskDynaconf
+
+ flask_app = ScriptInfo().load_app()
+ settings = FlaskDynaconf(flask_app, **flask_app.config).settings
+ _echo_enabled and click.echo(
+ click.style(
+ "Flask app detected", fg="white", bg="bright_black"
+ )
+ )
+ elif "DJANGO_SETTINGS_MODULE" in os.environ: # pragma: no cover
+ sys.path.insert(0, os.path.abspath(os.getcwd()))
+ try:
+ # Django extension v2
+ from django.conf import settings # noqa
+
+ settings.DYNACONF.configure()
+ except AttributeError:
+ settings = LazySettings()
+
+ if settings is not None:
+ _echo_enabled and click.echo(
+ click.style(
+ "Django app detected", fg="white", bg="bright_black"
+ )
+ )
+
+ if settings is None:
+
+ if instance is None and "--help" not in click.get_os_args():
+ if ctx.invoked_subcommand and ctx.invoked_subcommand not in [
+ "init",
+ ]:
+ warnings.warn(
+ "Starting on 3.x the param --instance/-i is now required. "
+ "try passing it `dynaconf -i path.to.settings <cmd>` "
+ "Example `dynaconf -i config.settings list` "
+ )
+ settings = legacy_settings
+ else:
+ settings = LazySettings(create_new_settings=True)
+ else:
+ settings = LazySettings()
+
+
+def import_settings(dotted_path):
+ """Import settings instance from python dotted path.
+
+ Last item in dotted path must be settings instance.
+
+ Example: import_settings('path.to.settings')
+ """
+ if "." in dotted_path:
+ module, name = dotted_path.rsplit(".", 1)
+ else:
+ raise click.UsageError(
+ f"invalid path to settings instance: {dotted_path}"
+ )
+ try:
+ module = importlib.import_module(module)
+ except ImportError as e:
+ raise click.UsageError(e)
+ except FileNotFoundError:
+ return
+ try:
+ return getattr(module, name)
+ except AttributeError as e:
+ raise click.UsageError(e)
+
+
+def split_vars(_vars):
+ """Splits values like foo=bar=zaz in {'foo': 'bar=zaz'}"""
+ return (
+ {
+ upperfy(k.strip()): parse_conf_data(
+ v.strip(), tomlfy=True, box_settings=settings
+ )
+ for k, _, v in [item.partition("=") for item in _vars]
+ }
+ if _vars
+ else {}
+ )
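+# Sketch: split_vars(["NAME=foo", "X=2"]) -> {"NAME": "foo", "X": 2};
+# values go through tomlfy parsing ("2" becomes the int 2) and keys are
+# upper-cased via upperfy().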
+
+
+def read_file_in_root_directory(*names, **kwargs):
+ """Read a file on root dir."""
+ return read_file(
+ os.path.join(os.path.dirname(__file__), *names),
+ encoding=kwargs.get("encoding", "utf-8"),
+ )
+
+
+def print_version(ctx, param, value):
+ if not value or ctx.resilient_parsing:
+ return
+ click.echo(read_file_in_root_directory("VERSION"))
+ ctx.exit()
+
+
+def open_docs(ctx, param, value): # pragma: no cover
+ if not value or ctx.resilient_parsing:
+ return
+ url = "https://dynaconf.com/"
+ webbrowser.open(url, new=2)
+ click.echo(f"{url} opened in browser")
+ ctx.exit()
+
+
+def show_banner(ctx, param, value):
+ """Shows dynaconf awesome banner"""
+ if not value or ctx.resilient_parsing:
+ return
+ set_settings(ctx)
+ click.echo(settings.dynaconf_banner)
+ click.echo("Learn more at: http://github.com/dynaconf/dynaconf")
+ ctx.exit()
+
+
+ "--version",
+ is_flag=True,
+ callback=print_version,
+ expose_value=False,
+ is_eager=True,
+ help="Show dynaconf version",
+)
+ "--docs",
+ is_flag=True,
+ callback=open_docs,
+ expose_value=False,
+ is_eager=True,
+ help="Open documentation in browser",
+)
+ "--banner",
+ is_flag=True,
+ callback=show_banner,
+ expose_value=False,
+ is_eager=True,
+ help="Show awesome banner",
+)
+ "--instance",
+ "-i",
+ default=None,
+ envvar="INSTANCE_FOR_DYNACONF",
+ help="Custom instance of LazySettings",
+)
+@click.pass_context
+def main(ctx, instance):
+ """Dynaconf - Command Line Interface\n
+ Documentation: https://dynaconf.com/
+ """
+ set_settings(ctx, instance)
+
+
+ "--format", "fileformat", "-f", default="toml", type=click.Choice(EXTS)
+)
+ "--path", "-p", default=CWD, help="defaults to current directory"
+)
+ "--env",
+ "-e",
+ default=None,
+ help="deprecated command (kept for compatibility but unused)",
+)
+ "--vars",
+ "_vars",
+ "-v",
+ multiple=True,
+ default=None,
+ help=(
+ "extra values to write to settings file "
+ "e.g: `dynaconf init -v NAME=foo -v X=2`"
+ ),
+)
+ "--secrets",
+ "_secrets",
+ "-s",
+ multiple=True,
+ default=None,
+ help=(
+ "secret key values to be written in .secrets "
+ "e.g: `dynaconf init -s TOKEN=kdslmflds"
+ ),
+)
[email protected]("--wg/--no-wg", default=True)
[email protected]("-y", default=False, is_flag=True)
[email protected]("--django", default=os.environ.get("DJANGO_SETTINGS_MODULE"))
+def init(ctx, fileformat, path, env, _vars, _secrets, wg, y, django):
+ """Inits a dynaconf project
+ By default it creates a settings.toml and a .secrets.toml
+ for [default|development|staging|testing|production|global] envs.
+
+ The format of the files can be changed passing
+ --format=yaml|json|ini|py.
+
+ This command must run on the project's root folder or you must pass
+ --path=/myproject/root/folder.
+
+ The --env/-e is deprecated (kept for compatibility but unused)
+ """
+ click.echo("⚙️ Configuring your Dynaconf environment")
+ click.echo("-" * 42)
+ if "FLASK_APP" in os.environ: # pragma: no cover
+ click.echo(
+ "⚠️ Flask detected, you can't use `dynaconf init` "
+ "on a flask project, instead go to dynaconf.com/flask/ "
+ "for more information.\n"
+ "Or add the following to your app.py\n"
+ "\n"
+ "from dynaconf import FlaskDynaconf\n"
+ "app = Flask(__name__)\n"
+ "FlaskDynaconf(app)\n"
+ )
+ exit(1)
+
+ path = Path(path)
+
+ if env is not None:
+ click.secho(
+ "⚠️ The --env/-e option is deprecated (kept for\n"
+ " compatibility but unused)\n",
+ fg="red",
+ bold=True,
+ # stderr=True,
+ )
+
+ if settings.get("create_new_settings") is True:
+ filename = Path("config.py")
+ if not filename.exists():
+ with open(filename, "w") as new_settings:
+ new_settings.write(
+ constants.INSTANCE_TEMPLATE.format(
+ settings_files=[
+ f"settings.{fileformat}",
+ f".secrets.{fileformat}",
+ ]
+ )
+ )
+ click.echo(
+ "🐍 The file `config.py` was generated.\n"
+ " on your code now use `from config import settings`.\n"
+ " (you must have `config` importable in your PYTHONPATH).\n"
+ )
+ else:
+ click.echo(
+ f"⁉️ You already have a {filename} so it is not going to be\n"
+ " generated for you, you will need to create your own \n"
+ " settings instance e.g: config.py \n"
+ " from dynaconf import Dynaconf \n"
+ " settings = Dynaconf(**options)\n"
+ )
+ sys.path.append(str(path))
+ set_settings(ctx, "config.settings")
+
+ env = settings.current_env.lower()
+
+ loader = importlib.import_module(f"dynaconf.loaders.{fileformat}_loader")
+ # Turn foo=bar=zaz in {'foo': 'bar=zaz'}
+ env_data = split_vars(_vars)
+ _secrets = split_vars(_secrets)
+
+ # create placeholder data for every env
+ settings_data = {}
+ secrets_data = {}
+ if env_data:
+ settings_data[env] = env_data
+ settings_data["default"] = {k: "a default value" for k in env_data}
+ if _secrets:
+ secrets_data[env] = _secrets
+ secrets_data["default"] = {k: "a default value" for k in _secrets}
+
+ if str(path).endswith(
+ constants.ALL_EXTENSIONS + ("py",)
+ ): # pragma: no cover # noqa
+ settings_path = path
+ secrets_path = path.parent / f".secrets.{fileformat}"
+ gitignore_path = path.parent / ".gitignore"
+ else:
+ if fileformat == "env":
+ if str(path) in (".env", "./.env"): # pragma: no cover
+ settings_path = path
+ elif str(path).endswith("/.env"): # pragma: no cover
+ settings_path = path
+ elif str(path).endswith(".env"): # pragma: no cover
+ settings_path = path.parent / ".env"
+ else:
+ settings_path = path / ".env"
+ Path.touch(settings_path)
+ secrets_path = None
+ else:
+ settings_path = path / f"settings.{fileformat}"
+ secrets_path = path / f".secrets.{fileformat}"
+ gitignore_path = path / ".gitignore"
+
+ if fileformat in ["py", "env"] or env == "main":
+ # for Main env, the Python and .env formats write a single env
+ settings_data = settings_data.get(env, {})
+ secrets_data = secrets_data.get(env, {})
+
+ if not y and settings_path and settings_path.exists(): # pragma: no cover
+ click.confirm(
+ f"⁉ {settings_path} exists do you want to overwrite it?",
+ abort=True,
+ )
+
+ if not y and secrets_path and secrets_path.exists(): # pragma: no cover
+ click.confirm(
+ f"⁉ {secrets_path} exists do you want to overwrite it?",
+ abort=True,
+ )
+
+ if settings_path:
+ loader.write(settings_path, settings_data, merge=True)
+ click.echo(
+ f"🎛️ {settings_path.name} created to hold your settings.\n"
+ )
+
+ if secrets_path:
+ loader.write(secrets_path, secrets_data, merge=True)
+ click.echo(f"🔑 {secrets_path.name} created to hold your secrets.\n")
+ ignore_line = ".secrets.*"
+ comment = "\n# Ignore dynaconf secret files\n"
+ if not gitignore_path.exists():
+ with open(str(gitignore_path), "w", encoding=ENC) as f:
+ f.writelines([comment, ignore_line, "\n"])
+ else:
+ existing = (
+ ignore_line in open(str(gitignore_path), encoding=ENC).read()
+ )
+ if not existing: # pragma: no cover
+ with open(str(gitignore_path), "a+", encoding=ENC) as f:
+ f.writelines([comment, ignore_line, "\n"])
+
+ click.echo(
+ f"🙈 the {secrets_path.name} is also included in `.gitignore` \n"
+ " beware to not push your secrets to a public repo \n"
+ " or use dynaconf builtin support for Vault Servers.\n"
+ )
+
+ if django: # pragma: no cover
+ dj_module, _ = get_module({}, django)
+ dj_filename = dj_module.__file__
+ if Path(dj_filename).exists():
+ click.confirm(
+ f"⁉ {dj_filename} is found do you want to add dynaconf?",
+ abort=True,
+ )
+ with open(dj_filename, "a") as dj_file:
+ dj_file.write(constants.DJANGO_PATCH)
+ click.echo("🎠 Now your Django settings are managed by Dynaconf")
+ else:
+ click.echo("❌ Django settings file not written.")
+ else:
+ click.echo(
+ "🎉 Dynaconf is configured! read more on https://dynaconf.com\n"
+ " Use `dynaconf -i config.settings list` to see your settings\n"
+ )
+
+
[email protected](name="get")
[email protected]("key", required=True)
+ "--default",
+ "-d",
+ default=empty,
+ help="Default value if settings doesn't exist",
+)
+ "--env", "-e", default=None, help="Filters the env to get the values"
+)
+ "--unparse",
+ "-u",
+ default=False,
+ help="Unparse data by adding markers such as @none, @int etc..",
+ is_flag=True,
+)
+def get(key, default, env, unparse):
+ """Returns the raw value for a settings key.
+
+ If the result is a dict, list or tuple it is printed as a valid json string.
+ """
+ if env:
+ env = env.strip()
+ if key:
+ key = key.strip()
+
+ if env:
+ settings.setenv(env)
+
+ if default is not empty:
+ result = settings.get(key, default)
+ else:
+ result = settings[key] # let the KeyError raise
+
+ if unparse:
+ result = unparse_conf_data(result)
+
+ if isinstance(result, (dict, list, tuple)):
+ result = json.dumps(result, sort_keys=True)
+
+ click.echo(result, nl=False)
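+ # Shell sketch (hypothetical key): `dynaconf -i config.settings get PORT -d 8080`
+ # prints `8080` whenever PORT is not defined in any source.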
+
+
[email protected](name="list")
+ "--env", "-e", default=None, help="Filters the env to get the values"
+)
[email protected]("--key", "-k", default=None, help="Filters a single key")
+ "--more",
+ "-m",
+ default=None,
+ help="Pagination more|less style",
+ is_flag=True,
+)
+ "--loader",
+ "-l",
+ default=None,
+ help="a loader identifier to filter e.g: toml|yaml",
+)
+ "--all",
+ "_all",
+ "-a",
+ default=False,
+ is_flag=True,
+ help="show dynaconf internal settings?",
+)
+ "--output",
+ "-o",
+ type=click.Path(writable=True, dir_okay=False),
+ default=None,
+ help="Filepath to write the listed values as json",
+)
+ "--output-flat",
+ "flat",
+ is_flag=True,
+ default=False,
+ help="Output file is flat (do not include [env] name)",
+)
+def _list(env, key, more, loader, _all=False, output=None, flat=False):
+ """Lists all user defined config values
+ and if `--all` is passed it also shows dynaconf internal variables.
+ """
+ if env:
+ env = env.strip()
+ if key:
+ key = key.strip()
+ if loader:
+ loader = loader.strip()
+
+ if env:
+ settings.setenv(env)
+
+ cur_env = settings.current_env.lower()
+
+ if cur_env == "main":
+ flat = True
+
+ click.echo(
+ click.style(
+ f"Working in {cur_env} environment ",
+ bold=True,
+ bg="bright_blue",
+ fg="bright_white",
+ )
+ )
+
+ if not loader:
+ data = settings.as_dict(env=env, internal=_all)
+ else:
+ identifier = f"{loader}_{cur_env}"
+ data = settings._loaded_by_loaders.get(identifier, {})
+ data = data or settings._loaded_by_loaders.get(loader, {})
+
+ # remove to avoid displaying twice
+ data.pop("SETTINGS_MODULE", None)
+
+ def color(_k):
+ if _k in dir(default_settings):
+ return "blue"
+ return "magenta"
+
+ def format_setting(_k, _v):
+ key = click.style(_k, bg=color(_k), fg="bright_white")
+ data_type = click.style(
+ f"<{type(_v).__name__}>", bg="bright_black", fg="bright_white"
+ )
+ value = pprint.pformat(_v)
+ return f"{key}{data_type} {value}"
+
+ if not key:
+ datalines = "\n".join(
+ format_setting(k, v)
+ for k, v in data.items()
+ if k not in data.get("RENAMED_VARS", [])
+ )
+ (click.echo_via_pager if more else click.echo)(datalines)
+ if output:
+ loaders.write(output, data, env=not flat and cur_env)
+ else:
+ key = upperfy(key)
+
+ try:
+ value = settings.get(key, empty)
+ except AttributeError:
+ value = empty
+
+ if value is empty:
+ click.echo(click.style("Key not found", bg="red", fg="white"))
+ return
+
+ click.echo(format_setting(key, value))
+ if output:
+ loaders.write(output, {key: value}, env=not flat and cur_env)
+
+ if env:
+ settings.setenv()
+
+
[email protected]("to", required=True, type=click.Choice(WRITERS))
+ "--vars",
+ "_vars",
+ "-v",
+ multiple=True,
+ default=None,
+ help=(
+ "key values to be written "
+ "e.g: `dynaconf write toml -e NAME=foo -e X=2"
+ ),
+)
+ "--secrets",
+ "_secrets",
+ "-s",
+ multiple=True,
+ default=None,
+ help=(
+ "secret key values to be written in .secrets "
+ "e.g: `dynaconf write toml -s TOKEN=kdslmflds -s X=2"
+ ),
+)
+ "--path",
+ "-p",
+ default=CWD,
+ help="defaults to current directory/settings.{ext}",
+)
+ "--env",
+ "-e",
+ default="default",
+ help=(
+ "env to write to defaults to DEVELOPMENT for files "
+ "for external sources like Redis and Vault "
+ "it will be DYNACONF or the value set in "
+ "$ENVVAR_PREFIX_FOR_DYNACONF"
+ ),
+)
[email protected]("-y", default=False, is_flag=True)
+def write(to, _vars, _secrets, path, env, y):
+ """Writes data to specific source"""
+ _vars = split_vars(_vars)
+ _secrets = split_vars(_secrets)
+ loader = importlib.import_module(f"dynaconf.loaders.{to}_loader")
+
+ if to in EXTS:
+
+ # Let's write to a file
+ path = Path(path)
+
+ if str(path).endswith(constants.ALL_EXTENSIONS + ("py",)):
+ settings_path = path
+ secrets_path = path.parent / f".secrets.{to}"
+ else:
+ if to == "env":
+ if str(path) in (".env", "./.env"): # pragma: no cover
+ settings_path = path
+ elif str(path).endswith("/.env"):
+ settings_path = path
+ elif str(path).endswith(".env"):
+ settings_path = path.parent / ".env"
+ else:
+ settings_path = path / ".env"
+ Path.touch(settings_path)
+ secrets_path = None
+ _vars.update(_secrets)
+ else:
+ settings_path = path / f"settings.{to}"
+ secrets_path = path / f".secrets.{to}"
+
+ if (
+ _vars and not y and settings_path and settings_path.exists()
+ ): # pragma: no cover # noqa
+ click.confirm(
+ f"{settings_path} exists do you want to overwrite it?",
+ abort=True,
+ )
+
+ if (
+ _secrets and not y and secrets_path and secrets_path.exists()
+ ): # pragma: no cover # noqa
+ click.confirm(
+ f"{secrets_path} exists do you want to overwrite it?",
+ abort=True,
+ )
+
+ if to not in ["py", "env"]:
+ if _vars:
+ _vars = {env: _vars}
+ if _secrets:
+ _secrets = {env: _secrets}
+
+ if _vars and settings_path:
+ loader.write(settings_path, _vars, merge=True)
+ click.echo(f"Data successful written to {settings_path}")
+
+ if _secrets and secrets_path:
+ loader.write(secrets_path, _secrets, merge=True)
+ click.echo(f"Data successful written to {secrets_path}")
+
+ else: # pragma: no cover
+ # let's write to an external source
+ with settings.using_env(env):
+ # make sure we're in the correct environment
+ loader.write(settings, _vars, **_secrets)
+ click.echo(f"Data successful written to {to}")
+
+
+ "--path", "-p", default=CWD, help="defaults to current directory"
+)
+def validate(path): # pragma: no cover
+ """Validates Dynaconf settings based on rules defined in
+ dynaconf_validators.toml"""
+ # reads the 'dynaconf_validators.toml' from path
+ # for each section register the validator for specific env
+ # call validate
+
+ path = Path(path)
+
+ if not str(path).endswith(".toml"):
+ path = path / "dynaconf_validators.toml"
+
+ if not path.exists(): # pragma: no cover # noqa
+ click.echo(click.style(f"{path} not found", fg="white", bg="red"))
+ sys.exit(1)
+
+ try: # try tomllib first
+ validation_data = tomllib.load(open(str(path), "rb"))
+ except UnicodeDecodeError: # fallback to legacy toml (TBR in 4.0.0)
+ warnings.warn(
+ "TOML files should have only UTF-8 encoded characters. "
+ "starting on 4.0.0 dynaconf will stop allowing invalid chars.",
+ )
+ validation_data = toml.load(
+ open(str(path), encoding=default_settings.ENCODING_FOR_DYNACONF),
+ )
+
+ success = True
+ for env, name_data in validation_data.items():
+ for name, data in name_data.items():
+ if not isinstance(data, dict): # pragma: no cover
+ click.echo(
+ click.style(
+ f"Invalid rule for parameter '{name}'",
+ fg="white",
+ bg="yellow",
+ )
+ )
+ else:
+ data.setdefault("env", env)
+ click.echo(
+ click.style(
+ f"Validating '{name}' with '{data}'",
+ fg="white",
+ bg="blue",
+ )
+ )
+ try:
+ Validator(name, **data).validate(settings)
+ except ValidationError as e:
+ click.echo(
+ click.style(f"Error: {e}", fg="white", bg="red")
+ )
+ success = False
+
+ if success:
+ click.echo(click.style("Validation success!", fg="white", bg="green"))
+ else:
+ click.echo(click.style("Validation error!", fg="white", bg="red"))
+ sys.exit(1)
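+ # Sketch of a dynaconf_validators.toml the command above consumes
+ # (illustrative rules; `env` -> `name` -> rule kwargs):
+ # [default]
+ # version = {must_exist=true}
+ # [production]
+ # host = {is_type_of="str"}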
+
+
+if __name__ == "__main__": # pragma: no cover
+ main()
diff --git a/libs/dynaconf/constants.py b/libs/dynaconf/constants.py
new file mode 100644
index 000000000..625627304
--- /dev/null
+++ b/libs/dynaconf/constants.py
@@ -0,0 +1,52 @@
+# pragma: no cover
+from __future__ import annotations
+
+INI_EXTENSIONS = (".ini", ".conf", ".properties")
+TOML_EXTENSIONS = (".toml", ".tml")
+YAML_EXTENSIONS = (".yaml", ".yml")
+JSON_EXTENSIONS = (".json",)
+
+ALL_EXTENSIONS = (
+ INI_EXTENSIONS + TOML_EXTENSIONS + YAML_EXTENSIONS + JSON_EXTENSIONS
+) # noqa
+
+EXTERNAL_LOADERS = {
+ "ENV": "dynaconf.loaders.env_loader",
+ "VAULT": "dynaconf.loaders.vault_loader",
+ "REDIS": "dynaconf.loaders.redis_loader",
+}
+
+DJANGO_PATCH = """
+# HERE STARTS DYNACONF EXTENSION LOAD (Keep at the very bottom of settings.py)
+# Read more at https://www.dynaconf.com/django/
+import dynaconf # noqa
+settings = dynaconf.DjangoDynaconf(__name__) # noqa
+# HERE ENDS DYNACONF EXTENSION LOAD (No more code below this line)
+ """
+
+INSTANCE_TEMPLATE = """
+from dynaconf import Dynaconf
+
+settings = Dynaconf(
+ envvar_prefix="DYNACONF",
+ settings_files={settings_files},
+)
+
+# `envvar_prefix` = export envvars with `export DYNACONF_FOO=bar`.
+# `settings_files` = Load these files in the order.
+"""
+
+EXTS = (
+ "py",
+ "toml",
+ "tml",
+ "yaml",
+ "yml",
+ "ini",
+ "conf",
+ "properties",
+ "json",
+)
+DEFAULT_SETTINGS_FILES = [f"settings.{ext}" for ext in EXTS] + [
+ f".secrets.{ext}" for ext in EXTS
+]
diff --git a/libs/dynaconf/contrib/__init__.py b/libs/dynaconf/contrib/__init__.py
new file mode 100644
index 000000000..2c0279a49
--- /dev/null
+++ b/libs/dynaconf/contrib/__init__.py
@@ -0,0 +1,5 @@
+from __future__ import annotations
+
+from dynaconf.contrib.django_dynaconf_v2 import DjangoDynaconf # noqa
+from dynaconf.contrib.flask_dynaconf import DynaconfConfig # noqa
+from dynaconf.contrib.flask_dynaconf import FlaskDynaconf # noqa
diff --git a/libs/dynaconf/contrib/django_dynaconf_v2.py b/libs/dynaconf/contrib/django_dynaconf_v2.py
new file mode 100644
index 000000000..aac4aab83
--- /dev/null
+++ b/libs/dynaconf/contrib/django_dynaconf_v2.py
@@ -0,0 +1,142 @@
+"""Dynaconf django extension
+
+In the `django_project/settings.py` put at the very bottom of the file:
+
+# HERE STARTS DYNACONF EXTENSION LOAD (Keep at the very bottom of settings.py)
+# Read more at https://www.dynaconf.com/django/
+import dynaconf # noqa
+settings = dynaconf.DjangoDynaconf(__name__) # noqa
+# HERE ENDS DYNACONF EXTENSION LOAD (No more code below this line)
+
+Now in the root of your Django project
+(the same folder where manage.py is located)
+
+Put your config files `settings.{py|yaml|toml|ini|json}`
+and/or `.secrets.{py|yaml|toml|ini|json}`
+
+On your project's root folder you can now start as::
+
+ DJANGO_DEBUG='false' \
+ DJANGO_ALLOWED_HOSTS='["localhost"]' \
+ python manage.py runserver
+"""
+from __future__ import annotations
+
+import inspect
+import os
+import sys
+
+import dynaconf
+
+try: # pragma: no cover
+ from django import conf
+ from django.conf import settings as django_settings
+
+ django_installed = True
+except ImportError: # pragma: no cover
+ django_installed = False
+
+
+def load(django_settings_module_name=None, **kwargs): # pragma: no cover
+ if not django_installed:
+ raise RuntimeError(
+ "To use this extension django must be installed "
+ "install it with: pip install django"
+ )
+
+ try:
+ django_settings_module = sys.modules[django_settings_module_name]
+ except KeyError:
+ django_settings_module = sys.modules[
+ os.environ["DJANGO_SETTINGS_MODULE"]
+ ]
+
+ settings_module_name = django_settings_module.__name__
+ settings_file = os.path.abspath(django_settings_module.__file__)
+ _root_path = os.path.dirname(settings_file)
+
+ # 1) Create the lazy settings object reusing settings_module consts
+ options = {
+ k.upper(): v
+ for k, v in django_settings_module.__dict__.items()
+ if k.isupper()
+ }
+ options.update(kwargs)
+ options.setdefault(
+ "SKIP_FILES_FOR_DYNACONF", [settings_file, "dynaconf_merge"]
+ )
+ options.setdefault("ROOT_PATH_FOR_DYNACONF", _root_path)
+ options.setdefault("ENVVAR_PREFIX_FOR_DYNACONF", "DJANGO")
+ options.setdefault("ENV_SWITCHER_FOR_DYNACONF", "DJANGO_ENV")
+ options.setdefault("ENVIRONMENTS_FOR_DYNACONF", True)
+ options.setdefault("load_dotenv", True)
+ options.setdefault(
+ "default_settings_paths", dynaconf.DEFAULT_SETTINGS_FILES
+ )
+
+ class UserSettingsHolder(dynaconf.LazySettings):
+ _django_override = True
+
+ lazy_settings = dynaconf.LazySettings(**options)
+ dynaconf.settings = lazy_settings # rebind the settings
+
+ # 2) Set all settings back to django_settings_module for 'django check'
+ lazy_settings.populate_obj(django_settings_module)
+
+ # 3) Bind `settings` and `DYNACONF`
+ setattr(django_settings_module, "settings", lazy_settings)
+ setattr(django_settings_module, "DYNACONF", lazy_settings)
+
+ # 4) keep django original settings
+ dj = {}
+ for key in dir(django_settings):
+ if (
+ key.isupper()
+ and (key != "SETTINGS_MODULE")
+ and key not in lazy_settings.store
+ ):
+ dj[key] = getattr(django_settings, key, None)
+ dj["ORIGINAL_SETTINGS_MODULE"] = django_settings.SETTINGS_MODULE
+
+ lazy_settings.update(dj)
+
+ # Allow dynaconf_hooks to be in the same folder as the django.settings
+ dynaconf.loaders.execute_hooks(
+ "post",
+ lazy_settings,
+ lazy_settings.current_env,
+ modules=[settings_module_name],
+ files=[settings_file],
+ )
+ lazy_settings._loaded_py_modules.insert(0, settings_module_name)
+
+ # 5) Patch django.conf.settings
+ class Wrapper:
+
+ # lazy_settings = conf.settings.lazy_settings
+
+ def __getattribute__(self, name):
+ if name == "settings":
+ return lazy_settings
+ if name == "UserSettingsHolder":
+ return UserSettingsHolder
+ return getattr(conf, name)
+
+ # This implementation is recommended by Guido van Rossum
+ # https://mail.python.org/pipermail/python-ideas/2012-May/014969.html
+ sys.modules["django.conf"] = Wrapper()
+
+ # 6) Enable standalone scripts to use Dynaconf
+ # This is for when `django.conf.settings` is imported directly
+ # on external `scripts` (out of Django's lifetime)
+ for stack_item in reversed(inspect.stack()):
+ if isinstance(
+ stack_item.frame.f_globals.get("settings"), conf.LazySettings
+ ):
+ stack_item.frame.f_globals["settings"] = lazy_settings
+
+ return lazy_settings
+
+
+# syntax sugar
+DjangoDynaconf = load # noqa
diff --git a/libs/dynaconf/contrib/flask_dynaconf.py b/libs/dynaconf/contrib/flask_dynaconf.py
new file mode 100644
index 000000000..a305194a2
--- /dev/null
+++ b/libs/dynaconf/contrib/flask_dynaconf.py
@@ -0,0 +1,230 @@
+from __future__ import annotations
+
+import warnings
+from collections import ChainMap
+from contextlib import suppress
+
+try:
+ from flask.config import Config
+
+ flask_installed = True
+except ImportError: # pragma: no cover
+ flask_installed = False
+ Config = object
+
+
+import dynaconf
+import pkg_resources
+
+
+class FlaskDynaconf:
+ """The arguments are.
+ app = The created app
+ dynaconf_args = Extra args to be passed to Dynaconf (validator for example)
+
+ All other values are stored as config vars specially::
+
+ ENVVAR_PREFIX_FOR_DYNACONF = env prefix for your envvars to be loaded
+ example:
+ if you set to `MYSITE` then
+ export MYSITE_SQL_PORT='@int 5445'
+
+ with that exported to env you access using:
+ app.config.SQL_PORT
+ app.config.get('SQL_PORT')
+ app.config.get('sql_port')
+ # get is case insensitive
+ app.config['SQL_PORT']
+
+ Dynaconf uses `@int, @bool, @float, @json` to cast
+ env vars
+
+ SETTINGS_FILE_FOR_DYNACONF = The name of the module or file to use as
+ default to load settings. If nothing is
+ passed it will be `settings.*` or value
+ found in `ENVVAR_FOR_DYNACONF`
+ Dynaconf supports
+ .py, .yml, .toml, ini, json
+
+ ATTENTION: Take a look at `settings.yml` and `.secrets.yml` to know the
+ required settings format.
+
+ Settings load order in Dynaconf:
+
+ - Load all defaults and Flask defaults
+ - Load all passed variables when applying FlaskDynaconf
+ - Update with data in settings files
+ - Update with data in environment vars `ENVVAR_FOR_DYNACONF_`
+
+
+ TOML files are very useful for env-based settings, let's say,
+ `production` and `development`.
+
+ You can also achieve the same using multiple `.py` files naming as
+ `settings.py`, `production_settings.py` and `development_settings.py`
+ (see examples/validator)
+
+ Example::
+
+ app = Flask(__name__)
+ FlaskDynaconf(
+ app,
+ ENV='MYSITE',
+ SETTINGS_FILE='settings.yml',
+ EXTRA_VALUE='You can add additional config vars here'
+ )
+
+ Take a look at examples/flask in Dynaconf repository
+
+ """
+
+ def __init__(
+ self,
+ app=None,
+ instance_relative_config=False,
+ dynaconf_instance=None,
+ extensions_list=False,
+ **kwargs,
+ ):
+ """kwargs holds initial dynaconf configuration"""
+ if not flask_installed: # pragma: no cover
+ raise RuntimeError(
+ "To use this extension Flask must be installed "
+ "install it with: pip install flask"
+ )
+ self.kwargs = {k.upper(): v for k, v in kwargs.items()}
+ kwargs.setdefault("ENVVAR_PREFIX", "FLASK")
+ env_prefix = f"{kwargs['ENVVAR_PREFIX']}_ENV" # FLASK_ENV
+ kwargs.setdefault("ENV_SWITCHER", env_prefix)
+ kwargs.setdefault("ENVIRONMENTS", True)
+ kwargs.setdefault("load_dotenv", True)
+ kwargs.setdefault(
+ "default_settings_paths", dynaconf.DEFAULT_SETTINGS_FILES
+ )
+
+ self.dynaconf_instance = dynaconf_instance
+ self.instance_relative_config = instance_relative_config
+ self.extensions_list = extensions_list
+ if app:
+ self.init_app(app, **kwargs)
+
+ def init_app(self, app, **kwargs):
+ """kwargs holds initial dynaconf configuration"""
+ self.kwargs.update(kwargs)
+ self.settings = self.dynaconf_instance or dynaconf.LazySettings(
+ **self.kwargs
+ )
+ dynaconf.settings = self.settings # rebind customized settings
+ app.config = self.make_config(app)
+ app.dynaconf = self.settings
+
+ if self.extensions_list:
+ if not isinstance(self.extensions_list, str):
+ self.extensions_list = "EXTENSIONS"
+ app.config.load_extensions(self.extensions_list)
+
+ def make_config(self, app):
+ root_path = app.root_path
+ if self.instance_relative_config: # pragma: no cover
+ root_path = app.instance_path
+ if self.dynaconf_instance:
+ self.settings.update(self.kwargs)
+ return DynaconfConfig(
+ root_path=root_path,
+ defaults=app.config,
+ _settings=self.settings,
+ _app=app,
+ )
+
+
+class DynaconfConfig(Config):
+ """
+    Replacement for `Flask.config_class` that responds as a Dynaconf instance.
+ """
+
+ def __init__(self, _settings, _app, *args, **kwargs):
+ """perform the initial load"""
+ super().__init__(*args, **kwargs)
+
+ # Bring Dynaconf instance value to Flask Config
+ Config.update(self, _settings.store)
+
+ self._settings = _settings
+ self._app = _app
+
+ def __contains__(self, item):
+ return hasattr(self, item)
+
+ def __getitem__(self, key):
+ try:
+ return self._settings[key]
+ except KeyError:
+ return Config.__getitem__(self, key)
+
+ def __setitem__(self, key, value):
+ """
+ Allows app.config['key'] = 'foo'
+ """
+ return self._settings.__setitem__(key, value)
+
+ def _chain_map(self):
+ return ChainMap(self._settings, dict(dict.items(self)))
+
+ def keys(self):
+ return self._chain_map().keys()
+
+ def values(self):
+ return self._chain_map().values()
+
+ def items(self):
+ return self._chain_map().items()
+
+ def setdefault(self, key, value=None):
+ return self._chain_map().setdefault(key, value)
+
+ def __iter__(self):
+ return self._chain_map().__iter__()
+
+ def __getattr__(self, name):
+ """
+        First try to get the value from Dynaconf, then from Flask Config
+ """
+ with suppress(AttributeError):
+ return getattr(self._settings, name)
+
+ with suppress(KeyError):
+ return self[name]
+
+ raise AttributeError(
+ f"'{self.__class__.__name__}' object has no attribute '{name}'"
+ )
+
+ def __call__(self, name, *args, **kwargs):
+ return self.get(name, *args, **kwargs)
+
+ def get(self, key, default=None):
+ """Gets config from dynaconf variables
+ if variables does not exists in dynaconf try getting from
+ `app.config` to support runtime settings."""
+ return self._settings.get(key, Config.get(self, key, default))
+
+ def load_extensions(self, key="EXTENSIONS", app=None):
+ """Loads flask extensions dynamically."""
+ app = app or self._app
+ extensions = app.config.get(key)
+ if not extensions:
+ warnings.warn(
+ f"Settings is missing {key} to load Flask Extensions",
+ RuntimeWarning,
+ )
+ return
+
+ for object_reference in app.config[key]:
+ # add a placeholder `name` to create a valid entry point
+ entry_point_spec = f"__name = {object_reference}"
+ # parse the entry point specification
+ entry_point = pkg_resources.EntryPoint.parse(entry_point_spec)
+ # dynamically resolve the entry point
+ initializer = entry_point.resolve()
+ # Invoke extension initializer
+ initializer(app)
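+
+
+# A minimal wiring sketch (illustrative; assumes a `settings.toml` exists
+# next to the app):
+#
+#     from flask import Flask
+#     app = Flask(__name__)
+#     FlaskDynaconf(app, settings_files=["settings.toml"])
+#     app.config["DEBUG"]      # resolved through Dynaconf first
+#     app.config.get("debug")  # get() is case insensitive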
diff --git a/libs/dynaconf/default_settings.py b/libs/dynaconf/default_settings.py
new file mode 100644
index 000000000..40c627b87
--- /dev/null
+++ b/libs/dynaconf/default_settings.py
@@ -0,0 +1,252 @@
+from __future__ import annotations
+
+import importlib
+import os
+import sys
+import warnings
+
+from dynaconf.utils import RENAMED_VARS
+from dynaconf.utils import upperfy
+from dynaconf.utils import warn_deprecations
+from dynaconf.utils.files import find_file
+from dynaconf.utils.parse_conf import parse_conf_data
+from dynaconf.vendor.dotenv import load_dotenv
+
+
+def try_renamed(key, value, older_key, current_key):
+ if value is None:
+ if key == current_key:
+ if older_key in os.environ:
+ warnings.warn(
+ f"{older_key} is deprecated please use {current_key}",
+ DeprecationWarning,
+ )
+ value = os.environ[older_key]
+ return value
+
+
+def get(key, default=None):
+ value = os.environ.get(upperfy(key))
+
+ # compatibility with renamed variables
+ for old, new in RENAMED_VARS.items():
+ value = try_renamed(key, value, old, new)
+
+ return (
+ parse_conf_data(value, tomlfy=True, box_settings={})
+ if value is not None
+ else default
+ )
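+
+# Illustrative example: with `export MERGE_ENABLED_FOR_DYNACONF=true` in the
+# process environment, get("MERGE_ENABLED_FOR_DYNACONF", False) returns the
+# boolean True, because raw values are parsed with `tomlfy=True` above.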
+
+
+def start_dotenv(obj=None, root_path=None):
+ # load_from_dotenv_if_installed
+ obj = obj or {}
+ _find_file = getattr(obj, "find_file", find_file)
+ root_path = (
+ root_path
+ or getattr(obj, "_root_path", None)
+ or get("ROOT_PATH_FOR_DYNACONF")
+ )
+
+ dotenv_path = (
+ obj.get("DOTENV_PATH_FOR_DYNACONF")
+ or get("DOTENV_PATH_FOR_DYNACONF")
+ or _find_file(".env", project_root=root_path)
+ )
+
+ load_dotenv(
+ dotenv_path,
+ verbose=obj.get("DOTENV_VERBOSE_FOR_DYNACONF", False),
+ override=obj.get("DOTENV_OVERRIDE_FOR_DYNACONF", False),
+ )
+
+ warn_deprecations(os.environ)
+
+
+def reload(load_dotenv=None, *args, **kwargs):
+ if load_dotenv:
+ start_dotenv(*args, **kwargs)
+ importlib.reload(sys.modules[__name__])
+
+
+# default proj root
+# pragma: no cover
+ROOT_PATH_FOR_DYNACONF = get("ROOT_PATH_FOR_DYNACONF", None)
+
+# Default settings file
+SETTINGS_FILE_FOR_DYNACONF = get("SETTINGS_FILE_FOR_DYNACONF", [])
+
+# Accept the misspelled (plural) `FILES` variant when/if it happens
+misspelled_files = get("SETTINGS_FILES_FOR_DYNACONF", None)
+if not SETTINGS_FILE_FOR_DYNACONF and misspelled_files is not None:
+    SETTINGS_FILE_FOR_DYNACONF = misspelled_files
+
+# # ENV SETTINGS
+# # In dynaconf 1.0.0 `NAMESPACE` got renamed to `ENV`
+
+
+# If provided, environments will be loaded separately
+ENVIRONMENTS_FOR_DYNACONF = get("ENVIRONMENTS_FOR_DYNACONF", False)
+MAIN_ENV_FOR_DYNACONF = get("MAIN_ENV_FOR_DYNACONF", "MAIN")
+
+# If False, dynaconf will allow access to first-level settings only in upper case
+LOWERCASE_READ_FOR_DYNACONF = get("LOWERCASE_READ_FOR_DYNACONF", True)
+
+# The environment variable to switch current env
+ENV_SWITCHER_FOR_DYNACONF = get(
+ "ENV_SWITCHER_FOR_DYNACONF", "ENV_FOR_DYNACONF"
+)
+
+# The current env by default is DEVELOPMENT
+# to switch it, `export ENV_FOR_DYNACONF=PRODUCTION`
+# or put that value in the .env file
+# this value is used only when reading files like .toml|yaml|ini|json
+ENV_FOR_DYNACONF = get(ENV_SWITCHER_FOR_DYNACONF, "DEVELOPMENT")
+
+# This variable exists to support `from_env` method
+FORCE_ENV_FOR_DYNACONF = get("FORCE_ENV_FOR_DYNACONF", None)
+
+# Default values are taken from the DEFAULT pseudo env
+# this value is used only when reading files like .toml|yaml|ini|json
+DEFAULT_ENV_FOR_DYNACONF = get("DEFAULT_ENV_FOR_DYNACONF", "DEFAULT")
+
+# Global values are taken from the DYNACONF env used for exported envvars
+# Values here overwrite all other envs
+# This namespace is used for files and also envvars
+ENVVAR_PREFIX_FOR_DYNACONF = get("ENVVAR_PREFIX_FOR_DYNACONF", "DYNACONF")
+
+# By default all environment variables (filtered by `envvar_prefix`) will
+# be pulled into the settings space. In case some of them are polluting the
+# space, setting this flag to `True` will change this behaviour.
+# Only "known" variables will be considered -- that is, variables defined
+# beforehand in settings files (or includes/preloads).
+IGNORE_UNKNOWN_ENVVARS_FOR_DYNACONF = get(
+ "IGNORE_UNKNOWN_ENVVARS_FOR_DYNACONF", False
+)
+
+AUTO_CAST_FOR_DYNACONF = get("AUTO_CAST_FOR_DYNACONF", True)
+
+# The default encoding to open settings files
+ENCODING_FOR_DYNACONF = get("ENCODING_FOR_DYNACONF", "utf-8")
+
+# Merge objects on load
+MERGE_ENABLED_FOR_DYNACONF = get("MERGE_ENABLED_FOR_DYNACONF", False)
+
+# Lookup keys considering dots as separators
+DOTTED_LOOKUP_FOR_DYNACONF = get("DOTTED_LOOKUP_FOR_DYNACONF", True)
+
+# By default `__` is the separator for nested env vars
+# export `DYNACONF__DATABASE__server=server.com`
+# export `DYNACONF__DATABASE__PORT=6666`
+# Should result in settings.DATABASE == {'server': 'server.com', 'PORT': 6666}
+# To disable it one can set `NESTED_SEPARATOR_FOR_DYNACONF=false`
+NESTED_SEPARATOR_FOR_DYNACONF = get("NESTED_SEPARATOR_FOR_DYNACONF", "__")
+
+# The env var specifying settings module
+ENVVAR_FOR_DYNACONF = get("ENVVAR_FOR_DYNACONF", "SETTINGS_FILE_FOR_DYNACONF")
+
+# Default values for redis configs
+default_redis = {
+ "host": get("REDIS_HOST_FOR_DYNACONF", "localhost"),
+ "port": int(get("REDIS_PORT_FOR_DYNACONF", 6379)),
+ "db": int(get("REDIS_DB_FOR_DYNACONF", 0)),
+ "decode_responses": get("REDIS_DECODE_FOR_DYNACONF", True),
+ "username": get("REDIS_USERNAME_FOR_DYNACONF", None),
+ "password": get("REDIS_PASSWORD_FOR_DYNACONF", None),
+}
+REDIS_FOR_DYNACONF = get("REDIS_FOR_DYNACONF", default_redis)
+REDIS_ENABLED_FOR_DYNACONF = get("REDIS_ENABLED_FOR_DYNACONF", False)
+
+# Hashicorp Vault Project
+vault_scheme = get("VAULT_SCHEME_FOR_DYNACONF", "http")
+vault_host = get("VAULT_HOST_FOR_DYNACONF", "localhost")
+vault_port = get("VAULT_PORT_FOR_DYNACONF", "8200")
+default_vault = {
+ "url": get(
+ "VAULT_URL_FOR_DYNACONF", f"{vault_scheme}://{vault_host}:{vault_port}"
+ ),
+ "token": get("VAULT_TOKEN_FOR_DYNACONF", None),
+ "cert": get("VAULT_CERT_FOR_DYNACONF", None),
+ "verify": get("VAULT_VERIFY_FOR_DYNACONF", None),
+ "timeout": get("VAULT_TIMEOUT_FOR_DYNACONF", None),
+ "proxies": get("VAULT_PROXIES_FOR_DYNACONF", None),
+ "allow_redirects": get("VAULT_ALLOW_REDIRECTS_FOR_DYNACONF", None),
+ "namespace": get("VAULT_NAMESPACE_FOR_DYNACONF", None),
+}
+VAULT_FOR_DYNACONF = get("VAULT_FOR_DYNACONF", default_vault)
+VAULT_ENABLED_FOR_DYNACONF = get("VAULT_ENABLED_FOR_DYNACONF", False)
+VAULT_PATH_FOR_DYNACONF = get("VAULT_PATH_FOR_DYNACONF", "dynaconf")
+VAULT_MOUNT_POINT_FOR_DYNACONF = get(
+ "VAULT_MOUNT_POINT_FOR_DYNACONF", "secret"
+)
+VAULT_ROOT_TOKEN_FOR_DYNACONF = get("VAULT_ROOT_TOKEN_FOR_DYNACONF", None)
+VAULT_KV_VERSION_FOR_DYNACONF = get("VAULT_KV_VERSION_FOR_DYNACONF", 1)
+VAULT_AUTH_WITH_IAM_FOR_DYNACONF = get(
+ "VAULT_AUTH_WITH_IAM_FOR_DYNACONF", False
+)
+VAULT_AUTH_ROLE_FOR_DYNACONF = get("VAULT_AUTH_ROLE_FOR_DYNACONF", None)
+VAULT_ROLE_ID_FOR_DYNACONF = get("VAULT_ROLE_ID_FOR_DYNACONF", None)
+VAULT_SECRET_ID_FOR_DYNACONF = get("VAULT_SECRET_ID_FOR_DYNACONF", None)
+
+# Only core loaders defined on this list will be invoked
+core_loaders = ["YAML", "TOML", "INI", "JSON", "PY"]
+CORE_LOADERS_FOR_DYNACONF = get("CORE_LOADERS_FOR_DYNACONF", core_loaders)
+
+# External Loaders to read vars from different data stores
+default_loaders = [
+ "dynaconf.loaders.env_loader",
+ # 'dynaconf.loaders.redis_loader'
+ # 'dynaconf.loaders.vault_loader'
+]
+LOADERS_FOR_DYNACONF = get("LOADERS_FOR_DYNACONF", default_loaders)
+
+# Should errors in loaders be silenced?
+SILENT_ERRORS_FOR_DYNACONF = get("SILENT_ERRORS_FOR_DYNACONF", True)
+
+# always fresh variables
+FRESH_VARS_FOR_DYNACONF = get("FRESH_VARS_FOR_DYNACONF", [])
+
+DOTENV_PATH_FOR_DYNACONF = get("DOTENV_PATH_FOR_DYNACONF", None)
+DOTENV_VERBOSE_FOR_DYNACONF = get("DOTENV_VERBOSE_FOR_DYNACONF", False)
+DOTENV_OVERRIDE_FOR_DYNACONF = get("DOTENV_OVERRIDE_FOR_DYNACONF", False)
+
+# Currently this is only used by the CLI. INSTANCE_FOR_DYNACONF specifies a
+# Python dotted path to a custom LazySettings instance. The last dotted-path
+# item should be an instance of LazySettings.
+INSTANCE_FOR_DYNACONF = get("INSTANCE_FOR_DYNACONF", None)
+
+# https://msg.pyyaml.org/load
+YAML_LOADER_FOR_DYNACONF = get("YAML_LOADER_FOR_DYNACONF", "safe_load")
+
+# Use commentjson? https://commentjson.readthedocs.io/en/latest/
+COMMENTJSON_ENABLED_FOR_DYNACONF = get(
+ "COMMENTJSON_ENABLED_FOR_DYNACONF", False
+)
+
+# Extra file, or list of files, in which to look for secrets.
+# Useful for CI environments like Jenkins, where you can export this
+# variable pointing to a local absolute path of the secrets file.
+SECRETS_FOR_DYNACONF = get("SECRETS_FOR_DYNACONF", None)
+
+# To include extra paths based on envvar
+INCLUDES_FOR_DYNACONF = get("INCLUDES_FOR_DYNACONF", [])
+
+# To pre-load extra paths based on envvar
+PRELOAD_FOR_DYNACONF = get("PRELOAD_FOR_DYNACONF", [])
+
+# Files to skip if found on search tree
+SKIP_FILES_FOR_DYNACONF = get("SKIP_FILES_FOR_DYNACONF", [])
+
+# YAML reads empty vars as None; should dynaconf apply validator defaults?
+# This is set to None, then evaluated on base.Settings.setdefault.
+# Possible values are True/False.
+APPLY_DEFAULT_ON_NONE_FOR_DYNACONF = get(
+ "APPLY_DEFAULT_ON_NONE_FOR_DYNACONF", None
+)
+
+
+# Backwards compatibility with renamed variables
+for old, new in RENAMED_VARS.items():
+ setattr(sys.modules[__name__], old, locals()[new])
diff --git a/libs/dynaconf/loaders/__init__.py b/libs/dynaconf/loaders/__init__.py
new file mode 100644
index 000000000..e18cb8434
--- /dev/null
+++ b/libs/dynaconf/loaders/__init__.py
@@ -0,0 +1,277 @@
+from __future__ import annotations
+
+import importlib
+import os
+
+from dynaconf import constants as ct
+from dynaconf import default_settings
+from dynaconf.loaders import ini_loader
+from dynaconf.loaders import json_loader
+from dynaconf.loaders import py_loader
+from dynaconf.loaders import toml_loader
+from dynaconf.loaders import yaml_loader
+from dynaconf.utils import deduplicate
+from dynaconf.utils import ensure_a_list
+from dynaconf.utils.boxing import DynaBox
+from dynaconf.utils.files import get_local_filename
+from dynaconf.utils.parse_conf import false_values
+
+
+def default_loader(obj, defaults=None):
+ """Loads default settings and check if there are overridings
+ exported as environment variables"""
+ defaults = defaults or {}
+ default_settings_values = {
+ key: value
+ for key, value in default_settings.__dict__.items() # noqa
+ if key.isupper()
+ }
+
+ all_keys = deduplicate(
+ list(defaults.keys()) + list(default_settings_values.keys())
+ )
+
+ for key in all_keys:
+ if not obj.exists(key):
+ value = defaults.get(key, default_settings_values.get(key))
+ obj.set(key, value)
+
+ # start dotenv to get default env vars from there
+ # check overrides in env vars
+ if obj.get("load_dotenv") is True:
+ default_settings.start_dotenv(obj)
+
+    # Deal with cases where a custom ENV_SWITCHER is provided
+ # Example: Flask and Django Extensions
+ env_switcher = defaults.get(
+ "ENV_SWITCHER_FOR_DYNACONF", "ENV_FOR_DYNACONF"
+ )
+
+ for key in all_keys:
+ if key not in default_settings_values.keys():
+ continue
+
+ env_value = obj.get_environ(
+ env_switcher if key == "ENV_FOR_DYNACONF" else key,
+ default="_not_found",
+ )
+
+ if env_value != "_not_found":
+ obj.set(key, env_value, tomlfy=True)
+
+
+def _run_hook_module(hook, hook_module, obj, key=None):
+ """Run the hook function from the settings obj.
+
+    Given a hook name, a hook_module and a settings object,
+    load the function and execute it if found.
+ """
+ if hook in obj._loaded_hooks.get(hook_module.__file__, {}):
+ # already loaded
+ return
+
+ if hook_module and getattr(hook_module, "_error", False):
+ if not isinstance(hook_module._error, FileNotFoundError):
+ raise hook_module._error
+
+ hook_func = getattr(hook_module, hook, None)
+ if hook_func:
+ hook_dict = hook_func(obj.dynaconf.clone())
+ if hook_dict:
+ merge = hook_dict.pop(
+ "dynaconf_merge", hook_dict.pop("DYNACONF_MERGE", False)
+ )
+ if key and key in hook_dict:
+ obj.set(key, hook_dict[key], tomlfy=False, merge=merge)
+ elif not key:
+ obj.update(hook_dict, tomlfy=False, merge=merge)
+ obj._loaded_hooks[hook_module.__file__][hook] = hook_dict
+
+
+def execute_hooks(
+ hook, obj, env=None, silent=True, key=None, modules=None, files=None
+):
+ """Execute dynaconf_hooks from module or filepath."""
+ if hook not in ["post"]:
+ raise ValueError(f"hook {hook} not supported yet.")
+
+ # try to load hooks using python module __name__
+ modules = modules or obj._loaded_py_modules
+ for loaded_module in modules:
+ hook_module_name = ".".join(
+ loaded_module.split(".")[:-1] + ["dynaconf_hooks"]
+ )
+ try:
+ hook_module = importlib.import_module(hook_module_name)
+ except (ImportError, TypeError):
+ # There was no hook on the same path as a python module
+ continue
+ else:
+ _run_hook_module(
+ hook=hook,
+ hook_module=hook_module,
+ obj=obj,
+ key=key,
+ )
+
+ # Try to load from python filename path
+ files = files or obj._loaded_files
+ for loaded_file in files:
+ hook_file = os.path.join(
+ os.path.dirname(loaded_file), "dynaconf_hooks.py"
+ )
+ hook_module = py_loader.import_from_filename(
+ obj, hook_file, silent=silent
+ )
+ if not hook_module:
+ # There was no hook on the same path as a python file
+ continue
+ _run_hook_module(
+ hook=hook,
+ hook_module=hook_module,
+ obj=obj,
+ key=key,
+ )
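+
+
+# A hooks module is a plain `dynaconf_hooks.py` placed next to a loaded
+# settings file or module. A minimal sketch (names are illustrative):
+#
+#     def post(settings):
+#         return {"COMPUTED_KEY": settings.get("SOME_KEY", 0) + 1}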
+
+
+def settings_loader(
+ obj, settings_module=None, env=None, silent=True, key=None, filename=None
+):
+ """Loads from defined settings module
+
+ :param obj: A dynaconf instance
+    :param settings_module: A path or a list of paths e.g. settings.toml
+    :param env: Env to look for data, defaults to: development
+    :param silent: if True, silence loading errors
+ :param key: Load a single key if provided
+ :param filename: optional filename to override the settings_module
+ """
+ if filename is None:
+ settings_module = settings_module or obj.settings_module
+ if not settings_module: # pragma: no cover
+ return
+ files = ensure_a_list(settings_module)
+ else:
+ files = ensure_a_list(filename)
+
+ files.extend(ensure_a_list(obj.get("SECRETS_FOR_DYNACONF", None)))
+
+ found_files = []
+ modules_names = []
+ for item in files:
+        item = str(item)  # Ensure str in case a LocalPath/Path is passed.
+ if item.endswith(ct.ALL_EXTENSIONS + (".py",)):
+ p_root = obj._root_path or (
+ os.path.dirname(found_files[0]) if found_files else None
+ )
+ found = obj.find_file(item, project_root=p_root)
+ if found:
+ found_files.append(found)
+ else:
+ # a bare python module name w/o extension
+ modules_names.append(item)
+
+ enabled_core_loaders = [
+ item.upper() for item in obj.get("CORE_LOADERS_FOR_DYNACONF") or []
+ ]
+
+ # add `.local.` to found_files list to search for local files.
+ found_files.extend(
+ [
+ get_local_filename(item)
+ for item in found_files
+ if ".local." not in str(item)
+ ]
+ )
+
+ for mod_file in modules_names + found_files:
+ # can be set to multiple files settings.py,settings.yaml,...
+
+ # Cascade all loaders
+ loaders = [
+ {"ext": ct.YAML_EXTENSIONS, "name": "YAML", "loader": yaml_loader},
+ {"ext": ct.TOML_EXTENSIONS, "name": "TOML", "loader": toml_loader},
+ {"ext": ct.INI_EXTENSIONS, "name": "INI", "loader": ini_loader},
+ {"ext": ct.JSON_EXTENSIONS, "name": "JSON", "loader": json_loader},
+ ]
+
+ for loader in loaders:
+ if loader["name"] not in enabled_core_loaders:
+ continue
+
+ if mod_file.endswith(loader["ext"]):
+ loader["loader"].load(
+ obj, filename=mod_file, env=env, silent=silent, key=key
+ )
+ continue
+
+ if mod_file.endswith(ct.ALL_EXTENSIONS):
+ continue
+
+ if "PY" not in enabled_core_loaders:
+ # pyloader is disabled
+ continue
+
+ # must be Python file or module
+ # load from default defined module settings.py or .secrets.py if exists
+ py_loader.load(obj, mod_file, key=key)
+
+ # load from the current env e.g: development_settings.py
+ env = env or obj.current_env
+ if mod_file.endswith(".py"):
+ if ".secrets.py" == mod_file:
+ tmpl = ".{0}_{1}{2}"
+ mod_file = "secrets.py"
+ else:
+ tmpl = "{0}_{1}{2}"
+
+ dirname = os.path.dirname(mod_file)
+ filename, extension = os.path.splitext(os.path.basename(mod_file))
+ new_filename = tmpl.format(env.lower(), filename, extension)
+ env_mod_file = os.path.join(dirname, new_filename)
+ global_filename = tmpl.format("global", filename, extension)
+ global_mod_file = os.path.join(dirname, global_filename)
+ else:
+ env_mod_file = f"{env.lower()}_{mod_file}"
+ global_mod_file = f"global_{mod_file}"
+
+ py_loader.load(
+ obj,
+ env_mod_file,
+ identifier=f"py_{env.upper()}",
+ silent=True,
+ key=key,
+ )
+
+ # load from global_settings.py
+ py_loader.load(
+ obj, global_mod_file, identifier="py_global", silent=True, key=key
+ )
+
+
+def enable_external_loaders(obj):
+ """Enable external service loaders like `VAULT_` and `REDIS_`
+ looks forenv variables like `REDIS_ENABLED_FOR_DYNACONF`
+ """
+ for name, loader in ct.EXTERNAL_LOADERS.items():
+ enabled = getattr(obj, f"{name.upper()}_ENABLED_FOR_DYNACONF", False)
+ if (
+ enabled
+ and enabled not in false_values
+ and loader not in obj.LOADERS_FOR_DYNACONF
+ ): # noqa
+ obj.LOADERS_FOR_DYNACONF.insert(0, loader)
+
+
+def write(filename, data, env=None):
+ """Writes `data` to `filename` infers format by file extension."""
+ loader_name = f"{filename.rpartition('.')[-1]}_loader"
+ loader = globals().get(loader_name)
+ if not loader:
+ raise OSError(f"{loader_name} cannot be found.")
+
+ data = DynaBox(data, box_settings={}).to_dict()
+ if loader is not py_loader and env and env not in data:
+ data = {env: data}
+
+ loader.write(filename, data, merge=False)
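+
+
+# Illustrative example: write("settings.toml", {"name": "Bruno"},
+# env="development") resolves toml_loader from the extension and, since the
+# target is not py_loader, nests the payload as {"development": {...}}.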
diff --git a/libs/dynaconf/loaders/base.py b/libs/dynaconf/loaders/base.py
new file mode 100644
index 000000000..dec5cb0af
--- /dev/null
+++ b/libs/dynaconf/loaders/base.py
@@ -0,0 +1,195 @@
+from __future__ import annotations
+
+import io
+import warnings
+
+from dynaconf.utils import build_env_list
+from dynaconf.utils import ensure_a_list
+from dynaconf.utils import upperfy
+
+
+class BaseLoader:
+ """Base loader for dynaconf source files.
+
+ :param obj: {[LazySettings]} -- [Dynaconf settings]
+ :param env: {[string]} -- [the current env to be loaded defaults to
+ [development]]
+ :param identifier: {[string]} -- [identifier ini, yaml, json, py, toml]
+ :param extensions: {[list]} -- [List of extensions with dots ['.a', '.b']]
+ :param file_reader: {[callable]} -- [reads file return dict]
+ :param string_reader: {[callable]} -- [reads string return dict]
+ """
+
+ def __init__(
+ self,
+ obj,
+ env,
+ identifier,
+ extensions,
+ file_reader,
+ string_reader,
+ opener_params=None,
+ ):
+ """Instantiates a loader for different sources"""
+ self.obj = obj
+ self.env = env or obj.current_env
+ self.identifier = identifier
+ self.extensions = extensions
+ self.file_reader = file_reader
+ self.string_reader = string_reader
+ self.opener_params = opener_params or {
+ "mode": "r",
+ "encoding": obj.get("ENCODING_FOR_DYNACONF", "utf-8"),
+ }
+
+ @staticmethod
+ def warn_not_installed(obj, identifier): # pragma: no cover
+ if identifier not in obj._not_installed_warnings:
+ warnings.warn(
+ f"{identifier} support is not installed in your environment. "
+ f"`pip install dynaconf[{identifier}]`"
+ )
+ obj._not_installed_warnings.append(identifier)
+
+ def load(self, filename=None, key=None, silent=True):
+ """
+        Reads and loads into `self.obj` a single key or all keys from source
+
+ :param filename: Optional filename to load
+ :param key: if provided load a single key
+ :param silent: if load errors should be silenced
+ """
+
+ filename = filename or self.obj.get(self.identifier.upper())
+ if not filename:
+ return
+
+ if not isinstance(filename, (list, tuple)):
+ split_files = ensure_a_list(filename)
+ if all([f.endswith(self.extensions) for f in split_files]): # noqa
+ files = split_files # it is a ['file.ext', ...]
+ else: # it is a single config as string
+ files = [filename]
+ else: # it is already a list/tuple
+ files = filename
+
+ source_data = self.get_source_data(files)
+
+ if self.obj.get("ENVIRONMENTS_FOR_DYNACONF") is False:
+ self._envless_load(source_data, silent, key)
+ else:
+ self._load_all_envs(source_data, silent, key)
+
+ def get_source_data(self, files):
+ """Reads each file and returns source data for each file
+ {"path/to/file.ext": {"key": "value"}}
+ """
+ data = {}
+ for source_file in files:
+ if source_file.endswith(self.extensions):
+ try:
+ with open(source_file, **self.opener_params) as open_file:
+ content = self.file_reader(open_file)
+ self.obj._loaded_files.append(source_file)
+ if content:
+ data[source_file] = content
+ except OSError as e:
+ if ".local." not in source_file:
+ warnings.warn(
+ f"{self.identifier}_loader: {source_file} "
+ f":{str(e)}"
+ )
+ else:
+ # for tests it is possible to pass string
+ content = self.string_reader(source_file)
+ if content:
+ data[source_file] = content
+ return data
+
+ def _envless_load(self, source_data, silent=True, key=None):
+ """Load all the keys from each file without env separation"""
+ for file_data in source_data.values():
+ self._set_data_to_obj(
+ file_data,
+ self.identifier,
+ key=key,
+ )
+
+ def _load_all_envs(self, source_data, silent=True, key=None):
+ """Load configs from files separating by each environment"""
+
+ for file_data in source_data.values():
+
+ # env name is checked in lower
+ file_data = {k.lower(): value for k, value in file_data.items()}
+
+ # is there a `dynaconf_merge` on top level of file?
+ file_merge = file_data.get("dynaconf_merge")
+
+ # is there a flag disabling dotted lookup on file?
+ file_dotted_lookup = file_data.get("dynaconf_dotted_lookup")
+
+ for env in build_env_list(self.obj, self.env):
+ env = env.lower() # lower for better comparison
+
+ try:
+ data = file_data[env] or {}
+ except KeyError:
+ if silent:
+ continue
+ raise
+
+ if not data:
+ continue
+
+ self._set_data_to_obj(
+ data,
+ f"{self.identifier}_{env}",
+ file_merge,
+ key,
+ file_dotted_lookup=file_dotted_lookup,
+ )
+
+ def _set_data_to_obj(
+ self,
+ data,
+ identifier,
+ file_merge=None,
+ key=False,
+ file_dotted_lookup=None,
+ ):
+ """Calls settings.set to add the keys"""
+ # data 1st level keys should be transformed to upper case.
+ data = {upperfy(k): v for k, v in data.items()}
+ if key:
+ key = upperfy(key)
+
+ if self.obj.filter_strategy:
+ data = self.obj.filter_strategy(data)
+
+ # is there a `dynaconf_merge` inside an `[env]`?
+ file_merge = file_merge or data.pop("DYNACONF_MERGE", False)
+
+ # If not passed or passed as None,
+ # look for inner [env] value, or default settings.
+ if file_dotted_lookup is None:
+ file_dotted_lookup = data.pop(
+ "DYNACONF_DOTTED_LOOKUP",
+ self.obj.get("DOTTED_LOOKUP_FOR_DYNACONF"),
+ )
+
+ if not key:
+ self.obj.update(
+ data,
+ loader_identifier=identifier,
+ merge=file_merge,
+ dotted_lookup=file_dotted_lookup,
+ )
+ elif key in data:
+ self.obj.set(
+ key,
+ data.get(key),
+ loader_identifier=identifier,
+ merge=file_merge,
+ dotted_lookup=file_dotted_lookup,
+ )
diff --git a/libs/dynaconf/loaders/env_loader.py b/libs/dynaconf/loaders/env_loader.py
new file mode 100644
index 000000000..779e9a4f6
--- /dev/null
+++ b/libs/dynaconf/loaders/env_loader.py
@@ -0,0 +1,108 @@
+from __future__ import annotations
+
+from os import environ
+
+from dynaconf.utils import missing
+from dynaconf.utils import upperfy
+from dynaconf.utils.parse_conf import parse_conf_data
+
+DOTENV_IMPORTED = False
+try:
+ from dynaconf.vendor.dotenv import cli as dotenv_cli
+
+ DOTENV_IMPORTED = True
+except ImportError:
+ pass
+except FileNotFoundError:
+ pass
+
+
+IDENTIFIER = "env"
+
+
+def load(obj, env=None, silent=True, key=None):
+ """Loads envvars with prefixes:
+
+ `DYNACONF_` (default global) or `$(ENVVAR_PREFIX_FOR_DYNACONF)_`
+ """
+ global_prefix = obj.get("ENVVAR_PREFIX_FOR_DYNACONF")
+ if global_prefix is False or global_prefix.upper() != "DYNACONF":
+ load_from_env(obj, "DYNACONF", key, silent, IDENTIFIER + "_global")
+
+ # Load the global env if exists and overwrite everything
+ load_from_env(obj, global_prefix, key, silent, IDENTIFIER + "_global")
+
+
+def load_from_env(
+ obj,
+ prefix=False,
+ key=None,
+ silent=False,
+ identifier=IDENTIFIER,
+ env=False, # backwards compatibility bc renamed param
+):
+ if prefix is False and env is not False:
+ prefix = env
+
+ env_ = ""
+ if prefix is not False:
+ if not isinstance(prefix, str):
+ raise TypeError("`prefix/env` must be str or False")
+
+ prefix = prefix.upper()
+ env_ = f"{prefix}_"
+
+ # Load a single environment variable explicitly.
+ if key:
+ key = upperfy(key)
+ value = environ.get(f"{env_}{key}")
+ if value:
+ try: # obj is a Settings
+ obj.set(key, value, loader_identifier=identifier, tomlfy=True)
+ except AttributeError: # obj is a dict
+ obj[key] = parse_conf_data(
+ value, tomlfy=True, box_settings=obj
+ )
+
+ # Load environment variables in bulk (when matching).
+ else:
+ # Only known variables should be loaded from environment?
+ ignore_unknown = obj.get("IGNORE_UNKNOWN_ENVVARS_FOR_DYNACONF")
+
+ trim_len = len(env_)
+ data = {
+ key[trim_len:]: parse_conf_data(
+ data, tomlfy=True, box_settings=obj
+ )
+ for key, data in environ.items()
+ if key.startswith(env_)
+ and not (
+ # Ignore environment variables that haven't been
+ # pre-defined in settings space.
+ ignore_unknown
+ and obj.get(key[trim_len:], default=missing) is missing
+ )
+ }
+ # Update the settings space based on gathered data from environment.
+ if data:
+ filter_strategy = obj.get("FILTER_STRATEGY")
+ if filter_strategy:
+ data = filter_strategy(data)
+ obj.update(data, loader_identifier=identifier)
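+
+# Illustrative example: with `export DYNACONF_PORT='@int 5000'` in the
+# process environment, load_from_env(settings, prefix="DYNACONF") sets
+# settings.PORT == 5000; the `@int` marker is cast by parse_conf_data.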
+
+
+def write(settings_path, settings_data, **kwargs):
+ """Write data to .env file"""
+ if not DOTENV_IMPORTED:
+ return
+ for key, value in settings_data.items():
+ quote_mode = (
+ isinstance(value, str)
+ and (value.startswith("'") or value.startswith('"'))
+ ) or isinstance(value, (list, dict))
+ dotenv_cli.set_key(
+ str(settings_path),
+ key,
+ str(value),
+ quote_mode="always" if quote_mode else "none",
+ )
diff --git a/libs/dynaconf/loaders/ini_loader.py b/libs/dynaconf/loaders/ini_loader.py
new file mode 100644
index 000000000..c3b56fd36
--- /dev/null
+++ b/libs/dynaconf/loaders/ini_loader.py
@@ -0,0 +1,62 @@
+from __future__ import annotations
+
+import io
+from pathlib import Path
+
+from dynaconf import default_settings
+from dynaconf.constants import INI_EXTENSIONS
+from dynaconf.loaders.base import BaseLoader
+from dynaconf.utils import object_merge
+
+try:
+ from configobj import ConfigObj
+except ImportError: # pragma: no cover
+ ConfigObj = None
+
+
+def load(obj, env=None, silent=True, key=None, filename=None):
+ """
+    Reads and loads into "obj" a single key or all keys from the source file.
+
+    :param obj: the settings instance
+    :param env: settings current env default='development'
+    :param silent: if True, silence loading errors
+ :param key: if defined load a single key, else load all in env
+ :param filename: Optional custom filename to load
+ :return: None
+ """
+ if ConfigObj is None: # pragma: no cover
+ BaseLoader.warn_not_installed(obj, "ini")
+ return
+
+ loader = BaseLoader(
+ obj=obj,
+ env=env,
+ identifier="ini",
+ extensions=INI_EXTENSIONS,
+ file_reader=lambda fileobj: ConfigObj(fileobj).dict(),
+ string_reader=lambda strobj: ConfigObj(strobj.split("\n")).dict(),
+ )
+ loader.load(
+ filename=filename,
+ key=key,
+ silent=silent,
+ )
+
+
+def write(settings_path, settings_data, merge=True):
+ """Write data to a settings file.
+
+ :param settings_path: the filepath
+ :param settings_data: a dictionary with data
+ :param merge: boolean if existing file should be merged with new data
+ """
+ settings_path = Path(settings_path)
+ if settings_path.exists() and merge: # pragma: no cover
+ with open(
+ str(settings_path), encoding=default_settings.ENCODING_FOR_DYNACONF
+ ) as open_file:
+ object_merge(ConfigObj(open_file).dict(), settings_data)
+ new = ConfigObj()
+ new.update(settings_data)
+ new.write(open(str(settings_path), "bw"))
diff --git a/libs/dynaconf/loaders/json_loader.py b/libs/dynaconf/loaders/json_loader.py
new file mode 100644
index 000000000..72c1e340e
--- /dev/null
+++ b/libs/dynaconf/loaders/json_loader.py
@@ -0,0 +1,80 @@
+from __future__ import annotations
+
+import io
+import json
+from pathlib import Path
+
+from dynaconf import default_settings
+from dynaconf.constants import JSON_EXTENSIONS
+from dynaconf.loaders.base import BaseLoader
+from dynaconf.utils import object_merge
+from dynaconf.utils.parse_conf import try_to_encode
+
+try: # pragma: no cover
+ import commentjson
+except ImportError: # pragma: no cover
+ commentjson = None
+
+
+def load(obj, env=None, silent=True, key=None, filename=None):
+ """
+    Reads and loads into "obj" a single key or all keys from the source file.
+
+    :param obj: the settings instance
+    :param env: settings current env default='development'
+    :param silent: if True, silence loading errors
+ :param key: if defined load a single key, else load all in env
+ :param filename: Optional custom filename to load
+ :return: None
+ """
+ if (
+ obj.get("COMMENTJSON_ENABLED_FOR_DYNACONF") and commentjson
+ ): # pragma: no cover # noqa
+ file_reader = commentjson.load
+ string_reader = commentjson.loads
+ else:
+ file_reader = json.load
+ string_reader = json.loads
+
+ loader = BaseLoader(
+ obj=obj,
+ env=env,
+ identifier="json",
+ extensions=JSON_EXTENSIONS,
+ file_reader=file_reader,
+ string_reader=string_reader,
+ )
+ loader.load(
+ filename=filename,
+ key=key,
+ silent=silent,
+ )
+
+
+def write(settings_path, settings_data, merge=True):
+ """Write data to a settings file.
+
+ :param settings_path: the filepath
+ :param settings_data: a dictionary with data
+ :param merge: boolean if existing file should be merged with new data
+ """
+ settings_path = Path(settings_path)
+ if settings_path.exists() and merge: # pragma: no cover
+ with open(
+ str(settings_path), encoding=default_settings.ENCODING_FOR_DYNACONF
+ ) as open_file:
+ object_merge(json.load(open_file), settings_data)
+
+ with open(
+ str(settings_path),
+ "w",
+ encoding=default_settings.ENCODING_FOR_DYNACONF,
+ ) as open_file:
+ json.dump(settings_data, open_file, cls=DynaconfEncoder)
+
+
+class DynaconfEncoder(json.JSONEncoder):
+ """Transform Dynaconf custom types instances to json representation"""
+
+ def default(self, o):
+ return try_to_encode(o, callback=super().default)
diff --git a/libs/dynaconf/loaders/py_loader.py b/libs/dynaconf/loaders/py_loader.py
new file mode 100644
index 000000000..f29645971
--- /dev/null
+++ b/libs/dynaconf/loaders/py_loader.py
@@ -0,0 +1,148 @@
+from __future__ import annotations
+
+import errno
+import importlib
+import inspect
+import io
+import types
+from contextlib import suppress
+from pathlib import Path
+
+from dynaconf import default_settings
+from dynaconf.utils import DynaconfDict
+from dynaconf.utils import object_merge
+from dynaconf.utils import upperfy
+from dynaconf.utils.files import find_file
+
+
+def load(obj, settings_module, identifier="py", silent=False, key=None):
+ """Tries to import a python module"""
+ mod, loaded_from = get_module(obj, settings_module, silent)
+ if not (mod and loaded_from):
+ return
+ load_from_python_object(obj, mod, settings_module, key, identifier)
+
+
+def load_from_python_object(
+ obj, mod, settings_module, key=None, identifier=None
+):
+ file_merge = getattr(mod, "dynaconf_merge", False) or getattr(
+ mod, "DYNACONF_MERGE", False
+ )
+ for setting in dir(mod):
+ # A setting var in a Python file should start with upper case
+ # valid: A_value=1, ABC_value=3 A_BBB__default=1
+ # invalid: a_value=1, MyValue=3
+ # This is to avoid loading functions, classes and built-ins
+ if setting.split("__")[0].isupper():
+ if key is None or key == setting:
+ setting_value = getattr(mod, setting)
+ obj.set(
+ setting,
+ setting_value,
+ loader_identifier=identifier,
+ merge=file_merge,
+ )
+
+ obj._loaded_py_modules.append(mod.__name__)
+ obj._loaded_files.append(mod.__file__)
+
+
+def try_to_load_from_py_module_name(
+ obj, name, key=None, identifier="py", silent=False
+):
+ """Try to load module by its string name.
+
+ Arguments:
+        obj {LazySettings} -- Dynaconf settings instance
+ name {str} -- Name of the module e.g: foo.bar.zaz
+
+ Keyword Arguments:
+ key {str} -- Single key to be loaded (default: {None})
+ identifier {str} -- Name of identifier to store (default: 'py')
+        silent {bool} -- Whether to raise or silence exceptions.
+ """
+ ctx = suppress(ImportError, TypeError) if silent else suppress()
+
+ with ctx:
+ mod = importlib.import_module(str(name))
+ load_from_python_object(obj, mod, name, key, identifier)
+ return True # loaded ok!
+    # if it reaches this point an exception occurred and the module was not found.
+ return False
+
+
+def get_module(obj, filename, silent=False):
+ try:
+ mod = importlib.import_module(filename)
+ loaded_from = "module"
+ mod.is_error = False
+ except (ImportError, TypeError):
+ mod = import_from_filename(obj, filename, silent=silent)
+ if mod and not mod._is_error:
+ loaded_from = "filename"
+ else:
+ # it is important to return None in case of not loaded
+ loaded_from = None
+ return mod, loaded_from
+
+
+def import_from_filename(obj, filename, silent=False): # pragma: no cover
+ """If settings_module is a filename path import it."""
+ if filename in [item.filename for item in inspect.stack()]:
+ raise ImportError(
+ "Looks like you are loading dynaconf "
+ f"from inside the {filename} file and then it is trying "
+ "to load itself entering in a circular reference "
+ "problem. To solve it you have to "
+ "invoke your program from another root folder "
+ "or rename your program file."
+ )
+
+ _find_file = getattr(obj, "find_file", find_file)
+ if not filename.endswith(".py"):
+ filename = f"{filename}.py"
+
+ if filename in default_settings.SETTINGS_FILE_FOR_DYNACONF:
+ silent = True
+    # strip the ".py" suffix (str.rstrip strips characters, not a suffix)
+    mod = types.ModuleType(filename[: -len(".py")])
+ mod.__file__ = filename
+ mod._is_error = False
+ mod._error = None
+ try:
+ with open(
+ _find_file(filename),
+ encoding=default_settings.ENCODING_FOR_DYNACONF,
+ ) as config_file:
+ exec(compile(config_file.read(), filename, "exec"), mod.__dict__)
+ except OSError as e:
+ e.strerror = (
+ f"py_loader: error loading file " f"({e.strerror} {filename})\n"
+ )
+ if silent and e.errno in (errno.ENOENT, errno.EISDIR):
+ return
+ mod._is_error = True
+ mod._error = e
+ return mod
+
+
+def write(settings_path, settings_data, merge=True):
+ """Write data to a settings file.
+
+ :param settings_path: the filepath
+ :param settings_data: a dictionary with data
+ :param merge: boolean if existing file should be merged with new data
+ """
+ settings_path = Path(settings_path)
+ if settings_path.exists() and merge: # pragma: no cover
+ existing = DynaconfDict()
+ load(existing, str(settings_path))
+ object_merge(existing, settings_data)
+ with open(
+ str(settings_path),
+ "w",
+ encoding=default_settings.ENCODING_FOR_DYNACONF,
+ ) as f:
+ f.writelines(
+ [f"{upperfy(k)} = {repr(v)}\n" for k, v in settings_data.items()]
+ )
diff --git a/libs/dynaconf/loaders/redis_loader.py b/libs/dynaconf/loaders/redis_loader.py
new file mode 100644
index 000000000..1123cb092
--- /dev/null
+++ b/libs/dynaconf/loaders/redis_loader.py
@@ -0,0 +1,108 @@
+from __future__ import annotations
+
+from dynaconf.utils import build_env_list
+from dynaconf.utils import upperfy
+from dynaconf.utils.parse_conf import parse_conf_data
+from dynaconf.utils.parse_conf import unparse_conf_data
+
+try:
+ from redis import StrictRedis
+except ImportError:
+ StrictRedis = None
+
+IDENTIFIER = "redis"
+
+
+def load(obj, env=None, silent=True, key=None):
+ """Reads and loads in to "settings" a single key or all keys from redis
+
+ :param obj: the settings instance
+ :param env: settings env default='DYNACONF'
+ :param silent: if errors should raise
+ :param key: if defined load a single key, else load all in env
+ :return: None
+ """
+ if StrictRedis is None:
+ raise ImportError(
+ "redis package is not installed in your environment. "
+ "`pip install dynaconf[redis]` or disable the redis loader with "
+ "export REDIS_ENABLED_FOR_DYNACONF=false"
+ )
+
+ redis = StrictRedis(**obj.get("REDIS_FOR_DYNACONF"))
+ prefix = obj.get("ENVVAR_PREFIX_FOR_DYNACONF")
+ # prefix is added to env_list to keep backwards compatibility
+ env_list = [prefix] + build_env_list(obj, env or obj.current_env)
+ for env_name in env_list:
+ holder = f"{prefix.upper()}_{env_name.upper()}"
+ try:
+ if key:
+ value = redis.hget(holder.upper(), key)
+ if value:
+ parsed_value = parse_conf_data(
+ value, tomlfy=True, box_settings=obj
+ )
+ if parsed_value:
+ obj.set(key, parsed_value)
+ else:
+ data = {
+ key: parse_conf_data(value, tomlfy=True, box_settings=obj)
+ for key, value in redis.hgetall(holder.upper()).items()
+ }
+ if data:
+ obj.update(data, loader_identifier=IDENTIFIER)
+ except Exception:
+ if silent:
+ return False
+ raise
+
+
+def write(obj, data=None, **kwargs):
+ """Write a value in to loader source
+
+ :param obj: settings object
+ :param data: vars to be stored
+ :param kwargs: vars to be stored
+ :return:
+ """
+ if obj.REDIS_ENABLED_FOR_DYNACONF is False:
+ raise RuntimeError(
+ "Redis is not configured \n"
+ "export REDIS_ENABLED_FOR_DYNACONF=true\n"
+ "and configure the REDIS_*_FOR_DYNACONF variables"
+ )
+ client = StrictRedis(**obj.REDIS_FOR_DYNACONF)
+ holder = obj.get("ENVVAR_PREFIX_FOR_DYNACONF").upper()
+ # add env to holder
+ holder = f"{holder}_{obj.current_env.upper()}"
+
+ data = data or {}
+ data.update(kwargs)
+ if not data:
+ raise AttributeError("Data must be provided")
+ redis_data = {
+ upperfy(key): unparse_conf_data(value) for key, value in data.items()
+ }
+ client.hmset(holder.upper(), redis_data)
+ load(obj)
+
+
+def delete(obj, key=None):
+ """
+    Delete a single key if specified, or the whole env if key is None
+ :param obj: settings object
+ :param key: key to delete from store location
+ :return: None
+ """
+ client = StrictRedis(**obj.REDIS_FOR_DYNACONF)
+ holder = obj.get("ENVVAR_PREFIX_FOR_DYNACONF").upper()
+ # add env to holder
+ holder = f"{holder}_{obj.current_env.upper()}"
+
+ if key:
+ client.hdel(holder.upper(), upperfy(key))
+ obj.unset(key)
+ else:
+ keys = client.hkeys(holder.upper())
+ client.delete(holder.upper())
+ obj.unset_all(keys)
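+
+
+# Illustrative round trip (assumes a reachable Redis,
+# REDIS_ENABLED_FOR_DYNACONF=true and the default DYNACONF prefix):
+#
+#     write(settings, {"NAME": "Bruno"})  # stores hash DYNACONF_DEVELOPMENT
+#     settings.NAME                       # -> "Bruno" (set back by load())
+#     delete(settings, "NAME")            # removes the field and unsets it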
diff --git a/libs/dynaconf/loaders/toml_loader.py b/libs/dynaconf/loaders/toml_loader.py
new file mode 100644
index 000000000..f4f6e17ae
--- /dev/null
+++ b/libs/dynaconf/loaders/toml_loader.py
@@ -0,0 +1,122 @@
+from __future__ import annotations
+
+import warnings
+from pathlib import Path
+
+from dynaconf import default_settings
+from dynaconf.constants import TOML_EXTENSIONS
+from dynaconf.loaders.base import BaseLoader
+from dynaconf.utils import object_merge
+from dynaconf.vendor import toml # Backwards compatibility with uiri/toml
+from dynaconf.vendor import tomllib # New tomllib stdlib on py3.11
+
+
+def load(obj, env=None, silent=True, key=None, filename=None):
+ """
+    Reads and loads into "obj" a single key or all keys from the source file.
+
+    :param obj: the settings instance
+    :param env: settings current env default='development'
+    :param silent: if True, silence loading errors
+ :param key: if defined load a single key, else load all in env
+ :param filename: Optional custom filename to load
+ :return: None
+ """
+
+ try:
+ loader = BaseLoader(
+ obj=obj,
+ env=env,
+ identifier="toml",
+ extensions=TOML_EXTENSIONS,
+ file_reader=tomllib.load,
+ string_reader=tomllib.loads,
+ opener_params={"mode": "rb"},
+ )
+ loader.load(
+ filename=filename,
+ key=key,
+ silent=silent,
+ )
+ except UnicodeDecodeError: # pragma: no cover
+ """
+        NOTE: Compat functions exist to keep backwards compatibility with
+        the new tomllib library. The old library was called `toml` and
+        the new one is called `tomllib`.
+
+        The old lib uiri/toml allowed unicode characters and read files
+        as strings.
+
+        The new tomllib (stdlib) does not allow unicode characters, only
+        utf-8 encoded, and reads files as binary.
+
+ NOTE: In dynaconf 4.0.0 we will drop support for the old library
+        removing the compat functions and calling the new lib directly.
+ """
+ loader = BaseLoader(
+ obj=obj,
+ env=env,
+ identifier="toml",
+ extensions=TOML_EXTENSIONS,
+ file_reader=toml.load,
+ string_reader=toml.loads,
+ )
+ loader.load(
+ filename=filename,
+ key=key,
+ silent=silent,
+ )
+
+ warnings.warn(
+ "TOML files should have only UTF-8 encoded characters. "
+ "starting on 4.0.0 dynaconf will stop allowing invalid chars.",
+ )
+
+
+def write(settings_path, settings_data, merge=True):
+ """Write data to a settings file.
+
+ :param settings_path: the filepath
+ :param settings_data: a dictionary with data
+ :param merge: boolean if existing file should be merged with new data
+ """
+ settings_path = Path(settings_path)
+ if settings_path.exists() and merge: # pragma: no cover
+ try: # tomllib first
+ with open(str(settings_path), "rb") as open_file:
+ object_merge(tomllib.load(open_file), settings_data)
+ except UnicodeDecodeError: # pragma: no cover
+ # uiri/toml fallback (TBR on 4.0.0)
+ with open(
+ str(settings_path),
+ encoding=default_settings.ENCODING_FOR_DYNACONF,
+ ) as open_file:
+ object_merge(toml.load(open_file), settings_data)
+
+ try: # tomllib first
+ with open(str(settings_path), "wb") as open_file:
+ tomllib.dump(encode_nulls(settings_data), open_file)
+ except UnicodeEncodeError: # pragma: no cover
+ # uiri/toml fallback (TBR on 4.0.0)
+ with open(
+ str(settings_path),
+ "w",
+ encoding=default_settings.ENCODING_FOR_DYNACONF,
+ ) as open_file:
+ toml.dump(encode_nulls(settings_data), open_file)
+
+ warnings.warn(
+ "TOML files should have only UTF-8 encoded characters. "
+ "starting on 4.0.0 dynaconf will stop allowing invalid chars.",
+ )
+
+
+def encode_nulls(data):
+ """TOML does not support `None` so this function transforms to '@none '."""
+ if data is None:
+ return "@none "
+ if isinstance(data, dict):
+ return {key: encode_nulls(value) for key, value in data.items()}
+ elif isinstance(data, (list, tuple)):
+ return [encode_nulls(item) for item in data]
+ return data
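+
+
+# Illustrative example:
+#     encode_nulls({"a": None, "b": [None, 1]})
+#     # -> {"a": "@none ", "b": ["@none ", 1]}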
diff --git a/libs/dynaconf/loaders/vault_loader.py b/libs/dynaconf/loaders/vault_loader.py
new file mode 100644
index 000000000..d816ffc63
--- /dev/null
+++ b/libs/dynaconf/loaders/vault_loader.py
@@ -0,0 +1,186 @@
+# docker run -e 'VAULT_DEV_ROOT_TOKEN_ID=myroot' -p 8200:8200 vault
+# pip install hvac
+from __future__ import annotations
+
+from dynaconf.utils import build_env_list
+from dynaconf.utils.parse_conf import parse_conf_data
+
+try:
+ import boto3
+except ImportError:
+ boto3 = None
+
+try:
+ from hvac import Client
+ from hvac.exceptions import InvalidPath
+except ImportError:
+ raise ImportError(
+ "vault package is not installed in your environment. "
+ "`pip install dynaconf[vault]` or disable the vault loader with "
+ "export VAULT_ENABLED_FOR_DYNACONF=false"
+ )
+
+
+IDENTIFIER = "vault"
+
+
+# backwards compatibility
+_get_env_list = build_env_list
+
+
+def get_client(obj):
+ client = Client(
+ **{k: v for k, v in obj.VAULT_FOR_DYNACONF.items() if v is not None}
+ )
+ if obj.VAULT_ROLE_ID_FOR_DYNACONF is not None:
+ client.auth.approle.login(
+ role_id=obj.VAULT_ROLE_ID_FOR_DYNACONF,
+ secret_id=obj.get("VAULT_SECRET_ID_FOR_DYNACONF"),
+ )
+ elif obj.VAULT_ROOT_TOKEN_FOR_DYNACONF is not None:
+ client.token = obj.VAULT_ROOT_TOKEN_FOR_DYNACONF
+ elif obj.VAULT_AUTH_WITH_IAM_FOR_DYNACONF:
+ if boto3 is None:
+ raise ImportError(
+ "boto3 package is not installed in your environment. "
+ "`pip install boto3` or disable the VAULT_AUTH_WITH_IAM"
+ )
+
+ session = boto3.Session()
+ credentials = session.get_credentials()
+ client.auth.aws.iam_login(
+ credentials.access_key,
+ credentials.secret_key,
+ credentials.token,
+ role=obj.VAULT_AUTH_ROLE_FOR_DYNACONF,
+ )
+ assert client.is_authenticated(), (
+ "Vault authentication error: is VAULT_TOKEN_FOR_DYNACONF or "
+ "VAULT_ROLE_ID_FOR_DYNACONF defined?"
+ )
+ client.secrets.kv.default_kv_version = obj.VAULT_KV_VERSION_FOR_DYNACONF
+ return client
+
+
+def load(obj, env=None, silent=None, key=None):
+ """Reads and loads in to "settings" a single key or all keys from vault
+
+ :param obj: the settings instance
+ :param env: settings env default='DYNACONF'
+ :param silent: if errors should raise
+ :param key: if defined load a single key, else load all in env
+ :return: None
+ """
+ client = get_client(obj)
+ try:
+ if obj.VAULT_KV_VERSION_FOR_DYNACONF == 2:
+ dirs = client.secrets.kv.v2.list_secrets(
+ path=obj.VAULT_PATH_FOR_DYNACONF,
+ mount_point=obj.VAULT_MOUNT_POINT_FOR_DYNACONF,
+ )["data"]["keys"]
+ else:
+ dirs = client.secrets.kv.v1.list_secrets(
+ path=obj.VAULT_PATH_FOR_DYNACONF,
+ mount_point=obj.VAULT_MOUNT_POINT_FOR_DYNACONF,
+ )["data"]["keys"]
+ except InvalidPath:
+ # The given path is not a directory
+ dirs = []
+    # First look for secrets in the environment-less store
+    if not obj.ENVIRONMENTS_FOR_DYNACONF:
+        # By adding '', dynaconf will now read secrets from the
+        # environment-less store, which are not written by `dynaconf write`
+        # to the Vault store
+        env_list = [obj.MAIN_ENV_FOR_DYNACONF.lower(), ""]
+    # Finally, look for secrets in all the environments
+ else:
+ env_list = dirs + build_env_list(obj, env)
+ for env in env_list:
+ path = "/".join([obj.VAULT_PATH_FOR_DYNACONF, env])
+ try:
+ if obj.VAULT_KV_VERSION_FOR_DYNACONF == 2:
+ data = client.secrets.kv.v2.read_secret_version(
+ path, mount_point=obj.VAULT_MOUNT_POINT_FOR_DYNACONF
+ )
+ else:
+ data = client.secrets.kv.read_secret(
+ "data/" + path,
+ mount_point=obj.VAULT_MOUNT_POINT_FOR_DYNACONF,
+ )
+ except InvalidPath:
+ # If the path doesn't exist, ignore it and set data to None
+ data = None
+ if data:
+ # There seems to be a data dict within a data dict,
+ # extract the inner data
+ data = data.get("data", {}).get("data", {})
+ try:
+ if (
+ obj.VAULT_KV_VERSION_FOR_DYNACONF == 2
+ and obj.ENVIRONMENTS_FOR_DYNACONF
+ and data
+ ):
+ data = data.get("data", {})
+ if data and key:
+ value = parse_conf_data(
+ data.get(key), tomlfy=True, box_settings=obj
+ )
+ if value:
+ obj.set(key, value)
+ elif data:
+ obj.update(data, loader_identifier=IDENTIFIER, tomlfy=True)
+ except Exception:
+ if silent:
+ return False
+ raise
+
+
+def write(obj, data=None, **kwargs):
+ """Write a value in to loader source
+
+ :param obj: settings object
+ :param data: vars to be stored
+ :param kwargs: vars to be stored
+ :return:
+ """
+ if obj.VAULT_ENABLED_FOR_DYNACONF is False:
+ raise RuntimeError(
+ "Vault is not configured \n"
+ "export VAULT_ENABLED_FOR_DYNACONF=true\n"
+ "and configure the VAULT_FOR_DYNACONF_* variables"
+ )
+ data = data or {}
+ data.update(kwargs)
+ if not data:
+ raise AttributeError("Data must be provided")
+ data = {"data": data}
+ client = get_client(obj)
+ if obj.VAULT_KV_VERSION_FOR_DYNACONF == 1:
+ mount_point = obj.VAULT_MOUNT_POINT_FOR_DYNACONF + "/data"
+ else:
+ mount_point = obj.VAULT_MOUNT_POINT_FOR_DYNACONF
+ path = "/".join([obj.VAULT_PATH_FOR_DYNACONF, obj.current_env.lower()])
+ client.secrets.kv.create_or_update_secret(
+ path, secret=data, mount_point=mount_point
+ )
+ load(obj)
+
+
+def list_envs(obj, path=""):
+ """
+ This function is a helper to get a list of all the existing envs in
+    the source of data; the use case is:
+    existing_envs = vault_loader.list_envs(settings)
+    for env in existing_envs:
+ with settings.using_env(env): # switch to the env
+ # do something with a key of that env
+
+ :param obj: settings object
+ :param path: path to the vault secrets
+ :return: list containing all the keys at the given path
+ """
+ client = get_client(obj)
+ path = path or obj.get("VAULT_PATH_FOR_DYNACONF")
+ try:
+ return client.list(f"/secret/metadata/{path}")["data"]["keys"]
+ except TypeError:
+ return []
diff --git a/libs/dynaconf/loaders/yaml_loader.py b/libs/dynaconf/loaders/yaml_loader.py
new file mode 100644
index 000000000..37b0b6c6b
--- /dev/null
+++ b/libs/dynaconf/loaders/yaml_loader.py
@@ -0,0 +1,87 @@
+from __future__ import annotations
+
+import io
+from pathlib import Path
+from warnings import warn
+
+from dynaconf import default_settings
+from dynaconf.constants import YAML_EXTENSIONS
+from dynaconf.loaders.base import BaseLoader
+from dynaconf.utils import object_merge
+from dynaconf.utils.parse_conf import try_to_encode
+from dynaconf.vendor.ruamel import yaml
+
+# Add support for Dynaconf Lazy values to YAML dumper
+yaml.SafeDumper.yaml_representers[
+ None
+] = lambda self, data: yaml.representer.SafeRepresenter.represent_str(
+ self, try_to_encode(data)
+)
+
+
+def load(obj, env=None, silent=True, key=None, filename=None):
+ """
+    Reads and loads into "obj" a single key or all keys from the source file.
+
+    :param obj: the settings instance
+    :param env: settings current env default='development'
+    :param silent: if True, silence loading errors
+ :param key: if defined load a single key, else load all in env
+ :param filename: Optional custom filename to load
+ :return: None
+ """
+ # Resolve the loaders
+ # https://github.com/yaml/pyyaml/wiki/PyYAML-yaml.load(input)-Deprecation
+ # Possible values are `safe_load, full_load, unsafe_load, load`
+ yaml_reader = getattr(
+ yaml, obj.get("YAML_LOADER_FOR_DYNACONF"), yaml.safe_load
+ )
+ if yaml_reader.__name__ == "unsafe_load": # pragma: no cover
+ warn(
+ "yaml.unsafe_load is deprecated."
+ " Please read https://msg.pyyaml.org/load for full details."
+ " Try to use full_load or safe_load."
+ )
+
+ loader = BaseLoader(
+ obj=obj,
+ env=env,
+ identifier="yaml",
+ extensions=YAML_EXTENSIONS,
+ file_reader=yaml_reader,
+ string_reader=yaml_reader,
+ )
+ loader.load(
+ filename=filename,
+ key=key,
+ silent=silent,
+ )
+
+
+def write(settings_path, settings_data, merge=True):
+ """Write data to a settings file.
+
+ :param settings_path: the filepath
+ :param settings_data: a dictionary with data
+ :param merge: boolean if existing file should be merged with new data
+ """
+ settings_path = Path(settings_path)
+ if settings_path.exists() and merge: # pragma: no cover
+ with open(
+ str(settings_path), encoding=default_settings.ENCODING_FOR_DYNACONF
+ ) as open_file:
+ object_merge(yaml.safe_load(open_file), settings_data)
+
+ with open(
+ str(settings_path),
+ "w",
+ encoding=default_settings.ENCODING_FOR_DYNACONF,
+ ) as open_file:
+ yaml.dump(
+ settings_data,
+ open_file,
+ Dumper=yaml.dumper.SafeDumper,
+ explicit_start=True,
+ indent=2,
+ default_flow_style=False,
+ )
diff --git a/libs/dynaconf/strategies/__init__.py b/libs/dynaconf/strategies/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/libs/dynaconf/strategies/__init__.py
diff --git a/libs/dynaconf/strategies/filtering.py b/libs/dynaconf/strategies/filtering.py
new file mode 100644
index 000000000..ef1f51ff9
--- /dev/null
+++ b/libs/dynaconf/strategies/filtering.py
@@ -0,0 +1,19 @@
+from __future__ import annotations
+
+from dynaconf.utils import upperfy
+
+
+class PrefixFilter:
+ def __init__(self, prefix):
+ if not isinstance(prefix, str):
+ raise TypeError("`SETTINGS_FILE_PREFIX` must be str")
+ self.prefix = f"{upperfy(prefix)}_"
+
+ def __call__(self, data):
+ """Filter incoming data by prefix"""
+ len_prefix = len(self.prefix)
+ return {
+ upperfy(key[len_prefix:]): value
+ for key, value in data.items()
+ if upperfy(key[:len_prefix]) == self.prefix
+ }
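+
+
+# Illustrative example:
+#     PrefixFilter("app")({"APP_NAME": "bazarr", "OTHER": 1})
+#     # -> {"NAME": "bazarr"}  (keys without the APP_ prefix are dropped)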
diff --git a/libs/dynaconf/test_settings.py b/libs/dynaconf/test_settings.py
new file mode 100644
index 000000000..3c43ec903
--- /dev/null
+++ b/libs/dynaconf/test_settings.py
@@ -0,0 +1,8 @@
+# pragma: no cover
+from __future__ import annotations
+
+TESTING = True
+LOADERS_FOR_DYNACONF = [
+ "dynaconf.loaders.env_loader",
+ # 'dynaconf.loaders.redis_loader'
+]
diff --git a/libs/dynaconf/utils/__init__.py b/libs/dynaconf/utils/__init__.py
new file mode 100644
index 000000000..2d1a8c119
--- /dev/null
+++ b/libs/dynaconf/utils/__init__.py
@@ -0,0 +1,461 @@
+from __future__ import annotations
+
+import os
+import warnings
+from collections import defaultdict
+from json import JSONDecoder
+from typing import Any
+from typing import Iterator
+from typing import TYPE_CHECKING
+
+
+if TYPE_CHECKING: # pragma: no cover
+ from dynaconf.utils.boxing import DynaBox
+ from dynaconf.base import LazySettings, Settings
+
+
+BANNER = """
+██████╗ ██╗ ██╗███╗ ██╗ █████╗ ██████╗ ██████╗ ███╗ ██╗███████╗
+██╔══██╗╚██╗ ██╔╝████╗ ██║██╔══██╗██╔════╝██╔═══██╗████╗ ██║██╔════╝
+██║ ██║ ╚████╔╝ ██╔██╗ ██║███████║██║ ██║ ██║██╔██╗ ██║█████╗
+██║ ██║ ╚██╔╝ ██║╚██╗██║██╔══██║██║ ██║ ██║██║╚██╗██║██╔══╝
+██████╔╝ ██║ ██║ ╚████║██║ ██║╚██████╗╚██████╔╝██║ ╚████║██║
+╚═════╝ ╚═╝ ╚═╝ ╚═══╝╚═╝ ╚═╝ ╚═════╝ ╚═════╝ ╚═╝ ╚═══╝╚═╝
+"""
+
+if os.name == "nt": # pragma: no cover
+ # windows can't handle the above charmap
+ BANNER = "DYNACONF"
+
+
+def object_merge(
+ old: Any, new: Any, unique: bool = False, full_path: list[str] = None
+) -> Any:
+ """
+    Recursively merge two data structures; `new` is mutated in-place.
+
+    :param old: The existing data.
+    :param new: The new data to get old values merged into.
+    :param unique: When set to True, list items from `old` that already
+        exist in `new` are not re-inserted.
+ :param full_path: Indicates the elements of a tree.
+ """
+ if full_path is None:
+ full_path = []
+ if old == new or old is None or new is None:
+ # Nothing to merge
+ return new
+
+ if isinstance(old, list) and isinstance(new, list):
+
+ # 726: allow local_merge to override global merge on lists
+ if "dynaconf_merge_unique" in new:
+ new.remove("dynaconf_merge_unique")
+ unique = True
+
+ for item in old[::-1]:
+ if unique and item in new:
+ continue
+ new.insert(0, item)
+
+ if isinstance(old, dict) and isinstance(new, dict):
+ existing_value = recursive_get(old, full_path) # doesn't handle None
+        # Need to make every `None` on `_store` be a wrapped `LazyNone`
+
+        # data coming from the source in `new` can be mixed case: KEY4|key4|Key4
+        # data existing in the `old` object has the correct case: key4|KEY4|Key4
+        # so we need to ensure that new keys match the existing keys
+ for new_key in list(new.keys()):
+ correct_case_key = find_the_correct_casing(new_key, old)
+ if correct_case_key:
+ new[correct_case_key] = new.pop(new_key)
+
+ for old_key, value in old.items():
+
+            # This is for when the dict exists internally,
+            # but the new value at the end of the full path is the same
+ if (
+ existing_value is not None
+ and old_key.lower() == full_path[-1].lower()
+ and existing_value is value
+ ):
+ # Here Be The Dragons
+ # This comparison needs to be smarter
+ continue
+
+ if old_key not in new:
+ new[old_key] = value
+ else:
+ object_merge(
+ value,
+ new[old_key],
+ full_path=full_path[1:] if full_path else None,
+ )
+
+ handle_metavalues(old, new)
+
+ return new
+
+
+def recursive_get(
+ obj: DynaBox | dict[str, int] | dict[str, str | int],
+ names: list[str] | None,
+) -> Any:
+    """Given a dot-accessible object and a list of names such as `foo.bar.zaz`,
+    recursively resolve the names one by one: obj.foo.bar.zaz.
+ """
+ if not names:
+ return
+ head, *tail = names
+ result = getattr(obj, head, None)
+ if not tail:
+ return result
+ return recursive_get(result, tail)
+
+
+def handle_metavalues(
+ old: DynaBox | dict[str, int] | dict[str, str | int], new: Any
+) -> None:
+ """Cleanup of MetaValues on new dict"""
+
+ for key in list(new.keys()):
+
+ # MetaValue instances
+ if getattr(new[key], "_dynaconf_reset", False): # pragma: no cover
+            # a Reset on `new` triggers reassignment of existing data
+ new[key] = new[key].unwrap()
+ elif getattr(new[key], "_dynaconf_del", False):
+ # a Del on `new` triggers deletion of existing data
+ new.pop(key, None)
+ old.pop(key, None)
+ elif getattr(new[key], "_dynaconf_merge", False):
+ # a Merge on `new` triggers merge with existing data
+ new[key] = object_merge(
+ old.get(key), new[key].unwrap(), unique=new[key].unique
+ )
+
+ # Data structures containing merge tokens
+ if isinstance(new.get(key), (list, tuple)):
+ has_merge = "dynaconf_merge" in new[key]
+ has_merge_unique = "dynaconf_merge_unique" in new[key]
+ if has_merge or has_merge_unique:
+ value = list(new[key])
+ unique = False
+
+ try:
+ value.remove("dynaconf_merge")
+ except ValueError:
+ value.remove("dynaconf_merge_unique")
+ unique = True
+
+ for item in old.get(key)[::-1]:
+ if unique and item in value:
+ continue
+ value.insert(0, item)
+
+ new[key] = value
+
+ elif isinstance(new.get(key), dict):
+ local_merge = new[key].pop(
+ "dynaconf_merge", new[key].pop("dynaconf_merge_unique", None)
+ )
+ if local_merge not in (True, False, None) and not new[key]:
+ # In case `dynaconf_merge:` holds value not boolean - ref #241
+ new[key] = local_merge
+
+ if local_merge:
+ new[key] = object_merge(old.get(key), new[key])
+
+
+class DynaconfDict(dict):
+    """A dict representing an empty Dynaconf object,
+    useful for running loaders into a dict for testing"""
+
+ def __init__(self, *args, **kwargs):
+ self._fresh = False
+ self._loaded_envs = []
+ self._loaded_hooks = defaultdict(dict)
+ self._loaded_py_modules = []
+ self._loaded_files = []
+ self._deleted = set()
+ self._store = {}
+ self._env_cache = {}
+ self._loaded_by_loaders = {}
+ self._loaders = []
+ self._defaults = {}
+ self.environ = os.environ
+ self.SETTINGS_MODULE = None
+ self.filter_strategy = kwargs.get("filter_strategy", None)
+ self._not_installed_warnings = []
+ self._validate_only = kwargs.pop("validate_only", None)
+ self._validate_exclude = kwargs.pop("validate_exclude", None)
+ super().__init__(*args, **kwargs)
+
+ def set(self, key: str, value: str, *args, **kwargs) -> None:
+ self[key] = value
+
+ @staticmethod
+ def get_environ(key, default=None): # pragma: no cover
+ return os.environ.get(key, default)
+
+ def exists(self, key: str, **kwargs) -> bool:
+ return self.get(key, missing) is not missing
+
+
+RENAMED_VARS = {
+ # old: new
+ "DYNACONF_NAMESPACE": "ENV_FOR_DYNACONF",
+ "NAMESPACE_FOR_DYNACONF": "ENV_FOR_DYNACONF",
+ "DYNACONF_SETTINGS_MODULE": "SETTINGS_FILE_FOR_DYNACONF",
+ "DYNACONF_SETTINGS": "SETTINGS_FILE_FOR_DYNACONF",
+ "SETTINGS_MODULE": "SETTINGS_FILE_FOR_DYNACONF",
+ "SETTINGS_MODULE_FOR_DYNACONF": "SETTINGS_FILE_FOR_DYNACONF",
+ "PROJECT_ROOT": "ROOT_PATH_FOR_DYNACONF",
+ "PROJECT_ROOT_FOR_DYNACONF": "ROOT_PATH_FOR_DYNACONF",
+ "DYNACONF_SILENT_ERRORS": "SILENT_ERRORS_FOR_DYNACONF",
+ "DYNACONF_ALWAYS_FRESH_VARS": "FRESH_VARS_FOR_DYNACONF",
+ "BASE_NAMESPACE_FOR_DYNACONF": "DEFAULT_ENV_FOR_DYNACONF",
+ "GLOBAL_ENV_FOR_DYNACONF": "ENVVAR_PREFIX_FOR_DYNACONF",
+}
+
+
+def compat_kwargs(kwargs: dict[str, Any]) -> None:
+    """To keep backwards compatibility, rename the kwargs to their new names"""
+ warn_deprecations(kwargs)
+ for old, new in RENAMED_VARS.items():
+ if old in kwargs:
+ kwargs[new] = kwargs[old]
+ # update cross references
+ for c_old, c_new in RENAMED_VARS.items():
+ if c_new == new:
+ kwargs[c_old] = kwargs[new]
+
+
+class Missing:
+ """
+ Sentinel value object/singleton used to differentiate between ambiguous
+ situations where `None` is a valid value.
+ """
+
+ def __bool__(self) -> bool:
+ """Respond to boolean duck-typing."""
+ return False
+
+ def __eq__(self, other: DynaBox | Missing) -> bool:
+ """Equality check for a singleton."""
+
+ return isinstance(other, self.__class__)
+
+ # Ensure compatibility with Python 2.x
+ __nonzero__ = __bool__
+
+ def __repr__(self) -> str:
+ """
+ Unambiguously identify this string-based representation of Missing,
+ used as a singleton.
+ """
+ return "<dynaconf.missing>"
+
+
+missing = Missing()
+
+
+def deduplicate(list_object: list[str]) -> list[str]:
+    """Rebuild `list_object` removing duplicates and keeping order"""
+ new = []
+ for item in list_object:
+ if item not in new:
+ new.append(item)
+ return new
+
+
+def warn_deprecations(data: Any) -> None:
+ for old, new in RENAMED_VARS.items():
+ if old in data:
+ warnings.warn(
+                f"You are using {old} which is a deprecated setting, "
+                f"replace it with {new}",
+ DeprecationWarning,
+ )
+
+
+def trimmed_split(
+ s: str, seps: str | tuple[str, str] = (";", ",")
+) -> list[str]:
+    """Given a string s, split it by the first of the seps found in it."""
+ for sep in seps:
+ if sep not in s:
+ continue
+ data = [item.strip() for item in s.strip().split(sep)]
+ return data
+    return [s]  # raw, unsplit
+
+
+def ensure_a_list(data: Any) -> list[int] | list[str]:
+ """Ensure data is a list or wrap it in a list"""
+ if not data:
+ return []
+ if isinstance(data, (list, tuple, set)):
+ return list(data)
+ if isinstance(data, str):
+ data = trimmed_split(data) # settings.toml,other.yaml
+ return data
+ return [data]
+
+
+def build_env_list(obj: Settings | LazySettings, env: str | None) -> list[str]:
+ """Build env list for loaders to iterate.
+
+ Arguments:
+ obj {LazySettings} -- A Dynaconf settings instance
+ env {str} -- The current env to be loaded
+
+ Returns:
+ [str] -- A list of string names of the envs to load.
+ """
+ # add the [default] env
+ env_list = [(obj.get("DEFAULT_ENV_FOR_DYNACONF") or "default").lower()]
+
+    # compatibility with older versions that still use [dynaconf] as
+    # the [default] env
+ global_env = (obj.get("ENVVAR_PREFIX_FOR_DYNACONF") or "dynaconf").lower()
+ if global_env not in env_list:
+ env_list.append(global_env)
+
+ # add the current env
+ current_env = obj.current_env
+ if current_env and current_env.lower() not in env_list:
+ env_list.append(current_env.lower())
+
+ # add a manually set env
+ if env and env.lower() not in env_list:
+ env_list.append(env.lower())
+
+ # add the [global] env
+ env_list.append("global")
+
+ return env_list
+
+
+def upperfy(key: str) -> str:
+    """Receive a string key and return its upper-cased version.
+
+ Example:
+
+ input: foo
+ output: FOO
+
+ input: foo_bar
+ output: FOO_BAR
+
+ input: foo__bar__ZAZ
+ output: FOO__bar__ZAZ
+
+ Arguments:
+ key {str} -- A string key that may contain dunders `__`
+
+ Returns:
+ The key as upper case but keeping the nested elements.
+ """
+ key = str(key)
+ if "__" in key:
+ parts = key.split("__")
+ return "__".join([parts[0].upper()] + parts[1:])
+ return key.upper()
+
+
+def multi_replace(text: str, patterns: dict[str, str]) -> str:
+ """Replaces multiple pairs in a string
+
+ Arguments:
+ text {str} -- A "string text"
+ patterns {dict} -- A dict of {"old text": "new text"}
+
+ Returns:
+ text -- str
+ """
+ for old, new in patterns.items():
+ text = text.replace(old, new)
+ return text
+
+
+def extract_json_objects(
+ text: str, decoder: JSONDecoder = JSONDecoder()
+) -> Iterator[dict[str, int | dict[Any, Any]]]:
+ """Find JSON objects in text, and yield the decoded JSON data
+
+ Does not attempt to look for JSON arrays, text, or other JSON types outside
+ of a parent JSON object.
+
+ """
+ pos = 0
+ while True:
+ match = text.find("{", pos)
+ if match == -1:
+ break
+ try:
+ result, index = decoder.raw_decode(text[match:])
+ yield result
+ pos = match + index
+ except ValueError:
+ pos = match + 1
+
+
+def recursively_evaluate_lazy_format(
+ value: Any, settings: Settings | LazySettings
+) -> Any:
+ """Given a value as a data structure, traverse all its members
+    to find Lazy values and evaluate them.
+
+ For example: Evaluate values inside lists and dicts
+ """
+
+ if getattr(value, "_dynaconf_lazy_format", None):
+ value = value(settings)
+
+ if isinstance(value, list):
+ # Keep the original type, can be a BoxList
+ value = value.__class__(
+ [
+ recursively_evaluate_lazy_format(item, settings)
+ for item in value
+ ]
+ )
+
+ return value
+
+
+def isnamedtupleinstance(value):
+ """Check if value is a namedtuple instance
+
+ stackoverflow.com/questions/2166818/
+ how-to-check-if-an-object-is-an-instance-of-a-namedtuple
+ """
+
+ t = type(value)
+ b = t.__bases__
+ if len(b) != 1 or b[0] != tuple:
+ return False
+ f = getattr(t, "_fields", None)
+ if not isinstance(f, tuple):
+ return False
+ return all(type(n) == str for n in f)
+
+
+def find_the_correct_casing(key: str, data: dict[str, Any]) -> str | None:
+ """Given a key, find the proper casing in data
+
+ Arguments:
+ key {str} -- A key to be searched in data
+ data {dict} -- A dict to be searched
+
+ Returns:
+ str -- The proper casing of the key in data
+ """
+ if key in data:
+ return key
+ for k in data.keys():
+ if k.lower() == key.lower():
+ return k
+ if k.replace(" ", "_").lower() == key.lower():
+ return k
+ return None
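A short sketch of the merge and string helpers above (assuming `dynaconf` is
importable); note that `object_merge` mutates `new` in place:

    from dynaconf.utils import object_merge, trimmed_split, upperfy

    new = {"database": {"port": 5433}}
    object_merge({"database": {"host": "localhost", "port": 5432}}, new)
    print(new)  # {'database': {'port': 5433, 'host': 'localhost'}}

    print(upperfy("foo__bar__ZAZ"))         # FOO__bar__ZAZ (dunder parts kept)
    print(trimmed_split("a.toml, b.yaml"))  # ['a.toml', 'b.yaml']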
diff --git a/libs/dynaconf/utils/boxing.py b/libs/dynaconf/utils/boxing.py
new file mode 100644
index 000000000..ff78f1246
--- /dev/null
+++ b/libs/dynaconf/utils/boxing.py
@@ -0,0 +1,81 @@
+from __future__ import annotations
+
+import inspect
+from functools import wraps
+
+from dynaconf.utils import find_the_correct_casing
+from dynaconf.utils import recursively_evaluate_lazy_format
+from dynaconf.utils.functional import empty
+from dynaconf.vendor.box import Box
+
+
+def evaluate_lazy_format(f):
+    """Marks a method on a DynaBox instance to
+    lazily evaluate LazyFormat objects upon access."""
+
+ @wraps(f)
+ def evaluate(dynabox, item, *args, **kwargs):
+ value = f(dynabox, item, *args, **kwargs)
+ settings = dynabox._box_config["box_settings"]
+
+ if getattr(value, "_dynaconf_lazy_format", None):
+ dynabox._box_config[
+ f"raw_{item.lower()}"
+ ] = f"@{value.formatter.token} {value.value}"
+
+ return recursively_evaluate_lazy_format(value, settings)
+
+ return evaluate
+
+
+class DynaBox(Box):
+    """Specialized Box for dynaconf;
+    it allows items/attrs to be found in both upper and lower case"""
+
+ @evaluate_lazy_format
+ def __getattr__(self, item, *args, **kwargs):
+ try:
+ return super().__getattr__(item, *args, **kwargs)
+ except (AttributeError, KeyError):
+ n_item = find_the_correct_casing(item, self) or item
+ return super().__getattr__(n_item, *args, **kwargs)
+
+ @evaluate_lazy_format
+ def __getitem__(self, item, *args, **kwargs):
+ try:
+ return super().__getitem__(item, *args, **kwargs)
+ except (AttributeError, KeyError):
+ n_item = find_the_correct_casing(item, self) or item
+ return super().__getitem__(n_item, *args, **kwargs)
+
+ def __copy__(self):
+ return self.__class__(
+ super(Box, self).copy(),
+ box_settings=self._box_config.get("box_settings"),
+ )
+
+ def copy(self):
+ return self.__class__(
+ super(Box, self).copy(),
+ box_settings=self._box_config.get("box_settings"),
+ )
+
+ @evaluate_lazy_format
+ def get(self, item, default=None, *args, **kwargs):
+ n_item = find_the_correct_casing(item, self) or item
+ value = super().get(n_item, empty, *args, **kwargs)
+ return value if value is not empty else default
+
+ def __dir__(self):
+ keys = list(self.keys())
+ reserved = [
+ item[0]
+ for item in inspect.getmembers(DynaBox)
+ if not item[0].startswith("__")
+ ]
+ return (
+ keys
+ + [k.lower() for k in keys]
+ + [k.upper() for k in keys]
+ + reserved
+ )
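A small sketch of the case-insensitive access `DynaBox` adds on top of the
vendored `Box` (assuming `dynaconf` is importable):

    from dynaconf.utils.boxing import DynaBox

    box = DynaBox({"SERVER": {"PORT": 8080}}, box_settings={})
    print(box.server.port)         # 8080 -- lower-case attribute access
    print(box["SERVER"]["PORT"])   # 8080 -- the original casing still works
    print(box.get("server").PORT)  # 8080 -- get() also normalizes the casing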
diff --git a/libs/dynaconf/utils/files.py b/libs/dynaconf/utils/files.py
new file mode 100644
index 000000000..ec6fbd851
--- /dev/null
+++ b/libs/dynaconf/utils/files.py
@@ -0,0 +1,112 @@
+from __future__ import annotations
+
+import inspect
+import io
+import os
+
+from dynaconf.utils import deduplicate
+
+
+def _walk_to_root(path, break_at=None):
+ """
+    Return the list of directories from the given path up to the root (or break_at).
+ """
+ if not os.path.exists(path): # pragma: no cover
+ raise OSError("Starting path not found")
+
+ if os.path.isfile(path): # pragma: no cover
+ path = os.path.dirname(path)
+
+ last_dir = None
+ current_dir = os.path.abspath(path)
+ paths = []
+ while last_dir != current_dir:
+ paths.append(current_dir)
+ paths.append(os.path.join(current_dir, "config"))
+ if break_at and current_dir == os.path.abspath(break_at): # noqa
+ break
+ parent_dir = os.path.abspath(os.path.join(current_dir, os.path.pardir))
+ last_dir, current_dir = current_dir, parent_dir
+ return paths
+
+
+SEARCHTREE = []
+
+
+def find_file(filename=".env", project_root=None, skip_files=None, **kwargs):
+ """Search in increasingly higher folders for the given file
+ Returns path to the file if found, or an empty string otherwise.
+
+ This function will build a `search_tree` based on:
+
+    - project_root if specified
+ - Invoked script location and its parents until root
+ - Current working directory
+
+ For each path in the `search_tree` it will also look for an
+ additional `./config` folder.
+ """
+ search_tree = []
+ try:
+ work_dir = os.getcwd()
+ except FileNotFoundError:
+ return ""
+ skip_files = skip_files or []
+
+ # If filename is an absolute path and exists, just return it
+ # if the absolute path does not exist, return empty string so
+ # that it can be joined and avoid IoError
+ if os.path.isabs(filename):
+ return filename if os.path.exists(filename) else ""
+
+ if project_root is not None:
+ search_tree.extend(_walk_to_root(project_root, break_at=work_dir))
+
+ script_dir = os.path.dirname(os.path.abspath(inspect.stack()[-1].filename))
+
+ # Path to invoked script and recursively to root with its ./config dirs
+ search_tree.extend(_walk_to_root(script_dir))
+
+ # Path to where Python interpreter was invoked and recursively to root
+ search_tree.extend(_walk_to_root(work_dir))
+
+    # Don't look in the same place twice
+ search_tree = deduplicate(search_tree)
+
+ global SEARCHTREE
+ SEARCHTREE[:] = search_tree
+
+ for dirname in search_tree:
+ check_path = os.path.join(dirname, filename)
+ if check_path in skip_files:
+ continue
+ if os.path.exists(check_path):
+            return check_path  # the first one found is returned
+
+ # return empty string if not found so it can still be joined in os.path
+ return ""
+
+
+def read_file(path, **kwargs):
+ content = ""
+ with open(path, **kwargs) as open_file:
+ content = open_file.read().strip()
+ return content
+
+
+def get_local_filename(filename):
+ """Takes a filename like `settings.toml` and returns `settings.local.toml`
+
+ Arguments:
+ filename {str} -- The filename or complete path
+
+ Returns:
+ [str] -- The same name or path with `.local.` added.
+ """
+ name, _, extension = os.path.basename(str(filename)).rpartition(
+ os.path.extsep
+ )
+
+ return os.path.join(
+ os.path.dirname(str(filename)), f"{name}.local.{extension}"
+ )
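A sketch of the file helpers (assuming `dynaconf` is importable); `find_file`
results depend on where the interpreter was invoked:

    from dynaconf.utils.files import find_file, get_local_filename

    print(get_local_filename("config/settings.toml"))
    # -> 'config/settings.local.toml' (on POSIX paths)

    # walks the caller's directories (plus their ./config subfolders) upward
    # and returns '' when nothing is found
    path = find_file("settings.toml")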
diff --git a/libs/dynaconf/utils/functional.py b/libs/dynaconf/utils/functional.py
new file mode 100644
index 000000000..c9a93afc8
--- /dev/null
+++ b/libs/dynaconf/utils/functional.py
@@ -0,0 +1,136 @@
+from __future__ import annotations
+
+import copy
+import operator
+
+
+class Empty:
+ def __str__(self):
+ return "EMPTY"
+
+
+empty = Empty()
+
+
+def new_method_proxy(func):
+ def inner(self, *args):
+ if self._wrapped is empty:
+ self._setup()
+ return func(self._wrapped, *args)
+
+ return inner
+
+
+class LazyObject:
+ """
+ A wrapper for another class that can be used to delay instantiation of the
+ wrapped class.
+
+ By subclassing, you have the opportunity to intercept and alter the
+ instantiation.
+ """
+
+ # Avoid infinite recursion when tracing __init__.
+ _wrapped = None
+ _kwargs = None
+ _django_override = False
+
+ def __init__(self):
+ # Note: if a subclass overrides __init__(), it will likely need to
+ # override __copy__() and __deepcopy__() as well.
+ self._wrapped = empty
+
+ __getattr__ = new_method_proxy(getattr)
+
+ def __setattr__(self, name, value):
+ if name in ["_wrapped", "_kwargs", "_warn_dynaconf_global_settings"]:
+ # Assign to __dict__ to avoid infinite __setattr__ loops.
+ self.__dict__[name] = value
+ else:
+ if self._wrapped is empty:
+ self._setup()
+ setattr(self._wrapped, name, value)
+
+ def __delattr__(self, name):
+ if name in ["_wrapped", "_kwargs"]:
+ raise TypeError(f"can't delete {name}.")
+ if self._wrapped is empty:
+ self._setup()
+ delattr(self._wrapped, name)
+
+ def _setup(self):
+ """
+ Must be implemented by subclasses to initialize the wrapped object.
+ """
+ raise NotImplementedError(
+ "subclasses of LazyObject must provide a _setup() method"
+ )
+
+ # Because we have messed with __class__ below, we confuse pickle as to what
+ # class we are pickling. We're going to have to initialize the wrapped
+ # object to successfully pickle it, so we might as well just pickle the
+ # wrapped object since they're supposed to act the same way.
+ #
+ # Unfortunately, if we try to simply act like the wrapped object, the ruse
+ # will break down when pickle gets our id(). Thus we end up with pickle
+ # thinking, in effect, that we are a distinct object from the wrapped
+ # object, but with the same __dict__. This can cause problems (see #25389).
+ #
+ # So instead, we define our own __reduce__ method and custom unpickler. We
+ # pickle the wrapped object as the unpickler's argument, so that pickle
+ # will pickle it normally, and then the unpickler simply returns its
+ # argument.
+ def __reduce__(self):
+ if self._wrapped is empty:
+ self._setup()
+ return (unpickle_lazyobject, (self._wrapped,))
+
+ def __copy__(self):
+ if self._wrapped is empty:
+ # If uninitialized, copy the wrapper. Use type(self), not
+ # self.__class__, because the latter is proxied.
+ return type(self)()
+ else:
+ # If initialized, return a copy of the wrapped object.
+ return copy.copy(self._wrapped)
+
+ def __deepcopy__(self, memo):
+ if self._wrapped is empty:
+ # We have to use type(self), not self.__class__, because the
+ # latter is proxied.
+ result = type(self)()
+ memo[id(self)] = result
+ return result
+ return copy.deepcopy(self._wrapped, memo)
+
+ __bytes__ = new_method_proxy(bytes)
+ __str__ = new_method_proxy(str)
+ __bool__ = new_method_proxy(bool)
+
+ # Introspection support
+ __dir__ = new_method_proxy(dir)
+
+ # Need to pretend to be the wrapped class, for the sake of objects that
+ # care about this (especially in equality tests)
+ __class__ = property(new_method_proxy(operator.attrgetter("__class__")))
+ __eq__ = new_method_proxy(operator.eq)
+ __lt__ = new_method_proxy(operator.lt)
+ __gt__ = new_method_proxy(operator.gt)
+ __ne__ = new_method_proxy(operator.ne)
+ __hash__ = new_method_proxy(hash)
+
+ # List/Tuple/Dictionary methods support
+ __getitem__ = new_method_proxy(operator.getitem)
+ __setitem__ = new_method_proxy(operator.setitem)
+ __delitem__ = new_method_proxy(operator.delitem)
+ __iter__ = new_method_proxy(iter)
+ __len__ = new_method_proxy(len)
+ __contains__ = new_method_proxy(operator.contains)
+
+
+def unpickle_lazyobject(wrapped):
+ """
+ Used to unpickle lazy objects. Just return its argument, which will be the
+ wrapped object.
+ """
+ return wrapped
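A minimal sketch of subclassing `LazyObject` (assuming `dynaconf` is
importable); `_setup()` runs on the first proxied access:

    from dynaconf.utils.functional import LazyObject

    class LazyList(LazyObject):
        def _setup(self):
            print("materializing")
            self._wrapped = [1, 2, 3]

    lazy = LazyList()  # nothing is built yet
    print(len(lazy))   # prints "materializing", then 3
    print(lazy[0])     # 1 -- later access goes straight to the wrapped list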
diff --git a/libs/dynaconf/utils/parse_conf.py b/libs/dynaconf/utils/parse_conf.py
new file mode 100644
index 000000000..ac3262d5d
--- /dev/null
+++ b/libs/dynaconf/utils/parse_conf.py
@@ -0,0 +1,401 @@
+from __future__ import annotations
+
+import json
+import os
+import re
+import warnings
+from functools import wraps
+
+from dynaconf.utils import extract_json_objects
+from dynaconf.utils import isnamedtupleinstance
+from dynaconf.utils import multi_replace
+from dynaconf.utils import recursively_evaluate_lazy_format
+from dynaconf.utils.boxing import DynaBox
+from dynaconf.utils.functional import empty
+from dynaconf.vendor import toml
+from dynaconf.vendor import tomllib
+
+try:
+ from jinja2 import Environment
+
+ jinja_env = Environment()
+ for p_method in ("abspath", "realpath", "relpath", "dirname", "basename"):
+ jinja_env.filters[p_method] = getattr(os.path, p_method)
+except ImportError: # pragma: no cover
+ jinja_env = None
+
+true_values = ("t", "true", "enabled", "1", "on", "yes", "True")
+false_values = ("f", "false", "disabled", "0", "off", "no", "False", "")
+
+
+KV_PATTERN = re.compile(r"([a-zA-Z0-9 ]*=[a-zA-Z0-9\- :]*)")
+"""matches `a=b, c=d, e=f` used on `VALUE='@merge foo=bar'` variables."""
+
+
+class DynaconfParseError(Exception):
+ """Error to raise when parsing @casts"""
+
+
+class MetaValue:
+ """A Marker to trigger specific actions on `set` and `object_merge`"""
+
+ _meta_value = True
+
+ def __init__(self, value, box_settings):
+ self.box_settings = box_settings
+ self.value = parse_conf_data(
+ value, tomlfy=True, box_settings=box_settings
+ )
+
+ def __repr__(self):
+ return f"{self.__class__.__name__}({self.value}) on {id(self)}"
+
+ def unwrap(self):
+ return self.value
+
+
+class Reset(MetaValue):
+ """Triggers an existing key to be reset to its value
+ NOTE: DEPRECATED on v3.0.0
+ """
+
+ _dynaconf_reset = True
+
+ def __init__(self, value, box_settings):
+ self.box_settings = box_settings
+ self.value = parse_conf_data(
+ value, tomlfy=True, box_settings=self.box_settings
+ )
+ warnings.warn(f"{self.value} does not need `@reset` anymore.")
+
+
+class Del(MetaValue):
+ """Triggers an existing key to be deleted"""
+
+ _dynaconf_del = True
+
+ def unwrap(self):
+ raise ValueError("Del object has no value")
+
+
+class Merge(MetaValue):
+ """Triggers an existing key to be merged"""
+
+ _dynaconf_merge = True
+
+ def __init__(self, value, box_settings, unique=False):
+ if unique:
+ self._dynaconf_merge_unique = True
+
+ self.box_settings = box_settings
+
+ self.value = parse_conf_data(
+ value, tomlfy=True, box_settings=box_settings
+ )
+
+ if isinstance(self.value, (int, float, bool)):
+ # @merge 1, @merge 1.1, @merge False
+ self.value = [self.value]
+ elif isinstance(self.value, str):
+ # @merge {"valid": "json"}
+ json_object = list(
+ extract_json_objects(
+ multi_replace(
+ self.value,
+ {
+ ": True": ": true",
+ ":True": ": true",
+ ": False": ": false",
+ ":False": ": false",
+ ": None": ": null",
+ ":None": ": null",
+ },
+ )
+ )
+ )
+ if len(json_object) == 1:
+ self.value = json_object[0]
+ else:
+ matches = KV_PATTERN.findall(self.value)
+ # a=b, c=d
+ if matches:
+ self.value = {
+ k.strip(): parse_conf_data(
+ v, tomlfy=True, box_settings=box_settings
+ )
+ for k, v in (
+ match.strip().split("=") for match in matches
+ )
+ }
+ elif "," in self.value:
+ # @merge foo,bar
+ self.value = self.value.split(",")
+ else:
+ # @merge foo
+ self.value = [self.value]
+
+ self.unique = unique
+
+
+class BaseFormatter:
+ def __init__(self, function, token):
+ self.function = function
+ self.token = token
+
+ def __call__(self, value, **context):
+ return self.function(value, **context)
+
+ def __str__(self):
+ return str(self.token)
+
+
+def _jinja_formatter(value, **context):
+ if jinja_env is None: # pragma: no cover
+ raise ImportError(
+ "jinja2 must be installed to enable '@jinja' settings in dynaconf"
+ )
+ return jinja_env.from_string(value).render(**context)
+
+
+class Formatters:
+ """Dynaconf builtin formatters"""
+
+ python_formatter = BaseFormatter(str.format, "format")
+ jinja_formatter = BaseFormatter(_jinja_formatter, "jinja")
+
+
+class Lazy:
+ """Holds data to format lazily."""
+
+ _dynaconf_lazy_format = True
+
+ def __init__(
+ self, value=empty, formatter=Formatters.python_formatter, casting=None
+ ):
+ self.value = value
+ self.formatter = formatter
+ self.casting = casting
+
+ @property
+ def context(self):
+ """Builds a context for formatting."""
+ return {"env": os.environ, "this": self.settings}
+
+ def __call__(self, settings, validator_object=None):
+ """LazyValue triggers format lazily."""
+ self.settings = settings
+ self.context["_validator_object"] = validator_object
+ result = self.formatter(self.value, **self.context)
+ if self.casting is not None:
+ result = self.casting(result)
+ return result
+
+ def __str__(self):
+ """Gives string representation for the object."""
+ return str(self.value)
+
+ def __repr__(self):
+        """Gives the quoted str representation"""
+ return f"'@{self.formatter} {self.value}'"
+
+ def _dynaconf_encode(self):
+ """Encodes this object values to be serializable to json"""
+ return f"@{self.formatter} {self.value}"
+
+ def set_casting(self, casting):
+ """Set the casting and return the instance."""
+ self.casting = casting
+ return self
+
+
+def try_to_encode(value, callback=str):
+ """Tries to encode a value by verifying existence of `_dynaconf_encode`"""
+ try:
+ return value._dynaconf_encode()
+ except (AttributeError, TypeError):
+ return callback(value)
+
+
+def evaluate_lazy_format(f):
+    """Marks a method on a Settings instance to
+    lazily evaluate LazyFormat objects upon access."""
+
+ @wraps(f)
+ def evaluate(settings, *args, **kwargs):
+ value = f(settings, *args, **kwargs)
+ return recursively_evaluate_lazy_format(value, settings)
+
+ return evaluate
+
+
+converters = {
+ "@str": lambda value: value.set_casting(str)
+ if isinstance(value, Lazy)
+ else str(value),
+ "@int": lambda value: value.set_casting(int)
+ if isinstance(value, Lazy)
+ else int(value),
+ "@float": lambda value: value.set_casting(float)
+ if isinstance(value, Lazy)
+ else float(value),
+ "@bool": lambda value: value.set_casting(
+ lambda x: str(x).lower() in true_values
+ )
+ if isinstance(value, Lazy)
+ else str(value).lower() in true_values,
+ "@json": lambda value: value.set_casting(
+ lambda x: json.loads(x.replace("'", '"'))
+ )
+ if isinstance(value, Lazy)
+ else json.loads(value),
+ "@format": lambda value: Lazy(value),
+ "@jinja": lambda value: Lazy(value, formatter=Formatters.jinja_formatter),
+ # Meta Values to trigger pre assignment actions
+ "@reset": Reset, # @reset is DEPRECATED on v3.0.0
+ "@del": Del,
+ "@merge": Merge,
+ "@merge_unique": lambda value, box_settings: Merge(
+ value, box_settings, unique=True
+ ),
+    # Special markers to be used as placeholders, e.g. in prefilled forms;
+ # will always return None when evaluated
+ "@note": lambda value: None,
+ "@comment": lambda value: None,
+ "@null": lambda value: None,
+ "@none": lambda value: None,
+ "@empty": lambda value: empty,
+}
+
+
+def get_converter(converter_key, value, box_settings):
+ converter = converters[converter_key]
+ try:
+ converted_value = converter(value, box_settings=box_settings)
+ except TypeError:
+ converted_value = converter(value)
+ return converted_value
+
+
+def parse_with_toml(data):
+ """Uses TOML syntax to parse data"""
+ try: # try tomllib first
+ try:
+ return tomllib.loads(f"key={data}")["key"]
+ except (tomllib.TOMLDecodeError, KeyError):
+ return data
+    except UnicodeDecodeError:  # pragma: no cover
+        # fallback to toml (TBR in 4.0.0)
+        warnings.warn(
+            "TOML files should have only UTF-8 encoded characters. "
+            "starting on 4.0.0 dynaconf will stop allowing invalid chars.",
+            DeprecationWarning,
+        )
+        try:
+            return toml.loads(f"key={data}")["key"]
+        except (toml.TomlDecodeError, KeyError):
+            return data
+
+
+def _parse_conf_data(data, tomlfy=False, box_settings=None):
+ """
+ @int @bool @float @json (for lists and dicts)
+    strings do not need converters
+
+ export DYNACONF_DEFAULT_THEME='material'
+ export DYNACONF_DEBUG='@bool True'
+ export DYNACONF_DEBUG_TOOLBAR_ENABLED='@bool False'
+ export DYNACONF_PAGINATION_PER_PAGE='@int 20'
+ export DYNACONF_MONGODB_SETTINGS='@json {"DB": "quokka_db"}'
+ export DYNACONF_ALLOWED_EXTENSIONS='@json ["jpg", "png"]'
+ """
+    # not enforced, to avoid breaking backwards compatibility with custom loaders
+ box_settings = box_settings or {}
+
+ castenabled = box_settings.get("AUTO_CAST_FOR_DYNACONF", empty)
+ if castenabled is empty:
+ castenabled = (
+ os.environ.get("AUTO_CAST_FOR_DYNACONF", "true").lower()
+ not in false_values
+ )
+
+ if (
+ castenabled
+ and data
+ and isinstance(data, str)
+ and data.startswith(tuple(converters.keys()))
+ ):
+        # Check if a combination token is used
+ comb_token = re.match(
+ f"^({'|'.join(converters.keys())}) @(jinja|format)",
+ data,
+ )
+ if comb_token:
+ tokens = comb_token.group(0)
+ converter_key_list = tokens.split(" ")
+ value = data.replace(tokens, "").strip()
+ else:
+ parts = data.partition(" ")
+ converter_key_list = [parts[0]]
+ value = parts[-1]
+
+ # Parse the converters iteratively
+ for converter_key in converter_key_list[::-1]:
+ value = get_converter(converter_key, value, box_settings)
+ else:
+ value = parse_with_toml(data) if tomlfy else data
+
+ if isinstance(value, dict):
+ value = DynaBox(value, box_settings=box_settings)
+
+ return value
+
+
+def parse_conf_data(data, tomlfy=False, box_settings=None):
+
+ # fix for https://github.com/dynaconf/dynaconf/issues/595
+ if isnamedtupleinstance(data):
+ return data
+
+    # not enforced, to avoid breaking backwards compatibility with custom loaders
+ box_settings = box_settings or {}
+
+ if isinstance(data, (tuple, list)):
+ # recursively parse each sequence item
+ return [
+ parse_conf_data(item, tomlfy=tomlfy, box_settings=box_settings)
+ for item in data
+ ]
+
+ if isinstance(data, (dict, DynaBox)):
+ # recursively parse inner dict items
+ _parsed = {}
+ for k, v in data.items():
+ _parsed[k] = parse_conf_data(
+ v, tomlfy=tomlfy, box_settings=box_settings
+ )
+ return _parsed
+
+ # return parsed string value
+ return _parse_conf_data(data, tomlfy=tomlfy, box_settings=box_settings)
+
+
+def unparse_conf_data(value):
+ if isinstance(value, bool):
+ return f"@bool {value}"
+
+ if isinstance(value, int):
+ return f"@int {value}"
+
+ if isinstance(value, float):
+ return f"@float {value}"
+
+ if isinstance(value, (list, dict)):
+ return f"@json {json.dumps(value)}"
+
+ if isinstance(value, Lazy):
+ return try_to_encode(value)
+
+ if value is None:
+ return "@none "
+
+ return value
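A sketch of the `@token` casting implemented above (assuming `dynaconf` is
importable):

    from dynaconf.utils.parse_conf import parse_conf_data, unparse_conf_data

    print(parse_conf_data("@int 42"))       # 42
    print(parse_conf_data("@bool off"))     # False
    print(parse_conf_data("@json [1, 2]"))  # [1, 2]
    print(parse_conf_data("plain text"))    # plain text (no converter applied)
    print(unparse_conf_data([1, 2]))        # @json [1, 2]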
diff --git a/libs/dynaconf/validator.py b/libs/dynaconf/validator.py
new file mode 100644
index 000000000..b85269ff6
--- /dev/null
+++ b/libs/dynaconf/validator.py
@@ -0,0 +1,498 @@
+from __future__ import annotations
+
+from collections import defaultdict
+from itertools import chain
+from types import MappingProxyType
+from typing import Any
+from typing import Callable
+from typing import Sequence
+
+from dynaconf import validator_conditions
+from dynaconf.utils import ensure_a_list
+from dynaconf.utils.functional import empty
+
+
+EQUALITY_ATTRS = (
+ "names",
+ "must_exist",
+ "when",
+ "condition",
+ "operations",
+ "envs",
+)
+
+
+class ValidationError(Exception):
+ """Raised when a validation fails"""
+
+ def __init__(self, message: str, *args, **kwargs):
+ self.details = kwargs.pop("details", [])
+ super().__init__(message, *args, **kwargs)
+ self.message = message
+
+
+class Validator:
+    """Validators are conditions attached to settings variable names
+    or patterns::
+
+ Validator('MESSAGE', must_exist=True, eq='Hello World')
+
+    The above ensures MESSAGE is available in the default env and
+    is equal to 'Hello World'
+
+    `names` is one (or more) names or patterns::
+
+ Validator('NAME')
+ Validator('NAME', 'OTHER_NAME', 'EVEN_OTHER')
+ Validator(r'^NAME', r'OTHER./*')
+
+ The `operations` are::
+
+ eq: value == other
+ ne: value != other
+ gt: value > other
+ lt: value < other
+ gte: value >= other
+ lte: value <= other
+ is_type_of: isinstance(value, type)
+ is_in: value in sequence
+ is_not_in: value not in sequence
+ identity: value is other
+        cont: value contains other
+ len_eq: len(value) == other
+ len_ne: len(value) != other
+        len_min: len(value) >= other
+        len_max: len(value) <= other
+
+    `env` is the env to be checked; it can be a str or a list,
+    otherwise the current (default) env is used.
+
+ `when` holds a validator and its return decides if validator runs or not::
+
+ Validator('NAME', must_exist=True, when=Validator('OTHER', eq=2))
+        # NAME is required only if OTHER is equal to 2
+        # `when` is the very first thing to be evaluated when passed.
+        # if no env is passed to `when`, it is inherited
+
+    `must_exist` is an alias for the `required` requirement (executed after `when`)::
+
+        settings.get(value, empty) returns a non-empty value
+
+    `condition` is a callable that is executed and must return a boolean::
+
+ Validator('NAME', condition=lambda x: x == 1)
+ # it is executed before operations.
+
+ """
+
+ default_messages = MappingProxyType(
+ {
+ "must_exist_true": "{name} is required in env {env}",
+            "must_exist_false": "{name} cannot exist in env {env}",
+ "condition": "{name} invalid for {function}({value}) in env {env}",
+ "operations": (
+ "{name} must {operation} {op_value} "
+ "but it is {value} in env {env}"
+ ),
+ "combined": "combined validators failed {errors}",
+ }
+ )
+
+ def __init__(
+ self,
+ *names: str,
+ must_exist: bool | None = None,
+ required: bool | None = None, # alias for `must_exist`
+ condition: Callable[[Any], bool] | None = None,
+ when: Validator | None = None,
+ env: str | Sequence[str] | None = None,
+ messages: dict[str, str] | None = None,
+ cast: Callable[[Any], Any] | None = None,
+ default: Any | Callable[[Any, Validator], Any] | None = empty,
+ description: str | None = None,
+ apply_default_on_none: bool | None = False,
+ **operations: Any,
+ ) -> None:
+ # Copy immutable MappingProxyType as a mutable dict
+ self.messages = dict(self.default_messages)
+ if messages:
+ self.messages.update(messages)
+
+ if when is not None and not isinstance(when, Validator):
+ raise TypeError("when must be Validator instance")
+
+ if condition is not None and not callable(condition):
+ raise TypeError("condition must be callable")
+
+ self.names = names
+ self.must_exist = must_exist if must_exist is not None else required
+ self.condition = condition
+ self.when = when
+ self.cast = cast or (lambda value: value)
+ self.operations = operations
+ self.default = default
+ self.description = description
+ self.envs: Sequence[str] | None = None
+ self.apply_default_on_none = apply_default_on_none
+
+ # See #585
+ self.is_type_of = operations.get("is_type_of")
+
+ if isinstance(env, str):
+ self.envs = [env]
+ elif isinstance(env, (list, tuple)):
+ self.envs = env
+
+ def __or__(self, other: Validator) -> CombinedValidator:
+ return OrValidator(self, other, description=self.description)
+
+ def __and__(self, other: Validator) -> CombinedValidator:
+ return AndValidator(self, other, description=self.description)
+
+ def __eq__(self, other: object) -> bool:
+ if self is other:
+ return True
+
+ if type(self).__name__ != type(other).__name__:
+ return False
+
+ identical_attrs = (
+ getattr(self, attr) == getattr(other, attr)
+ for attr in EQUALITY_ATTRS
+ )
+ if all(identical_attrs):
+ return True
+
+ return False
+
+ def validate(
+ self,
+ settings: Any,
+ only: str | Sequence | None = None,
+ exclude: str | Sequence | None = None,
+ only_current_env: bool = False,
+ ) -> None:
+ """Raise ValidationError if invalid"""
+ # If only or exclude are not set, this value always passes startswith
+ only = ensure_a_list(only or [""])
+ if only and not isinstance(only[0], str):
+ raise ValueError("'only' must be a string or list of strings.")
+
+ exclude = ensure_a_list(exclude)
+ if exclude and not isinstance(exclude[0], str):
+ raise ValueError("'exclude' must be a string or list of strings.")
+
+ if self.envs is None:
+ self.envs = [settings.current_env]
+
+ if self.when is not None:
+ try:
+ # inherit env if not defined
+ if self.when.envs is None:
+ self.when.envs = self.envs
+
+ self.when.validate(settings, only=only, exclude=exclude)
+ except ValidationError:
+                # if when is invalid, return, canceling the validation flow
+ return
+
+ if only_current_env:
+ if settings.current_env.upper() in map(
+ lambda s: s.upper(), self.envs
+ ):
+ self._validate_items(
+ settings, settings.current_env, only=only, exclude=exclude
+ )
+ return
+
+ # If only using current_env, skip using_env decoration (reload)
+ if (
+ len(self.envs) == 1
+ and self.envs[0].upper() == settings.current_env.upper()
+ ):
+ self._validate_items(
+ settings, settings.current_env, only=only, exclude=exclude
+ )
+ return
+
+ for env in self.envs:
+ self._validate_items(
+ settings.from_env(env), only=only, exclude=exclude
+ )
+
+ def _validate_items(
+ self,
+ settings: Any,
+ env: str | None = None,
+ only: str | Sequence | None = None,
+ exclude: str | Sequence | None = None,
+ ) -> None:
+ env = env or settings.current_env
+ for name in self.names:
+ # Skip if only is set and name isn't in the only list
+ if only and not any(name.startswith(sub) for sub in only):
+ continue
+
+ # Skip if exclude is set and name is in the exclude list
+ if exclude and any(name.startswith(sub) for sub in exclude):
+ continue
+
+ if self.default is not empty:
+ default_value = (
+ self.default(settings, self)
+ if callable(self.default)
+ else self.default
+ )
+ else:
+ default_value = empty
+
+ # THIS IS A FIX FOR #585 in contrast with #799
+ # toml considers signed strings "+-1" as integers
+ # however existing users are passing strings
+ # to default on validator (see #585)
+ # The solution we added on #667 introduced a new problem
+ # This fix here makes it to work for both cases.
+ if (
+ isinstance(default_value, str)
+ and default_value.startswith(("+", "-"))
+ and self.is_type_of is str
+ ):
+ # avoid TOML from parsing "+-1" as integer
+ default_value = f"'{default_value}'"
+
+ value = settings.setdefault(
+ name,
+ default_value,
+ apply_default_on_none=self.apply_default_on_none,
+ )
+
+ # is name required but not exists?
+ if self.must_exist is True and value is empty:
+ _message = self.messages["must_exist_true"].format(
+ name=name, env=env
+ )
+ raise ValidationError(_message, details=[(self, _message)])
+
+ if self.must_exist is False and value is not empty:
+ _message = self.messages["must_exist_false"].format(
+ name=name, env=env
+ )
+ raise ValidationError(_message, details=[(self, _message)])
+
+ if self.must_exist in (False, None) and value is empty:
+ continue
+
+ if self.cast:
+ # value or default value already set
+ # by settings.setdefault above
+ # however we need to cast it
+ # so we call .set again
+ value = self.cast(settings.get(name))
+ settings.set(name, value)
+
+ # is there a callable condition?
+ if self.condition is not None:
+ if not self.condition(value):
+ _message = self.messages["condition"].format(
+ name=name,
+ function=self.condition.__name__,
+ value=value,
+ env=env,
+ )
+ raise ValidationError(_message, details=[(self, _message)])
+
+ # operations
+ for op_name, op_value in self.operations.items():
+ op_function = getattr(validator_conditions, op_name)
+ if not op_function(value, op_value):
+ _message = self.messages["operations"].format(
+ name=name,
+ operation=op_function.__name__,
+ op_value=op_value,
+ value=value,
+ env=env,
+ )
+ raise ValidationError(_message, details=[(self, _message)])
+
+
+class CombinedValidator(Validator):
+ def __init__(
+ self,
+ validator_a: Validator,
+ validator_b: Validator,
+ *args: Any,
+ **kwargs: Any,
+ ) -> None:
+ """Takes 2 validators and combines the validation"""
+ self.validators = (validator_a, validator_b)
+ super().__init__(*args, **kwargs)
+ for attr in EQUALITY_ATTRS:
+ if not getattr(self, attr, None):
+ value = tuple(
+ getattr(validator, attr) for validator in self.validators
+ )
+ setattr(self, attr, value)
+
+ def validate(
+ self,
+ settings: Any,
+ only: str | Sequence | None = None,
+ exclude: str | Sequence | None = None,
+ only_current_env: bool = False,
+ ) -> None: # pragma: no cover
+ raise NotImplementedError(
+            "subclasses OrValidator and AndValidator implement this method"
+ )
+
+
+class OrValidator(CombinedValidator):
+ """Evaluates on Validator() | Validator()"""
+
+ def validate(
+ self,
+ settings: Any,
+ only: str | Sequence | None = None,
+ exclude: str | Sequence | None = None,
+ only_current_env: bool = False,
+ ) -> None:
+        """Ensure at least one of the validators is valid"""
+ errors = []
+ for validator in self.validators:
+ try:
+ validator.validate(
+ settings,
+ only=only,
+ exclude=exclude,
+ only_current_env=only_current_env,
+ )
+ except ValidationError as e:
+ errors.append(e)
+ continue
+ else:
+ return
+
+ _message = self.messages["combined"].format(
+ errors=" or ".join(
+ str(e).replace("combined validators failed ", "")
+ for e in errors
+ )
+ )
+ raise ValidationError(_message, details=[(self, _message)])
+
+
+class AndValidator(CombinedValidator):
+ """Evaluates on Validator() & Validator()"""
+
+ def validate(
+ self,
+ settings: Any,
+ only: str | Sequence | None = None,
+ exclude: str | Sequence | None = None,
+ only_current_env: bool = False,
+ ) -> None:
+        """Ensure that both validators are valid"""
+ errors = []
+ for validator in self.validators:
+ try:
+ validator.validate(
+ settings,
+ only=only,
+ exclude=exclude,
+ only_current_env=only_current_env,
+ )
+ except ValidationError as e:
+ errors.append(e)
+ continue
+
+ if errors:
+ _message = self.messages["combined"].format(
+ errors=" and ".join(
+ str(e).replace("combined validators failed ", "")
+ for e in errors
+ )
+ )
+ raise ValidationError(_message, details=[(self, _message)])
+
+
+class ValidatorList(list):
+ def __init__(
+ self,
+ settings: Any,
+ validators: Sequence[Validator] | None = None,
+ *args: Validator,
+ **kwargs: Any,
+ ) -> None:
+ if isinstance(validators, (list, tuple)):
+ args = list(args) + list(validators) # type: ignore
+ self._only = kwargs.pop("validate_only", None)
+ self._exclude = kwargs.pop("validate_exclude", None)
+ super().__init__(args, **kwargs) # type: ignore
+ self.settings = settings
+
+ def register(self, *args: Validator, **kwargs: Validator):
+ validators: list[Validator] = list(
+ chain.from_iterable(kwargs.values()) # type: ignore
+ )
+ validators.extend(args)
+ for validator in validators:
+ if validator and validator not in self:
+ self.append(validator)
+
+ def descriptions(self, flat: bool = False) -> dict[str, str | list[str]]:
+
+ if flat:
+ descriptions: dict[str, str | list[str]] = {}
+ else:
+ descriptions = defaultdict(list)
+
+ for validator in self:
+ for name in validator.names:
+ if isinstance(name, tuple) and len(name) > 0:
+ name = name[0]
+ if flat:
+ descriptions.setdefault(name, validator.description)
+ else:
+ descriptions[name].append( # type: ignore
+ validator.description
+ )
+ return descriptions
+
+ def validate(
+ self,
+ only: str | Sequence | None = None,
+ exclude: str | Sequence | None = None,
+ only_current_env: bool = False,
+ ) -> None:
+ for validator in self:
+ validator.validate(
+ self.settings,
+ only=only,
+ exclude=exclude,
+ only_current_env=only_current_env,
+ )
+
+ def validate_all(
+ self,
+ only: str | Sequence | None = None,
+ exclude: str | Sequence | None = None,
+ only_current_env: bool = False,
+ ) -> None:
+ errors = []
+ details = []
+ for validator in self:
+ try:
+ validator.validate(
+ self.settings,
+ only=only,
+ exclude=exclude,
+ only_current_env=only_current_env,
+ )
+ except ValidationError as e:
+ errors.append(e)
+ details.append((validator, str(e)))
+ continue
+
+ if errors:
+ raise ValidationError(
+ "; ".join(str(e) for e in errors), details=details
+ )
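A brief sketch of wiring these validators into settings (assuming the usual
`Dynaconf` entry point is exported by the vendored package):

    from dynaconf import Dynaconf, Validator

    settings = Dynaconf(
        validators=[
            Validator("PORT", must_exist=True, is_type_of=int, gte=1, lte=65535),
            # NAME is required only when DEBUG is True
            Validator("NAME", must_exist=True, when=Validator("DEBUG", eq=True)),
        ]
    )
    settings.validators.validate()      # raises on the first failure
    settings.validators.validate_all()  # accumulates every failure into one error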
diff --git a/libs/dynaconf/validator_conditions.py b/libs/dynaconf/validator_conditions.py
new file mode 100644
index 000000000..96d151011
--- /dev/null
+++ b/libs/dynaconf/validator_conditions.py
@@ -0,0 +1,90 @@
+# pragma: no cover
+"""
+Implement basic assertions to be used in assertion action
+"""
+from __future__ import annotations
+
+
+def eq(value, other):
+ """Equal"""
+ return value == other
+
+
+def ne(value, other):
+ """Not equal"""
+ return value != other
+
+
+def gt(value, other):
+ """Greater than"""
+ return value > other
+
+
+def lt(value, other):
+    """Less than"""
+ return value < other
+
+
+def gte(value, other):
+ """Greater than or equal"""
+ return value >= other
+
+
+def lte(value, other):
+    """Less than or equal"""
+ return value <= other
+
+
+def identity(value, other):
+ """Identity check using ID"""
+ return value is other
+
+
+def is_type_of(value, other):
+ """Type check"""
+ return isinstance(value, other)
+
+
+def is_in(value, other):
+ """Existence"""
+ return value in other
+
+
+def is_not_in(value, other):
+ """Inexistence"""
+ return value not in other
+
+
+def cont(value, other):
+ """Contains"""
+ return other in value
+
+
+def len_eq(value, other):
+ """Length Equal"""
+ return len(value) == other
+
+
+def len_ne(value, other):
+ """Length Not equal"""
+ return len(value) != other
+
+
+def len_min(value, other):
+ """Minimum length"""
+ return len(value) >= other
+
+
+def len_max(value, other):
+ """Maximum length"""
+ return len(value) <= other
+
+
+def startswith(value, term):
+ """returns value.startswith(term) result"""
+ return value.startswith(term)
+
+
+def endswith(value, term):
+ """returns value.endswith(term) result"""
+ return value.endswith(term)
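Each helper above maps one-to-one onto a `Validator` keyword operation,
resolved by name via `getattr(validator_conditions, op_name)`; a sketch
(assuming `dynaconf` is importable; these only run once attached to a
settings instance):

    from dynaconf import Validator

    Validator("LEVEL", is_in=["debug", "info", "warning"])
    Validator("TAGS", len_min=1, len_max=5, cont="prod")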
diff --git a/libs/dynaconf/vendor/__init__.py b/libs/dynaconf/vendor/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/libs/dynaconf/vendor/__init__.py
diff --git a/libs/dynaconf/vendor/box/__init__.py b/libs/dynaconf/vendor/box/__init__.py
new file mode 100644
index 000000000..ad571e425
--- /dev/null
+++ b/libs/dynaconf/vendor/box/__init__.py
@@ -0,0 +1,15 @@
+#!/usr/bin/env python
+# -*- coding: UTF-8 -*-
+
+__author__ = 'Chris Griffith'
+__version__ = '4.2.3'
+
+from .box import Box
+from .box_list import BoxList
+from .config_box import ConfigBox
+from .shorthand_box import SBox
+from .exceptions import BoxError, BoxKeyError
+from .from_file import box_from_file
+
+
+
diff --git a/libs/dynaconf/vendor/box/box.py b/libs/dynaconf/vendor/box/box.py
new file mode 100644
index 000000000..0b4c1d283
--- /dev/null
+++ b/libs/dynaconf/vendor/box/box.py
@@ -0,0 +1,689 @@
+#!/usr/bin/env python
+# -*- coding: UTF-8 -*-
+#
+# Copyright (c) 2017-2020 - Chris Griffith - MIT License
+"""
+Improved dictionary access through dot notation with additional tools.
+"""
+import copy
+import re
+import string
+import warnings
+from collections.abc import Iterable, Mapping, Callable
+from keyword import kwlist
+from pathlib import Path
+from typing import Any, Union, Tuple, List, Dict
+
+from dynaconf.vendor import box
+from .converters import (_to_json, _from_json, _from_toml, _to_toml, _from_yaml, _to_yaml, BOX_PARAMETERS)
+from .exceptions import BoxError, BoxKeyError, BoxTypeError, BoxValueError, BoxWarning
+
+__all__ = ['Box']
+
+_first_cap_re = re.compile('(.)([A-Z][a-z]+)')
+_all_cap_re = re.compile('([a-z0-9])([A-Z])')
+_list_pos_re = re.compile(r'\[(\d+)\]')
+
+# a sentinel object for indicating no default, in order to allow users
+# to pass `None` as a valid default value
+NO_DEFAULT = object()
+
+
+def _camel_killer(attr):
+ """
+    CamelKiller, what is it?
+
+ Taken from http://stackoverflow.com/a/1176023/3244542
+ """
+ attr = str(attr)
+
+ s1 = _first_cap_re.sub(r'\1_\2', attr)
+ s2 = _all_cap_re.sub(r'\1_\2', s1)
+ return re.sub(' *_+', '_', s2.lower())
+
+
+def _recursive_tuples(iterable, box_class, recreate_tuples=False, **kwargs):
+ out_list = []
+ for i in iterable:
+ if isinstance(i, dict):
+ out_list.append(box_class(i, **kwargs))
+ elif isinstance(i, list) or (recreate_tuples and isinstance(i, tuple)):
+ out_list.append(_recursive_tuples(i, box_class, recreate_tuples, **kwargs))
+ else:
+ out_list.append(i)
+ return tuple(out_list)
+
+
+def _parse_box_dots(item):
+ for idx, char in enumerate(item):
+ if char == '[':
+ return item[:idx], item[idx:]
+ elif char == '.':
+ return item[:idx], item[idx + 1:]
+ raise BoxError('Could not split box dots properly')
+
+
+def _get_box_config():
+ return {
+ # Internal use only
+ '__created': False,
+ '__safe_keys': {}
+ }
+
+
+class Box(dict):
+ """
+ Improved dictionary access through dot notation with additional tools.
+
+ :param default_box: Similar to defaultdict, return a default value
+ :param default_box_attr: Specify the default replacement.
+ WARNING: If this is not the default 'Box', it will not be recursive
+    :param default_box_none_transform: When using default_box, treat keys with None values as absent. True by default
+ :param frozen_box: After creation, the box cannot be modified
+ :param camel_killer_box: Convert CamelCase to snake_case
+ :param conversion_box: Check for near matching keys as attributes
+ :param modify_tuples_box: Recreate incoming tuples with dicts into Boxes
+ :param box_safe_prefix: Conversion box prefix for unsafe attributes
+    :param box_duplicates: "ignore", "error" or "warn" when duplicates exist in a conversion_box
+ :param box_intact_types: tuple of types to ignore converting
+ :param box_recast: cast certain keys to a specified type
+ :param box_dots: access nested Boxes by period separated keys in string
+ """
+
+ _protected_keys = [
+ "to_dict",
+ "to_json",
+ "to_yaml",
+ "from_yaml",
+ "from_json",
+ "from_toml",
+ "to_toml",
+ "merge_update",
+ ] + [attr for attr in dir({}) if not attr.startswith("_")]
+
+ def __new__(cls, *args: Any, box_settings: Any = None, default_box: bool = False, default_box_attr: Any = NO_DEFAULT,
+ default_box_none_transform: bool = True, frozen_box: bool = False, camel_killer_box: bool = False,
+ conversion_box: bool = True, modify_tuples_box: bool = False, box_safe_prefix: str = 'x',
+ box_duplicates: str = 'ignore', box_intact_types: Union[Tuple, List] = (),
+ box_recast: Dict = None, box_dots: bool = False, **kwargs: Any):
+ """
+ Due to the way pickling works in python 3, we need to make sure
+ the box config is created as early as possible.
+ """
+ obj = super(Box, cls).__new__(cls, *args, **kwargs)
+ obj._box_config = _get_box_config()
+ obj._box_config.update({
+ 'default_box': default_box,
+ 'default_box_attr': cls.__class__ if default_box_attr is NO_DEFAULT else default_box_attr,
+ 'default_box_none_transform': default_box_none_transform,
+ 'conversion_box': conversion_box,
+ 'box_safe_prefix': box_safe_prefix,
+ 'frozen_box': frozen_box,
+ 'camel_killer_box': camel_killer_box,
+ 'modify_tuples_box': modify_tuples_box,
+ 'box_duplicates': box_duplicates,
+ 'box_intact_types': tuple(box_intact_types),
+ 'box_recast': box_recast,
+ 'box_dots': box_dots,
+ 'box_settings': box_settings or {}
+ })
+ return obj
+
+ def __init__(self, *args: Any, box_settings: Any = None, default_box: bool = False, default_box_attr: Any = NO_DEFAULT,
+ default_box_none_transform: bool = True, frozen_box: bool = False, camel_killer_box: bool = False,
+ conversion_box: bool = True, modify_tuples_box: bool = False, box_safe_prefix: str = 'x',
+ box_duplicates: str = 'ignore', box_intact_types: Union[Tuple, List] = (),
+ box_recast: Dict = None, box_dots: bool = False, **kwargs: Any):
+ super().__init__()
+ self._box_config = _get_box_config()
+ self._box_config.update({
+ 'default_box': default_box,
+ 'default_box_attr': self.__class__ if default_box_attr is NO_DEFAULT else default_box_attr,
+ 'default_box_none_transform': default_box_none_transform,
+ 'conversion_box': conversion_box,
+ 'box_safe_prefix': box_safe_prefix,
+ 'frozen_box': frozen_box,
+ 'camel_killer_box': camel_killer_box,
+ 'modify_tuples_box': modify_tuples_box,
+ 'box_duplicates': box_duplicates,
+ 'box_intact_types': tuple(box_intact_types),
+ 'box_recast': box_recast,
+ 'box_dots': box_dots,
+ 'box_settings': box_settings or {}
+ })
+ if not self._box_config['conversion_box'] and self._box_config['box_duplicates'] != 'ignore':
+ raise BoxError('box_duplicates are only for conversion_boxes')
+ if len(args) == 1:
+ if isinstance(args[0], str):
+ raise BoxValueError('Cannot extrapolate Box from string')
+ if isinstance(args[0], Mapping):
+ for k, v in args[0].items():
+ if v is args[0]:
+ v = self
+
+ if v is None and self._box_config['default_box'] and self._box_config['default_box_none_transform']:
+ continue
+ self.__setitem__(k, v)
+ elif isinstance(args[0], Iterable):
+ for k, v in args[0]:
+ self.__setitem__(k, v)
+ else:
+ raise BoxValueError('First argument must be mapping or iterable')
+ elif args:
+ raise BoxTypeError(f'Box expected at most 1 argument, got {len(args)}')
+
+ for k, v in kwargs.items():
+ if args and isinstance(args[0], Mapping) and v is args[0]:
+ v = self
+ self.__setitem__(k, v)
+
+ self._box_config['__created'] = True
+
+ def __add__(self, other: dict):
+ new_box = self.copy()
+ if not isinstance(other, dict):
+            raise BoxTypeError('Box can only merge two boxes or a box and a dictionary.')
+ new_box.merge_update(other)
+ return new_box
+
+ def __hash__(self):
+ if self._box_config['frozen_box']:
+ hashing = 54321
+ for item in self.items():
+ hashing ^= hash(item)
+ return hashing
+ raise BoxTypeError('unhashable type: "Box"')
+
+ def __dir__(self):
+ allowed = string.ascii_letters + string.digits + '_'
+ items = set(super().__dir__())
+ # Only show items accessible by dot notation
+ for key in self.keys():
+ key = str(key)
+ if ' ' not in key and key[0] not in string.digits and key not in kwlist:
+ for letter in key:
+ if letter not in allowed:
+ break
+ else:
+ items.add(key)
+
+ for key in self.keys():
+ if key not in items:
+ if self._box_config['conversion_box']:
+ key = self._safe_attr(key)
+ if key:
+ items.add(key)
+
+ return list(items)
+
+ def get(self, key, default=NO_DEFAULT):
+ if key not in self:
+ if default is NO_DEFAULT:
+ if self._box_config['default_box'] and self._box_config['default_box_none_transform']:
+ return self.__get_default(key)
+ else:
+ return None
+ if isinstance(default, dict) and not isinstance(default, Box):
+ return Box(default, box_settings=self._box_config.get("box_settings"))
+ if isinstance(default, list) and not isinstance(default, box.BoxList):
+ return box.BoxList(default)
+ return default
+ return self[key]
+
+ def copy(self):
+ return Box(super().copy(), **self.__box_config())
+
+ def __copy__(self):
+ return Box(super().copy(), **self.__box_config())
+
+ def __deepcopy__(self, memodict=None):
+ frozen = self._box_config['frozen_box']
+ config = self.__box_config()
+ config['frozen_box'] = False
+ out = self.__class__(**config)
+ memodict = memodict or {}
+ memodict[id(self)] = out
+ for k, v in self.items():
+ out[copy.deepcopy(k, memodict)] = copy.deepcopy(v, memodict)
+ out._box_config['frozen_box'] = frozen
+ return out
+
+ def __setstate__(self, state):
+ self._box_config = state['_box_config']
+ self.__dict__.update(state)
+
+ def keys(self):
+ return super().keys()
+
+ def values(self):
+ return [self[x] for x in self.keys()]
+
+ def items(self):
+ return [(x, self[x]) for x in self.keys()]
+
+ def __get_default(self, item):
+ default_value = self._box_config['default_box_attr']
+ if default_value in (self.__class__, dict):
+ value = self.__class__(**self.__box_config())
+ elif isinstance(default_value, dict):
+ value = self.__class__(**self.__box_config(), **default_value)
+ elif isinstance(default_value, list):
+ value = box.BoxList(**self.__box_config())
+ elif isinstance(default_value, Callable):
+ value = default_value()
+ elif hasattr(default_value, 'copy'):
+ value = default_value.copy()
+ else:
+ value = default_value
+ self.__convert_and_store(item, value)
+ return value
+
+ def __box_config(self):
+ out = {}
+ for k, v in self._box_config.copy().items():
+ if not k.startswith('__'):
+ out[k] = v
+ return out
+
+ def __recast(self, item, value):
+ if self._box_config['box_recast'] and item in self._box_config['box_recast']:
+ try:
+ return self._box_config['box_recast'][item](value)
+ except ValueError:
+ raise BoxValueError(f'Cannot convert {value} to {self._box_config["box_recast"][item]}') from None
+ return value
+
+ def __convert_and_store(self, item, value):
+ if self._box_config['conversion_box']:
+ safe_key = self._safe_attr(item)
+ self._box_config['__safe_keys'][safe_key] = item
+ if isinstance(value, (int, float, str, bytes, bytearray, bool, complex, set, frozenset)):
+ return super().__setitem__(item, value)
+ # If the value has already been converted or should not be converted, return it as-is
+ if self._box_config['box_intact_types'] and isinstance(value, self._box_config['box_intact_types']):
+ return super().__setitem__(item, value)
+ # This is the magic sauce that makes sub dictionaries into new box objects
+ if isinstance(value, dict) and not isinstance(value, Box):
+ value = self.__class__(value, **self.__box_config())
+ elif isinstance(value, list) and not isinstance(value, box.BoxList):
+ if self._box_config['frozen_box']:
+ value = _recursive_tuples(value,
+ self.__class__,
+ recreate_tuples=self._box_config['modify_tuples_box'],
+ **self.__box_config())
+ else:
+ value = box.BoxList(value, box_class=self.__class__, **self.__box_config())
+ elif self._box_config['modify_tuples_box'] and isinstance(value, tuple):
+ value = _recursive_tuples(value, self.__class__, recreate_tuples=True, **self.__box_config())
+ super().__setitem__(item, value)
+
+ def __getitem__(self, item, _ignore_default=False):
+ try:
+ return super().__getitem__(item)
+ except KeyError as err:
+ if item == '_box_config':
+ raise BoxKeyError('_box_config should only exist as an attribute and is never defaulted') from None
+ if self._box_config['box_dots'] and isinstance(item, str) and ('.' in item or '[' in item):
+ first_item, children = _parse_box_dots(item)
+ if first_item in self.keys():
+ if hasattr(self[first_item], '__getitem__'):
+ return self[first_item][children]
+ if self._box_config['camel_killer_box'] and isinstance(item, str):
+ converted = _camel_killer(item)
+ if converted in self.keys():
+ return super().__getitem__(converted)
+ if self._box_config['default_box'] and not _ignore_default:
+ return self.__get_default(item)
+ raise BoxKeyError(str(err)) from None
+
+ def __getattr__(self, item):
+ try:
+ try:
+ value = self.__getitem__(item, _ignore_default=True)
+ except KeyError:
+ value = object.__getattribute__(self, item)
+ except AttributeError as err:
+ if item == '__getstate__':
+ raise BoxKeyError(item) from None
+ if item == '_box_config':
+ raise BoxError('_box_config key must exist') from None
+ if self._box_config['conversion_box']:
+ safe_key = self._safe_attr(item)
+ if safe_key in self._box_config['__safe_keys']:
+ return self.__getitem__(self._box_config['__safe_keys'][safe_key])
+ if self._box_config['default_box']:
+ return self.__get_default(item)
+ raise BoxKeyError(str(err)) from None
+ return value
+
+ def __setitem__(self, key, value):
+ if key != '_box_config' and self._box_config['__created'] and self._box_config['frozen_box']:
+ raise BoxError('Box is frozen')
+ if self._box_config['box_dots'] and isinstance(key, str) and '.' in key:
+ first_item, children = _parse_box_dots(key)
+ if first_item in self.keys():
+ if hasattr(self[first_item], '__setitem__'):
+ return self[first_item].__setitem__(children, value)
+ value = self.__recast(key, value)
+ if key not in self.keys() and self._box_config['camel_killer_box']:
+ if self._box_config['camel_killer_box'] and isinstance(key, str):
+ key = _camel_killer(key)
+ if self._box_config['conversion_box'] and self._box_config['box_duplicates'] != 'ignore':
+ self._conversion_checks(key)
+ self.__convert_and_store(key, value)
+
+ def __setattr__(self, key, value):
+ if key != '_box_config' and self._box_config['frozen_box'] and self._box_config['__created']:
+ raise BoxError('Box is frozen')
+ if key in self._protected_keys:
+ raise BoxKeyError(f'Key name "{key}" is protected')
+ if key == '_box_config':
+ return object.__setattr__(self, key, value)
+ value = self.__recast(key, value)
+ safe_key = self._safe_attr(key)
+ if safe_key in self._box_config['__safe_keys']:
+ key = self._box_config['__safe_keys'][safe_key]
+ self.__setitem__(key, value)
+
+ def __delitem__(self, key):
+ if self._box_config['frozen_box']:
+ raise BoxError('Box is frozen')
+ if key not in self.keys() and self._box_config['box_dots'] and isinstance(key, str) and '.' in key:
+ first_item, children = key.split('.', 1)
+ if first_item in self.keys() and isinstance(self[first_item], dict):
+ return self[first_item].__delitem__(children)
+ if key not in self.keys() and self._box_config['camel_killer_box']:
+ if self._box_config['camel_killer_box'] and isinstance(key, str):
+ for each_key in self:
+ if _camel_killer(key) == each_key:
+ key = each_key
+ break
+ super().__delitem__(key)
+
+ def __delattr__(self, item):
+ if self._box_config['frozen_box']:
+ raise BoxError('Box is frozen')
+ if item == '_box_config':
+ raise BoxError('"_box_config" is protected')
+ if item in self._protected_keys:
+ raise BoxKeyError(f'Key name "{item}" is protected')
+ try:
+ self.__delitem__(item)
+ except KeyError as err:
+ if self._box_config['conversion_box']:
+ safe_key = self._safe_attr(item)
+ if safe_key in self._box_config['__safe_keys']:
+ self.__delitem__(self._box_config['__safe_keys'][safe_key])
+ del self._box_config['__safe_keys'][safe_key]
+ return
+ raise BoxKeyError(err)
+
+ def pop(self, key, *args):
+ if args:
+ if len(args) != 1:
+ raise BoxError('pop() takes only one optional argument "default"')
+ try:
+ item = self[key]
+ except KeyError:
+ return args[0]
+ else:
+ del self[key]
+ return item
+ try:
+ item = self[key]
+ except KeyError:
+ raise BoxKeyError('{0}'.format(key)) from None
+ else:
+ del self[key]
+ return item
+
+ def clear(self):
+ super().clear()
+ self._box_config['__safe_keys'].clear()
+
+ def popitem(self):
+ try:
+ key = next(self.__iter__())
+ except StopIteration:
+ raise BoxKeyError('Empty box') from None
+ return key, self.pop(key)
+
+ def __repr__(self):
+ return f'<Box: {self.to_dict()}>'
+
+ def __str__(self):
+ return str(self.to_dict())
+
+ def __iter__(self):
+ for key in self.keys():
+ yield key
+
+ def __reversed__(self):
+ for key in reversed(list(self.keys())):
+ yield key
+
+ def to_dict(self):
+ """
+ Turn the Box and sub Boxes back into a native python dictionary.
+
+ :return: python dictionary of this Box
+ """
+ out_dict = dict(self)
+ for k, v in out_dict.items():
+ if v is self:
+ out_dict[k] = out_dict
+ elif isinstance(v, Box):
+ out_dict[k] = v.to_dict()
+ elif isinstance(v, box.BoxList):
+ out_dict[k] = v.to_list()
+ return out_dict
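+ # Example (editor's note): Box(a={'b': 1}).to_dict() == {'a': {'b': 1}};
+ # nested Boxes and BoxLists are recursively converted back to plain types.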
+
+ def update(self, __m=None, **kwargs):
+ if __m:
+ if hasattr(__m, 'keys'):
+ for k in __m:
+ self.__convert_and_store(k, __m[k])
+ else:
+ for k, v in __m:
+ self.__convert_and_store(k, v)
+ for k in kwargs:
+ self.__convert_and_store(k, kwargs[k])
+
+ def merge_update(self, __m=None, **kwargs):
+ def convert_and_set(k, v):
+ intact_type = (self._box_config['box_intact_types'] and isinstance(v, self._box_config['box_intact_types']))
+ if isinstance(v, dict) and not intact_type:
+ # Box objects must be created in case they are already
+ # in the `converted` box_config set
+ v = self.__class__(v, **self.__box_config())
+ if k in self and isinstance(self[k], dict):
+ if isinstance(self[k], Box):
+ self[k].merge_update(v)
+ else:
+ self[k].update(v)
+ return
+ if isinstance(v, list) and not intact_type:
+ v = box.BoxList(v, **self.__box_config())
+ self.__setitem__(k, v)
+
+ if __m:
+ if hasattr(__m, 'keys'):
+ for key in __m:
+ convert_and_set(key, __m[key])
+ else:
+ for key, value in __m:
+ convert_and_set(key, value)
+ for key in kwargs:
+ convert_and_set(key, kwargs[key])
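+ # Example (editor's sketch): unlike update(), merge_update() recurses into
+ # nested mappings instead of replacing them:
+ # b = Box(a={'x': 1}); b.merge_update({'a': {'y': 2}})
+ # b.a == {'x': 1, 'y': 2} # update() would have replaced 'a' wholesale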
+
+ def setdefault(self, item, default=None):
+ if item in self:
+ return self[item]
+
+ if isinstance(default, dict):
+ default = self.__class__(default, **self.__box_config())
+ if isinstance(default, list):
+ default = box.BoxList(default, box_class=self.__class__, **self.__box_config())
+ self[item] = default
+ return default
+
+ def _safe_attr(self, attr):
+ """Convert a key into something that is accessible as an attribute"""
+ allowed = string.ascii_letters + string.digits + '_'
+
+ if isinstance(attr, tuple):
+ attr = "_".join([str(x) for x in attr])
+
+ attr = attr.decode('utf-8', 'ignore') if isinstance(attr, bytes) else str(attr)
+ if self.__box_config()['camel_killer_box']:
+ attr = _camel_killer(attr)
+
+ out = []
+ last_safe = 0
+ for i, character in enumerate(attr):
+ if character in allowed:
+ last_safe = i
+ out.append(character)
+ elif not out:
+ continue
+ else:
+ if last_safe == i - 1:
+ out.append('_')
+
+ out = "".join(out)[:last_safe + 1]
+
+ try:
+ int(out[0])
+ except (ValueError, IndexError):
+ pass
+ else:
+ out = f'{self.__box_config()["box_safe_prefix"]}{out}'
+
+ if out in kwlist:
+ out = f'{self.__box_config()["box_safe_prefix"]}{out}'
+
+ return out
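+ # Example (editor's note): _safe_attr('my key!') -> 'my_key', which is how
+ # conversion_box lets Box({'my key!': 1}).my_key resolve to 1.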
+
+ def _conversion_checks(self, item):
+ """
+ Internal use for checking if a duplicate safe attribute already exists
+
+ :param item: Item to check for an existing duplicate safe attribute
+ """
+ safe_item = self._safe_attr(item)
+
+ if safe_item in self._box_config['__safe_keys']:
+ dups = [f'{item}({safe_item})', f'{self._box_config["__safe_keys"][safe_item]}({safe_item})']
+ if self._box_config['box_duplicates'].startswith('warn'):
+ warnings.warn(f'Duplicate conversion attributes exist: {dups}', BoxWarning)
+ else:
+ raise BoxError(f'Duplicate conversion attributes exist: {dups}')
+
+ def to_json(self, filename: Union[str, Path] = None, encoding: str = 'utf-8', errors: str = 'strict',
+ **json_kwargs):
+ """
+ Transform the Box object into a JSON string.
+
+ :param filename: If provided will save to file
+ :param encoding: File encoding
+ :param errors: How to handle encoding errors
+ :param json_kwargs: additional arguments to pass to json.dump(s)
+ :return: string of JSON (if no filename provided)
+ """
+ return _to_json(self.to_dict(), filename=filename, encoding=encoding, errors=errors, **json_kwargs)
+
+ @classmethod
+ def from_json(cls, json_string: str = None, filename: Union[str, Path] = None, encoding: str = 'utf-8',
+ errors: str = 'strict', **kwargs):
+ """
+ Transform a json object string into a Box object. If the incoming
+ json is a list, you must use BoxList.from_json.
+
+ :param json_string: string to pass to `json.loads`
+ :param filename: filename to open and pass to `json.load`
+ :param encoding: File encoding
+ :param errors: How to handle encoding errors
+ :param kwargs: parameters to pass to `Box()` or `json.loads`
+ :return: Box object from json data
+ """
+ box_args = {}
+ for arg in kwargs.copy():
+ if arg in BOX_PARAMETERS:
+ box_args[arg] = kwargs.pop(arg)
+
+ data = _from_json(json_string, filename=filename, encoding=encoding, errors=errors, **kwargs)
+
+ if not isinstance(data, dict):
+ raise BoxError(f'json data not returned as a dictionary, but rather a {type(data).__name__}')
+ return cls(data, **box_args)
+
+ def to_yaml(self, filename: Union[str, Path] = None, default_flow_style: bool = False, encoding: str = 'utf-8',
+ errors: str = 'strict', **yaml_kwargs):
+ """
+ Transform the Box object into a YAML string.
+
+ :param filename: If provided will save to file
+ :param default_flow_style: False will recursively dump dicts
+ :param encoding: File encoding
+ :param errors: How to handle encoding errors
+ :param yaml_kwargs: additional arguments to pass to yaml.dump
+ :return: string of YAML (if no filename provided)
+ """
+ return _to_yaml(self.to_dict(), filename=filename, default_flow_style=default_flow_style,
+ encoding=encoding, errors=errors, **yaml_kwargs)
+
+ @classmethod
+ def from_yaml(cls, yaml_string: str = None, filename: Union[str, Path] = None, encoding: str = 'utf-8',
+ errors: str = 'strict', **kwargs):
+ """
+ Transform a yaml object string into a Box object. By default will use SafeLoader.
+
+ :param yaml_string: string to pass to `yaml.load`
+ :param filename: filename to open and pass to `yaml.load`
+ :param encoding: File encoding
+ :param errors: How to handle encoding errors
+ :param kwargs: parameters to pass to `Box()` or `yaml.load`
+ :return: Box object from yaml data
+ """
+ box_args = {}
+ for arg in kwargs.copy():
+ if arg in BOX_PARAMETERS:
+ box_args[arg] = kwargs.pop(arg)
+
+ data = _from_yaml(yaml_string=yaml_string, filename=filename, encoding=encoding, errors=errors, **kwargs)
+ if not isinstance(data, dict):
+ raise BoxError(f'yaml data not returned as a dictionary but rather a {type(data).__name__}')
+ return cls(data, **box_args)
+
+ def to_toml(self, filename: Union[str, Path] = None, encoding: str = 'utf-8', errors: str = 'strict'):
+ """
+ Transform the Box object into a toml string.
+
+ :param filename: File to write the TOML object to
+ :param encoding: File encoding
+ :param errors: How to handle encoding errors
+ :return: string of TOML (if no filename provided)
+ """
+ return _to_toml(self.to_dict(), filename=filename, encoding=encoding, errors=errors)
+
+ @classmethod
+ def from_toml(cls, toml_string: str = None, filename: Union[str, Path] = None,
+ encoding: str = 'utf-8', errors: str = 'strict', **kwargs):
+ """
+ Transforms a toml string or file into a Box object
+
+ :param toml_string: string to pass to `toml.load`
+ :param filename: filename to open and pass to `toml.load`
+ :param encoding: File encoding
+ :param errors: How to handle encoding errors
+ :param kwargs: parameters to pass to `Box()`
+ :return: Box object from toml data
+ """
+ box_args = {}
+ for arg in kwargs.copy():
+ if arg in BOX_PARAMETERS:
+ box_args[arg] = kwargs.pop(arg)
+
+ data = _from_toml(toml_string=toml_string, filename=filename, encoding=encoding, errors=errors)
+ return cls(data, **box_args)
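+
+ # Editor's note: the to_*/from_* pairs above are symmetric, e.g.
+ # Box.from_yaml(Box(a=1).to_yaml()).a == 1, and likewise for JSON and TOML.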
diff --git a/libs/dynaconf/vendor/box/box_list.py b/libs/dynaconf/vendor/box/box_list.py
new file mode 100644
index 000000000..8687c401c
--- /dev/null
+++ b/libs/dynaconf/vendor/box/box_list.py
@@ -0,0 +1,276 @@
+#!/usr/bin/env python
+# -*- coding: UTF-8 -*-
+#
+# Copyright (c) 2017-2020 - Chris Griffith - MIT License
+import copy
+import re
+from typing import Iterable, Optional
+
+
+from dynaconf.vendor import box
+from .converters import (_to_yaml, _from_yaml, _to_json, _from_json,
+ _to_toml, _from_toml, _to_csv, _from_csv, BOX_PARAMETERS)
+from .exceptions import BoxError, BoxTypeError, BoxKeyError
+
+_list_pos_re = re.compile(r'\[(\d+)\]')
+
+
+DYNABOX_CLASS = None # a cache constant to avoid multiple imports
+
+
+def get_dynabox_class_avoiding_circular_import():
+ """
+ See dynaconf issue #462
+ """
+ global DYNABOX_CLASS
+ if DYNABOX_CLASS is None:
+ from dynaconf.utils.boxing import DynaBox
+ DYNABOX_CLASS = DynaBox
+ return DYNABOX_CLASS
+
+
+class BoxList(list):
+ """
+ Drop-in replacement for list that converts added objects to Box or BoxList
+ objects as necessary.
+ """
+
+ def __init__(self, iterable: Iterable = None, box_class: Optional[box.Box] = None, **box_options):
+ self.box_class = box_class or get_dynabox_class_avoiding_circular_import()
+ self.box_options = box_options
+ self.box_org_ref = id(iterable) if iterable else 0
+ if iterable:
+ for x in iterable:
+ self.append(x)
+ if box_options.get('frozen_box'):
+ def frozen(*args, **kwargs):
+ raise BoxError('BoxList is frozen')
+
+ for method in ['append', 'extend', 'insert', 'pop', 'remove', 'reverse', 'sort']:
+ self.__setattr__(method, frozen)
+
+ def __getitem__(self, item):
+ if self.box_options.get('box_dots') and isinstance(item, str) and item.startswith('['):
+ list_pos = _list_pos_re.search(item)
+ value = super(BoxList, self).__getitem__(int(list_pos.groups()[0]))
+ if len(list_pos.group()) == len(item):
+ return value
+ return value.__getitem__(item[len(list_pos.group()):].lstrip('.'))
+ return super(BoxList, self).__getitem__(item)
+
+ def __delitem__(self, key):
+ if self.box_options.get('frozen_box'):
+ raise BoxError('BoxList is frozen')
+ super(BoxList, self).__delitem__(key)
+
+ def __setitem__(self, key, value):
+ if self.box_options.get('frozen_box'):
+ raise BoxError('BoxList is frozen')
+ if self.box_options.get('box_dots') and isinstance(key, str) and key.startswith('['):
+ list_pos = _list_pos_re.search(key)
+ pos = int(list_pos.groups()[0])
+ if len(list_pos.group()) == len(key):
+ return super(BoxList, self).__setitem__(pos, value)
+ return super(BoxList, self).__getitem__(pos).__setitem__(key[len(list_pos.group()):].lstrip('.'), value)
+ super(BoxList, self).__setitem__(key, value)
+
+ def _is_intact_type(self, obj):
+ try:
+ if self.box_options.get('box_intact_types') and isinstance(obj, self.box_options['box_intact_types']):
+ return True
+ except AttributeError as err:
+ if 'box_options' in self.__dict__:
+ raise BoxKeyError(err)
+ return False
+
+ def append(self, p_object):
+ if isinstance(p_object, dict) and not self._is_intact_type(p_object):
+ try:
+ p_object = self.box_class(p_object, **self.box_options)
+ except AttributeError as err:
+ if 'box_class' in self.__dict__:
+ raise BoxKeyError(err)
+ elif isinstance(p_object, list) and not self._is_intact_type(p_object):
+ try:
+ p_object = (self if id(p_object) == self.box_org_ref else BoxList(p_object, **self.box_options))
+ except AttributeError as err:
+ if 'box_org_ref' in self.__dict__:
+ raise BoxKeyError(err)
+ super(BoxList, self).append(p_object)
+
+ def extend(self, iterable):
+ for item in iterable:
+ self.append(item)
+
+ def insert(self, index, p_object):
+ if isinstance(p_object, dict) and not self._is_intact_type(p_object):
+ p_object = self.box_class(p_object, **self.box_options)
+ elif isinstance(p_object, list) and not self._is_intact_type(p_object):
+ p_object = (self if id(p_object) == self.box_org_ref else BoxList(p_object, **self.box_options))
+ super(BoxList, self).insert(index, p_object)
+
+ def __repr__(self):
+ return f'<BoxList: {self.to_list()}>'
+
+ def __str__(self):
+ return str(self.to_list())
+
+ def __copy__(self):
+ return BoxList((x for x in self), self.box_class, **self.box_options)
+
+ def __deepcopy__(self, memo=None):
+ out = self.__class__()
+ memo = memo or {}
+ memo[id(self)] = out
+ for k in self:
+ out.append(copy.deepcopy(k, memo=memo))
+ return out
+
+ def __hash__(self):
+ if self.box_options.get('frozen_box'):
+ hashing = 98765
+ hashing ^= hash(tuple(self))
+ return hashing
+ raise BoxTypeError("unhashable type: 'BoxList'")
+
+ def to_list(self):
+ new_list = []
+ for x in self:
+ if x is self:
+ new_list.append(new_list)
+ elif isinstance(x, box.Box):
+ new_list.append(x.to_dict())
+ elif isinstance(x, BoxList):
+ new_list.append(x.to_list())
+ else:
+ new_list.append(x)
+ return new_list
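+ # Example (editor's note): BoxList([{'a': 1}])[0].a == 1, and to_list()
+ # undoes the conversion, returning plain dicts and lists.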
+
+ def to_json(self, filename: str = None, encoding: str = 'utf-8', errors: str = 'strict',
+ multiline: bool = False, **json_kwargs):
+ """
+ Transform the BoxList object into a JSON string.
+
+ :param filename: If provided will save to file
+ :param encoding: File encoding
+ :param errors: How to handle encoding errors
+ :param multiline: Put each item in the list onto its own line
+ :param json_kwargs: additional arguments to pass to json.dump(s)
+ :return: string of JSON or return of `json.dump`
+ """
+ if filename and multiline:
+ lines = [_to_json(item, filename=False, encoding=encoding, errors=errors, **json_kwargs) for item in self]
+ with open(filename, 'w', encoding=encoding, errors=errors) as f:
+ f.write("\n".join(lines))
+ else:
+ return _to_json(self.to_list(), filename=filename, encoding=encoding, errors=errors, **json_kwargs)
+
+ @classmethod
+ def from_json(cls, json_string: str = None, filename: str = None, encoding: str = 'utf-8', errors: str = 'strict',
+ multiline: bool = False, **kwargs):
+ """
+ Transform a json object string into a BoxList object. If the incoming
+ json is a dict, you must use Box.from_json.
+
+ :param json_string: string to pass to `json.loads`
+ :param filename: filename to open and pass to `json.load`
+ :param encoding: File encoding
+ :param errors: How to handle encoding errors
+ :param multiline: One object per line
+ :param kwargs: parameters to pass to `Box()` or `json.loads`
+ :return: BoxList object from json data
+ """
+ bx_args = {}
+ for arg in list(kwargs.keys()):
+ if arg in BOX_PARAMETERS:
+ bx_args[arg] = kwargs.pop(arg)
+
+ data = _from_json(json_string, filename=filename, encoding=encoding,
+ errors=errors, multiline=multiline, **kwargs)
+
+ if not isinstance(data, list):
+ raise BoxError(f'json data not returned as a list, but rather a {type(data).__name__}')
+ return cls(data, **bx_args)
+
+ def to_yaml(self, filename: str = None, default_flow_style: bool = False,
+ encoding: str = 'utf-8', errors: str = 'strict', **yaml_kwargs):
+ """
+ Transform the BoxList object into a YAML string.
+
+ :param filename: If provided will save to file
+ :param default_flow_style: False will recursively dump dicts
+ :param encoding: File encoding
+ :param errors: How to handle encoding errors
+ :param yaml_kwargs: additional arguments to pass to yaml.dump
+ :return: string of YAML or return of `yaml.dump`
+ """
+ return _to_yaml(self.to_list(), filename=filename, default_flow_style=default_flow_style,
+ encoding=encoding, errors=errors, **yaml_kwargs)
+
+ @classmethod
+ def from_yaml(cls, yaml_string: str = None, filename: str = None,
+ encoding: str = 'utf-8', errors: str = 'strict', **kwargs):
+ """
+ Transform a yaml object string into a BoxList object.
+
+ :param yaml_string: string to pass to `yaml.load`
+ :param filename: filename to open and pass to `yaml.load`
+ :param encoding: File encoding
+ :param errors: How to handle encoding errors
+ :param kwargs: parameters to pass to `BoxList()` or `yaml.load`
+ :return: BoxList object from yaml data
+ """
+ bx_args = {}
+ for arg in list(kwargs.keys()):
+ if arg in BOX_PARAMETERS:
+ bx_args[arg] = kwargs.pop(arg)
+
+ data = _from_yaml(yaml_string=yaml_string, filename=filename, encoding=encoding, errors=errors, **kwargs)
+ if not isinstance(data, list):
+ raise BoxError(f'yaml data not returned as a list but rather a {type(data).__name__}')
+ return cls(data, **bx_args)
+
+ def to_toml(self, filename: str = None, key_name: str = 'toml', encoding: str = 'utf-8', errors: str = 'strict'):
+ """
+ Transform the BoxList object into a toml string.
+
+ :param filename: File to write the TOML object to
+ :param key_name: Specify the name of the key to store the string under
+ (cannot directly convert to toml)
+ :param encoding: File encoding
+ :param errors: How to handle encoding errors
+ :return: string of TOML (if no filename provided)
+ """
+ return _to_toml({key_name: self.to_list()}, filename=filename, encoding=encoding, errors=errors)
+
+ @classmethod
+ def from_toml(cls, toml_string: str = None, filename: str = None, key_name: str = 'toml',
+ encoding: str = 'utf-8', errors: str = 'strict', **kwargs):
+ """
+ Transforms a toml string or file into a BoxList object
+
+ :param toml_string: string to pass to `toml.load`
+ :param filename: filename to open and pass to `toml.load`
+ :param key_name: Specify the name of the key to pull the list from
+ (cannot directly convert from toml)
+ :param encoding: File encoding
+ :param errors: How to handle encoding errors
+ :param kwargs: parameters to pass to `Box()`
+ :return: BoxList object from toml data
+ """
+ bx_args = {}
+ for arg in list(kwargs.keys()):
+ if arg in BOX_PARAMETERS:
+ bx_args[arg] = kwargs.pop(arg)
+
+ data = _from_toml(toml_string=toml_string, filename=filename, encoding=encoding, errors=errors)
+ if key_name not in data:
+ raise BoxError(f'{key_name} was not found.')
+ return cls(data[key_name], **bx_args)
+
+ def to_csv(self, filename, encoding: str = 'utf-8', errors: str = 'strict'):
+ _to_csv(self, filename=filename, encoding=encoding, errors=errors)
+
+ @classmethod
+ def from_csv(cls, filename, encoding: str = 'utf-8', errors: str = 'strict'):
+ return cls(_from_csv(filename=filename, encoding=encoding, errors=errors))
diff --git a/libs/dynaconf/vendor/box/config_box.py b/libs/dynaconf/vendor/box/config_box.py
new file mode 100644
index 000000000..875699574
--- /dev/null
+++ b/libs/dynaconf/vendor/box/config_box.py
@@ -0,0 +1,133 @@
+#!/usr/bin/env python
+# -*- coding: UTF-8 -*-
+
+from dynaconf.vendor.box.box import Box
+
+
+class ConfigBox(Box):
+ """
+ Modified box object to add object transforms.
+
+ Allows for built-in transforms like:
+
+ cns = ConfigBox(my_bool='yes', my_int='5', my_list='5,4,3,3,2')
+
+ cns.bool('my_bool') # True
+ cns.int('my_int') # 5
+ cns.list('my_list', mod=lambda x: int(x)) # [5, 4, 3, 3, 2]
+ """
+
+ _protected_keys = dir(Box) + ['bool', 'int', 'float', 'list', 'getboolean', 'getfloat', 'getint']
+
+ def __getattr__(self, item):
+ """
+ Config file keys are stored in lower case, so be a little more
+ loosey goosey when looking them up
+ """
+ try:
+ return super().__getattr__(item)
+ except AttributeError:
+ return super().__getattr__(item.lower())
+
+ def __dir__(self):
+ return super().__dir__() + ['bool', 'int', 'float', 'list', 'getboolean', 'getfloat', 'getint']
+
+ def bool(self, item, default=None):
+ """
+ Return value of key as a boolean
+
+ :param item: key of value to transform
+ :param default: value to return if item does not exist
+ :return: approximated bool of value
+ """
+ try:
+ item = self.__getattr__(item)
+ except AttributeError as err:
+ if default is not None:
+ return default
+ raise err
+
+ if isinstance(item, (bool, int)):
+ return bool(item)
+
+ if (isinstance(item, str)
+ and item.lower() in ('n', 'no', 'false', 'f', '0')):
+ return False
+
+ return bool(item)
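+ # Example (editor's note): ConfigBox(debug='No').bool('debug') is False; the
+ # strings n/no/false/f/0 (and anything falsy) map to False.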
+
+ def int(self, item, default=None):
+ """
+ Return value of key as an int
+
+ :param item: key of value to transform
+ :param default: value to return if item does not exist
+ :return: int of value
+ """
+ try:
+ item = self.__getattr__(item)
+ except AttributeError as err:
+ if default is not None:
+ return default
+ raise err
+ return int(item)
+
+ def float(self, item, default=None):
+ """
+ Return value of key as a float
+
+ :param item: key of value to transform
+ :param default: value to return if item does not exist
+ :return: float of value
+ """
+ try:
+ item = self.__getattr__(item)
+ except AttributeError as err:
+ if default is not None:
+ return default
+ raise err
+ return float(item)
+
+ def list(self, item, default=None, spliter=",", strip=True, mod=None):
+ """
+ Return value of key as a list
+
+ :param item: key of value to transform
+ :param default: value to return if item does not exist
+ :param spliter: character to split the string on
+ :param strip: strip whitespace (and surrounding brackets) from each item
+ :param mod: function to map against the list
+ :return: list of items
+ """
+ try:
+ item = self.__getattr__(item)
+ except AttributeError as err:
+ if default is not None:
+ return default
+ raise err
+ if strip:
+ item = item.lstrip('[').rstrip(']')
+ out = [x.strip() if strip else x for x in item.split(spliter)]
+ if mod:
+ return list(map(mod, out))
+ return out
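+ # Example (editor's note): ConfigBox(ports='80, 443').list('ports', mod=int)
+ # returns [80, 443].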
+
+ # loose configparser compatibility
+
+ def getboolean(self, item, default=None):
+ return self.bool(item, default)
+
+ def getint(self, item, default=None):
+ return self.int(item, default)
+
+ def getfloat(self, item, default=None):
+ return self.float(item, default)
+
+ def __repr__(self):
+ return '<ConfigBox: {0}>'.format(str(self.to_dict()))
+
+ def copy(self):
+ return ConfigBox(super().copy())
+
+ def __copy__(self):
+ return ConfigBox(super().copy())
diff --git a/libs/dynaconf/vendor/box/converters.py b/libs/dynaconf/vendor/box/converters.py
new file mode 100644
index 000000000..08694fe1e
--- /dev/null
+++ b/libs/dynaconf/vendor/box/converters.py
@@ -0,0 +1,129 @@
+#!/usr/bin/env python
+# -*- coding: UTF-8 -*-
+
+# Abstract converter functions for use in any Box class
+
+import csv
+import json
+import sys
+import warnings
+from pathlib import Path
+
+import dynaconf.vendor.ruamel.yaml as yaml
+from dynaconf.vendor.box.exceptions import BoxError, BoxWarning
+from dynaconf.vendor import tomllib as toml
+
+
+BOX_PARAMETERS = ('default_box', 'default_box_attr', 'conversion_box',
+ 'frozen_box', 'camel_killer_box',
+ 'box_safe_prefix', 'box_duplicates', 'ordered_box',
+ 'default_box_none_transform', 'box_dots', 'modify_tuples_box',
+ 'box_intact_types', 'box_recast')
+
+
+def _exists(filename, create=False):
+ path = Path(filename)
+ if create:
+ try:
+ path.touch(exist_ok=True)
+ except OSError as err:
+ raise BoxError(f'Could not create file {filename} - {err}')
+ else:
+ return
+ if not path.exists():
+ raise BoxError(f'File "{filename}" does not exist')
+ if not path.is_file():
+ raise BoxError(f'{filename} is not a file')
+
+
+def _to_json(obj, filename=None, encoding="utf-8", errors="strict", **json_kwargs):
+ json_dump = json.dumps(obj, ensure_ascii=False, **json_kwargs)
+ if filename:
+ _exists(filename, create=True)
+ with open(filename, 'w', encoding=encoding, errors=errors) as f:
+ f.write(json_dump)
+ else:
+ return json_dump
+
+
+def _from_json(json_string=None, filename=None, encoding="utf-8", errors="strict", multiline=False, **kwargs):
+ if filename:
+ _exists(filename)
+ with open(filename, 'r', encoding=encoding, errors=errors) as f:
+ if multiline:
+ data = [json.loads(line.strip(), **kwargs) for line in f
+ if line.strip() and not line.strip().startswith("#")]
+ else:
+ data = json.load(f, **kwargs)
+ elif json_string:
+ data = json.loads(json_string, **kwargs)
+ else:
+ raise BoxError('from_json requires a string or filename')
+ return data
+
+
+def _to_yaml(obj, filename=None, default_flow_style=False, encoding="utf-8", errors="strict", **yaml_kwargs):
+ if filename:
+ _exists(filename, create=True)
+ with open(filename, 'w',
+ encoding=encoding, errors=errors) as f:
+ yaml.dump(obj, stream=f, default_flow_style=default_flow_style, **yaml_kwargs)
+ else:
+ return yaml.dump(obj, default_flow_style=default_flow_style, **yaml_kwargs)
+
+
+def _from_yaml(yaml_string=None, filename=None, encoding="utf-8", errors="strict", **kwargs):
+ if 'Loader' not in kwargs:
+ kwargs['Loader'] = yaml.SafeLoader
+ if filename:
+ _exists(filename)
+ with open(filename, 'r', encoding=encoding, errors=errors) as f:
+ data = yaml.load(f, **kwargs)
+ elif yaml_string:
+ data = yaml.load(yaml_string, **kwargs)
+ else:
+ raise BoxError('from_yaml requires a string or filename')
+ return data
+
+
+def _to_toml(obj, filename=None, encoding="utf-8", errors="strict"):
+ if filename:
+ _exists(filename, create=True)
+ with open(filename, 'w', encoding=encoding, errors=errors) as f:
+ toml.dump(obj, f)
+ else:
+ return toml.dumps(obj)
+
+
+def _from_toml(toml_string=None, filename=None, encoding="utf-8", errors="strict"):
+ if filename:
+ _exists(filename)
+ with open(filename, 'r', encoding=encoding, errors=errors) as f:
+ data = toml.load(f)
+ elif toml_string:
+ data = toml.loads(toml_string)
+ else:
+ raise BoxError('from_toml requires a string or filename')
+ return data
+
+
+def _to_csv(box_list, filename, encoding="utf-8", errors="strict"):
+ csv_column_names = list(box_list[0].keys())
+ for row in box_list:
+ if list(row.keys()) != csv_column_names:
+ raise BoxError('BoxList must contain the same dictionary structure for every item to convert to csv')
+
+ if filename:
+ _exists(filename, create=True)
+ with open(filename, 'w', encoding=encoding, errors=errors, newline='') as csv_file:
+ writer = csv.DictWriter(csv_file, fieldnames=csv_column_names)
+ writer.writeheader()
+ for data in box_list:
+ writer.writerow(data)
+
+
+def _from_csv(filename, encoding="utf-8", errors="strict"):
+ _exists(filename)
+ with open(filename, 'r', encoding=encoding, errors=errors, newline='') as f:
+ reader = csv.DictReader(f)
+ return [row for row in reader]
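+
+# Editor's note: _from_csv reads every cell back as a string, so a round trip
+# like _to_csv(BoxList([{'a': 1}]), 'rows.csv') then _from_csv('rows.csv')
+# yields [{'a': '1'}] ('rows.csv' is illustrative).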
diff --git a/libs/dynaconf/vendor/box/exceptions.py b/libs/dynaconf/vendor/box/exceptions.py
new file mode 100644
index 000000000..57aeaf227
--- /dev/null
+++ b/libs/dynaconf/vendor/box/exceptions.py
@@ -0,0 +1,22 @@
+#!/usr/bin/env python
+# -*- coding: UTF-8 -*-
+
+
+class BoxError(Exception):
+ """Non standard dictionary exceptions"""
+
+
+class BoxKeyError(BoxError, KeyError, AttributeError):
+ """Key does not exist"""
+
+
+class BoxTypeError(BoxError, TypeError):
+ """Cannot handle that instance's type"""
+
+
+class BoxValueError(BoxError, ValueError):
+ """Issue doing something with that value"""
+
+
+class BoxWarning(UserWarning):
+ """Here be dragons"""
diff --git a/libs/dynaconf/vendor/box/from_file.py b/libs/dynaconf/vendor/box/from_file.py
new file mode 100644
index 000000000..a82ac9659
--- /dev/null
+++ b/libs/dynaconf/vendor/box/from_file.py
@@ -0,0 +1,73 @@
+#!/usr/bin/env python
+# -*- coding: UTF-8 -*-
+from json import JSONDecodeError
+from pathlib import Path
+from typing import Union
+from dynaconf.vendor.tomllib import TOMLDecodeError
+from dynaconf.vendor.ruamel.yaml import YAMLError
+
+
+from .exceptions import BoxError
+from .box import Box
+from .box_list import BoxList
+
+__all__ = ['box_from_file']
+
+
+def _to_json(data):
+ try:
+ return Box.from_json(data)
+ except JSONDecodeError:
+ raise BoxError('File is not JSON as expected')
+ except BoxError:
+ return BoxList.from_json(data)
+
+
+def _to_yaml(data):
+ try:
+ return Box.from_yaml(data)
+ except YAMLError:
+ raise BoxError('File is not YAML as expected')
+ except BoxError:
+ return BoxList.from_yaml(data)
+
+
+def _to_toml(data):
+ try:
+ return Box.from_toml(data)
+ except TOMLDecodeError:
+ raise BoxError('File is not TOML as expected')
+
+
+def box_from_file(file: Union[str, Path], file_type: str = None,
+ encoding: str = "utf-8", errors: str = "strict") -> Union[Box, BoxList]:
+ """
+ Loads the provided file and tries to parse it into a Box or BoxList object as appropriate.
+
+ :param file: Location of file
+ :param file_type: manually specify file type: json, toml or yaml
+ :param encoding: File encoding
+ :param errors: How to handle encoding errors
+ :return: Box or BoxList
+ """
+
+ if not isinstance(file, Path):
+ file = Path(file)
+ if not file.exists():
+ raise BoxError(f'file "{file}" does not exist')
+ data = file.read_text(encoding=encoding, errors=errors)
+ if file_type:
+ if file_type.lower() == 'json':
+ return _to_json(data)
+ if file_type.lower() == 'yaml':
+ return _to_yaml(data)
+ if file_type.lower() == 'toml':
+ return _to_toml(data)
+ raise BoxError(f'"{file_type}" is an unknown type, please use either toml, yaml or json')
+ if file.suffix in ('.json', '.jsn'):
+ return _to_json(data)
+ if file.suffix in ('.yaml', '.yml'):
+ return _to_yaml(data)
+ if file.suffix in ('.tml', '.toml'):
+ return _to_toml(data)
+ raise BoxError('Could not determine the file type from the extension, please provide file_type')
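+
+# Usage sketch (editor's note, 'settings.yaml' is illustrative):
+# box_from_file('settings.yaml') returns a Box for a mapping document or a
+# BoxList for a sequence; pass file_type= explicitly for other extensions.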
diff --git a/libs/dynaconf/vendor/box/shorthand_box.py b/libs/dynaconf/vendor/box/shorthand_box.py
new file mode 100644
index 000000000..746f7619a
--- /dev/null
+++ b/libs/dynaconf/vendor/box/shorthand_box.py
@@ -0,0 +1,38 @@
+#!/usr/bin/env python
+# -*- coding: UTF-8 -*-
+
+from dynaconf.vendor.box.box import Box
+
+
+class SBox(Box):
+ """
+ ShorthandBox (SBox) allows for
+ property access of `dict`, `json`, `yaml` and `toml`
+ """
+ _protected_keys = dir({}) + ['to_dict', 'to_json', 'to_yaml', 'json', 'yaml', 'from_yaml', 'from_json',
+ 'dict', 'toml', 'from_toml', 'to_toml']
+
+ @property
+ def dict(self):
+ return self.to_dict()
+
+ @property
+ def json(self):
+ return self.to_json()
+
+ @property
+ def yaml(self):
+ return self.to_yaml()
+
+ @property
+ def toml(self):
+ return self.to_toml()
+
+ def __repr__(self):
+ return '<ShorthandBox: {0}>'.format(str(self.to_dict()))
+
+ def copy(self):
+ return SBox(super(SBox, self).copy())
+
+ def __copy__(self):
+ return SBox(super(SBox, self).copy())
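+
+# Example (editor's note): SBox(a=1).json == '{"a": 1}'; each property is
+# shorthand for the matching to_*() call.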
diff --git a/libs/dynaconf/vendor/click/__init__.py b/libs/dynaconf/vendor/click/__init__.py
new file mode 100644
index 000000000..9cd0129bf
--- /dev/null
+++ b/libs/dynaconf/vendor/click/__init__.py
@@ -0,0 +1,75 @@
+"""
+Click is a simple Python module inspired by the stdlib optparse to make
+writing command line scripts fun. Unlike other modules, it's based
+around a simple API that does not come with too much magic and is
+composable.
+"""
+from .core import Argument
+from .core import BaseCommand
+from .core import Command
+from .core import CommandCollection
+from .core import Context
+from .core import Group
+from .core import MultiCommand
+from .core import Option
+from .core import Parameter
+from .decorators import argument
+from .decorators import command
+from .decorators import confirmation_option
+from .decorators import group
+from .decorators import help_option
+from .decorators import make_pass_decorator
+from .decorators import option
+from .decorators import pass_context
+from .decorators import pass_obj
+from .decorators import password_option
+from .decorators import version_option
+from .exceptions import Abort
+from .exceptions import BadArgumentUsage
+from .exceptions import BadOptionUsage
+from .exceptions import BadParameter
+from .exceptions import ClickException
+from .exceptions import FileError
+from .exceptions import MissingParameter
+from .exceptions import NoSuchOption
+from .exceptions import UsageError
+from .formatting import HelpFormatter
+from .formatting import wrap_text
+from .globals import get_current_context
+from .parser import OptionParser
+from .termui import clear
+from .termui import confirm
+from .termui import echo_via_pager
+from .termui import edit
+from .termui import get_terminal_size
+from .termui import getchar
+from .termui import launch
+from .termui import pause
+from .termui import progressbar
+from .termui import prompt
+from .termui import secho
+from .termui import style
+from .termui import unstyle
+from .types import BOOL
+from .types import Choice
+from .types import DateTime
+from .types import File
+from .types import FLOAT
+from .types import FloatRange
+from .types import INT
+from .types import IntRange
+from .types import ParamType
+from .types import Path
+from .types import STRING
+from .types import Tuple
+from .types import UNPROCESSED
+from .types import UUID
+from .utils import echo
+from .utils import format_filename
+from .utils import get_app_dir
+from .utils import get_binary_stream
+from .utils import get_os_args
+from .utils import get_text_stream
+from .utils import open_file
+
+__version__ = "8.0.0.dev"
diff --git a/libs/dynaconf/vendor/click/_bashcomplete.py b/libs/dynaconf/vendor/click/_bashcomplete.py
new file mode 100644
index 000000000..b9e4900e0
--- /dev/null
+++ b/libs/dynaconf/vendor/click/_bashcomplete.py
@@ -0,0 +1,371 @@
+import copy
+import os
+import re
+from collections import abc
+
+from .core import Argument
+from .core import MultiCommand
+from .core import Option
+from .parser import split_arg_string
+from .types import Choice
+from .utils import echo
+
+WORDBREAK = "="
+
+# Note, only BASH version 4.4 and later have the nosort option.
+COMPLETION_SCRIPT_BASH = """
+%(complete_func)s() {
+ local IFS=$'\n'
+ COMPREPLY=( $( env COMP_WORDS="${COMP_WORDS[*]}" \\
+ COMP_CWORD=$COMP_CWORD \\
+ %(autocomplete_var)s=complete $1 ) )
+ return 0
+}
+
+%(complete_func)setup() {
+ local COMPLETION_OPTIONS=""
+ local BASH_VERSION_ARR=(${BASH_VERSION//./ })
+ # Only BASH version 4.4 and later have the nosort option.
+ if [ ${BASH_VERSION_ARR[0]} -gt 4 ] || ([ ${BASH_VERSION_ARR[0]} -eq 4 ] \
+&& [ ${BASH_VERSION_ARR[1]} -ge 4 ]); then
+ COMPLETION_OPTIONS="-o nosort"
+ fi
+
+ complete $COMPLETION_OPTIONS -F %(complete_func)s %(script_names)s
+}
+
+%(complete_func)setup
+"""
+
+COMPLETION_SCRIPT_ZSH = """
+#compdef %(script_names)s
+
+%(complete_func)s() {
+ local -a completions
+ local -a completions_with_descriptions
+ local -a response
+ (( ! $+commands[%(script_names)s] )) && return 1
+
+ response=("${(@f)$( env COMP_WORDS=\"${words[*]}\" \\
+ COMP_CWORD=$((CURRENT-1)) \\
+ %(autocomplete_var)s=\"complete_zsh\" \\
+ %(script_names)s )}")
+
+ for key descr in ${(kv)response}; do
+ if [[ "$descr" == "_" ]]; then
+ completions+=("$key")
+ else
+ completions_with_descriptions+=("$key":"$descr")
+ fi
+ done
+
+ if [ -n "$completions_with_descriptions" ]; then
+ _describe -V unsorted completions_with_descriptions -U
+ fi
+
+ if [ -n "$completions" ]; then
+ compadd -U -V unsorted -a completions
+ fi
+ compstate[insert]="automenu"
+}
+
+compdef %(complete_func)s %(script_names)s
+"""
+
+COMPLETION_SCRIPT_FISH = (
+ "complete --no-files --command %(script_names)s --arguments"
+ ' "(env %(autocomplete_var)s=complete_fish'
+ " COMP_WORDS=(commandline -cp) COMP_CWORD=(commandline -t)"
+ ' %(script_names)s)"'
+)
+
+_completion_scripts = {
+ "bash": COMPLETION_SCRIPT_BASH,
+ "zsh": COMPLETION_SCRIPT_ZSH,
+ "fish": COMPLETION_SCRIPT_FISH,
+}
+
+_invalid_ident_char_re = re.compile(r"[^a-zA-Z0-9_]")
+
+
+def get_completion_script(prog_name, complete_var, shell):
+ cf_name = _invalid_ident_char_re.sub("", prog_name.replace("-", "_"))
+ script = _completion_scripts.get(shell, COMPLETION_SCRIPT_BASH)
+ return (
+ script
+ % {
+ "complete_func": f"_{cf_name}_completion",
+ "script_names": prog_name,
+ "autocomplete_var": complete_var,
+ }
+ ).strip() + ";"
+
+
+def resolve_ctx(cli, prog_name, args):
+ """Parse into a hierarchy of contexts. Contexts are connected
+ through the parent variable.
+
+ :param cli: command definition
+ :param prog_name: the program that is running
+ :param args: full list of args
+ :return: the final context/command parsed
+ """
+ ctx = cli.make_context(prog_name, args, resilient_parsing=True)
+ args = ctx.protected_args + ctx.args
+ while args:
+ if isinstance(ctx.command, MultiCommand):
+ if not ctx.command.chain:
+ cmd_name, cmd, args = ctx.command.resolve_command(ctx, args)
+ if cmd is None:
+ return ctx
+ ctx = cmd.make_context(
+ cmd_name, args, parent=ctx, resilient_parsing=True
+ )
+ args = ctx.protected_args + ctx.args
+ else:
+ # Walk chained subcommand contexts saving the last one.
+ while args:
+ cmd_name, cmd, args = ctx.command.resolve_command(ctx, args)
+ if cmd is None:
+ return ctx
+ sub_ctx = cmd.make_context(
+ cmd_name,
+ args,
+ parent=ctx,
+ allow_extra_args=True,
+ allow_interspersed_args=False,
+ resilient_parsing=True,
+ )
+ args = sub_ctx.args
+ ctx = sub_ctx
+ args = sub_ctx.protected_args + sub_ctx.args
+ else:
+ break
+ return ctx
+
+
+def start_of_option(param_str):
+ """
+ :param param_str: param_str to check
+ :return: whether or not this is the start of an option declaration
+ (i.e. starts "-" or "--")
+ """
+ return param_str and param_str[:1] == "-"
+
+
+def is_incomplete_option(all_args, cmd_param):
+ """
+ :param all_args: the full original list of args supplied
+ :param cmd_param: the current command parameter
+ :return: whether or not the last option declaration (i.e. starts with
+ "-" or "--") is incomplete and corresponds to this cmd_param. In
+ other words whether this cmd_param option can still accept
+ values
+ """
+ if not isinstance(cmd_param, Option):
+ return False
+ if cmd_param.is_flag:
+ return False
+ last_option = None
+ for index, arg_str in enumerate(
+ reversed([arg for arg in all_args if arg != WORDBREAK])
+ ):
+ if index + 1 > cmd_param.nargs:
+ break
+ if start_of_option(arg_str):
+ last_option = arg_str
+
+ return bool(last_option and last_option in cmd_param.opts)
+
+
+def is_incomplete_argument(current_params, cmd_param):
+ """
+ :param current_params: the current params and values for this
+ argument as already entered
+ :param cmd_param: the current command parameter
+ :return: whether or not the last argument is incomplete and
+ corresponds to this cmd_param. In other words, whether or not
+ this cmd_param argument can still accept values
+ """
+ if not isinstance(cmd_param, Argument):
+ return False
+ current_param_values = current_params[cmd_param.name]
+ if current_param_values is None:
+ return True
+ if cmd_param.nargs == -1:
+ return True
+ if (
+ isinstance(current_param_values, abc.Iterable)
+ and cmd_param.nargs > 1
+ and len(current_param_values) < cmd_param.nargs
+ ):
+ return True
+ return False
+
+
+def get_user_autocompletions(ctx, args, incomplete, cmd_param):
+ """
+ :param ctx: context associated with the parsed command
+ :param args: full list of args
+ :param incomplete: the incomplete text to autocomplete
+ :param cmd_param: command definition
+ :return: all the possible user-specified completions for the param
+ """
+ results = []
+ if isinstance(cmd_param.type, Choice):
+ # Choices don't support descriptions.
+ results = [
+ (c, None) for c in cmd_param.type.choices if str(c).startswith(incomplete)
+ ]
+ elif cmd_param.autocompletion is not None:
+ dynamic_completions = cmd_param.autocompletion(
+ ctx=ctx, args=args, incomplete=incomplete
+ )
+ results = [
+ c if isinstance(c, tuple) else (c, None) for c in dynamic_completions
+ ]
+ return results
+
+
+def get_visible_commands_starting_with(ctx, starts_with):
+ """
+ :param ctx: context associated with the parsed command
+ :param starts_with: string that visible commands must start with.
+ :return: all visible (not hidden) commands that start with starts_with.
+ """
+ for c in ctx.command.list_commands(ctx):
+ if c.startswith(starts_with):
+ command = ctx.command.get_command(ctx, c)
+ if not command.hidden:
+ yield command
+
+
+def add_subcommand_completions(ctx, incomplete, completions_out):
+ # Add subcommand completions.
+ if isinstance(ctx.command, MultiCommand):
+ completions_out.extend(
+ [
+ (c.name, c.get_short_help_str())
+ for c in get_visible_commands_starting_with(ctx, incomplete)
+ ]
+ )
+
+ # Walk up the context list and add any other completion
+ # possibilities from chained commands
+ while ctx.parent is not None:
+ ctx = ctx.parent
+ if isinstance(ctx.command, MultiCommand) and ctx.command.chain:
+ remaining_commands = [
+ c
+ for c in get_visible_commands_starting_with(ctx, incomplete)
+ if c.name not in ctx.protected_args
+ ]
+ completions_out.extend(
+ [(c.name, c.get_short_help_str()) for c in remaining_commands]
+ )
+
+
+def get_choices(cli, prog_name, args, incomplete):
+ """
+ :param cli: command definition
+ :param prog_name: the program that is running
+ :param args: full list of args
+ :param incomplete: the incomplete text to autocomplete
+ :return: all the possible completions for the incomplete
+ """
+ all_args = copy.deepcopy(args)
+
+ ctx = resolve_ctx(cli, prog_name, args)
+ if ctx is None:
+ return []
+
+ has_double_dash = "--" in all_args
+
+ # In newer versions of bash long opts with '='s are partitioned, but
+ # it's easier to parse without the '='
+ if start_of_option(incomplete) and WORDBREAK in incomplete:
+ partition_incomplete = incomplete.partition(WORDBREAK)
+ all_args.append(partition_incomplete[0])
+ incomplete = partition_incomplete[2]
+ elif incomplete == WORDBREAK:
+ incomplete = ""
+
+ completions = []
+ if not has_double_dash and start_of_option(incomplete):
+ # completions for partial options
+ for param in ctx.command.params:
+ if isinstance(param, Option) and not param.hidden:
+ param_opts = [
+ param_opt
+ for param_opt in param.opts + param.secondary_opts
+ if param_opt not in all_args or param.multiple
+ ]
+ completions.extend(
+ [(o, param.help) for o in param_opts if o.startswith(incomplete)]
+ )
+ return completions
+ # completion for option values from user supplied values
+ for param in ctx.command.params:
+ if is_incomplete_option(all_args, param):
+ return get_user_autocompletions(ctx, all_args, incomplete, param)
+ # completion for argument values from user supplied values
+ for param in ctx.command.params:
+ if is_incomplete_argument(ctx.params, param):
+ return get_user_autocompletions(ctx, all_args, incomplete, param)
+
+ add_subcommand_completions(ctx, incomplete, completions)
+ # Sort before returning so that proper ordering can be enforced in custom types.
+ return sorted(completions)
+
+
+def do_complete(cli, prog_name, include_descriptions):
+ cwords = split_arg_string(os.environ["COMP_WORDS"])
+ cword = int(os.environ["COMP_CWORD"])
+ args = cwords[1:cword]
+ try:
+ incomplete = cwords[cword]
+ except IndexError:
+ incomplete = ""
+
+ for item in get_choices(cli, prog_name, args, incomplete):
+ echo(item[0])
+ if include_descriptions:
+ # ZSH has trouble dealing with empty array parameters when
+ # returned from commands, use '_' to indicate no description
+ # is present.
+ echo(item[1] if item[1] else "_")
+
+ return True
+
+
+def do_complete_fish(cli, prog_name):
+ cwords = split_arg_string(os.environ["COMP_WORDS"])
+ incomplete = os.environ["COMP_CWORD"]
+ args = cwords[1:]
+
+ for item in get_choices(cli, prog_name, args, incomplete):
+ if item[1]:
+ echo(f"{item[0]}\t{item[1]}")
+ else:
+ echo(item[0])
+
+ return True
+
+
+def bashcomplete(cli, prog_name, complete_var, complete_instr):
+ if "_" in complete_instr:
+ command, shell = complete_instr.split("_", 1)
+ else:
+ command = complete_instr
+ shell = "bash"
+
+ if command == "source":
+ echo(get_completion_script(prog_name, complete_var, shell))
+ return True
+ elif command == "complete":
+ if shell == "fish":
+ return do_complete_fish(cli, prog_name)
+ elif shell in {"bash", "zsh"}:
+ return do_complete(cli, prog_name, shell == "zsh")
+
+ return False
diff --git a/libs/dynaconf/vendor/click/_compat.py b/libs/dynaconf/vendor/click/_compat.py
new file mode 100644
index 000000000..85568ca3e
--- /dev/null
+++ b/libs/dynaconf/vendor/click/_compat.py
@@ -0,0 +1,611 @@
+import codecs
+import io
+import os
+import re
+import sys
+from weakref import WeakKeyDictionary
+
+CYGWIN = sys.platform.startswith("cygwin")
+MSYS2 = sys.platform.startswith("win") and ("GCC" in sys.version)
+# Determine local App Engine environment, per Google's own suggestion
+APP_ENGINE = "APPENGINE_RUNTIME" in os.environ and "Development/" in os.environ.get(
+ "SERVER_SOFTWARE", ""
+)
+WIN = sys.platform.startswith("win") and not APP_ENGINE and not MSYS2
+DEFAULT_COLUMNS = 80
+auto_wrap_for_ansi = None
+colorama = None
+get_winterm_size = None
+_ansi_re = re.compile(r"\033\[[;?0-9]*[a-zA-Z]")
+
+
+def get_filesystem_encoding():
+ return sys.getfilesystemencoding() or sys.getdefaultencoding()
+
+
+def _make_text_stream(
+ stream, encoding, errors, force_readable=False, force_writable=False
+):
+ if encoding is None:
+ encoding = get_best_encoding(stream)
+ if errors is None:
+ errors = "replace"
+ return _NonClosingTextIOWrapper(
+ stream,
+ encoding,
+ errors,
+ line_buffering=True,
+ force_readable=force_readable,
+ force_writable=force_writable,
+ )
+
+
+def is_ascii_encoding(encoding):
+ """Checks if a given encoding is ascii."""
+ try:
+ return codecs.lookup(encoding).name == "ascii"
+ except LookupError:
+ return False
+
+
+def get_best_encoding(stream):
+ """Returns the default stream encoding if not found."""
+ rv = getattr(stream, "encoding", None) or sys.getdefaultencoding()
+ if is_ascii_encoding(rv):
+ return "utf-8"
+ return rv
+
+
+class _NonClosingTextIOWrapper(io.TextIOWrapper):
+ def __init__(
+ self,
+ stream,
+ encoding,
+ errors,
+ force_readable=False,
+ force_writable=False,
+ **extra,
+ ):
+ self._stream = stream = _FixupStream(stream, force_readable, force_writable)
+ super().__init__(stream, encoding, errors, **extra)
+
+ def __del__(self):
+ try:
+ self.detach()
+ except Exception:
+ pass
+
+ def isatty(self):
+ # https://bitbucket.org/pypy/pypy/issue/1803
+ return self._stream.isatty()
+
+
+class _FixupStream:
+ """The new io interface needs more from streams than streams
+ traditionally implement. As such, this fix-up code is necessary in
+ some circumstances.
+
+ The forcing of readable and writable flags is there because some tools
+ put badly patched objects on sys (one such offender is certain versions
+ of jupyter notebook).
+ """
+
+ def __init__(self, stream, force_readable=False, force_writable=False):
+ self._stream = stream
+ self._force_readable = force_readable
+ self._force_writable = force_writable
+
+ def __getattr__(self, name):
+ return getattr(self._stream, name)
+
+ def read1(self, size):
+ f = getattr(self._stream, "read1", None)
+ if f is not None:
+ return f(size)
+
+ return self._stream.read(size)
+
+ def readable(self):
+ if self._force_readable:
+ return True
+ x = getattr(self._stream, "readable", None)
+ if x is not None:
+ return x()
+ try:
+ self._stream.read(0)
+ except Exception:
+ return False
+ return True
+
+ def writable(self):
+ if self._force_writable:
+ return True
+ x = getattr(self._stream, "writable", None)
+ if x is not None:
+ return x()
+ try:
+ self._stream.write("")
+ except Exception:
+ try:
+ self._stream.write(b"")
+ except Exception:
+ return False
+ return True
+
+ def seekable(self):
+ x = getattr(self._stream, "seekable", None)
+ if x is not None:
+ return x()
+ try:
+ self._stream.seek(self._stream.tell())
+ except Exception:
+ return False
+ return True
+
+
+def is_bytes(x):
+ return isinstance(x, (bytes, memoryview, bytearray))
+
+
+def _is_binary_reader(stream, default=False):
+ try:
+ return isinstance(stream.read(0), bytes)
+ except Exception:
+ return default
+ # This happens in some cases where the stream was already
+ # closed. In this case, we assume the default.
+
+
+def _is_binary_writer(stream, default=False):
+ try:
+ stream.write(b"")
+ except Exception:
+ try:
+ stream.write("")
+ return False
+ except Exception:
+ pass
+ return default
+ return True
+
+
+def _find_binary_reader(stream):
+ # We need to figure out if the given stream is already binary.
+ # This can happen because the official docs recommend detaching
+ # the streams to get binary streams. Some code might do this, so
+ # we need to deal with this case explicitly.
+ if _is_binary_reader(stream, False):
+ return stream
+
+ buf = getattr(stream, "buffer", None)
+
+ # Same situation here; this time we assume that the buffer is
+ # actually binary in case it's closed.
+ if buf is not None and _is_binary_reader(buf, True):
+ return buf
+
+
+def _find_binary_writer(stream):
+ # We need to figure out if the given stream is already binary.
+ # This can happen because the official docs recommend detaching
+ # the streams to get binary streams. Some code might do this, so
+ # we need to deal with this case explicitly.
+ if _is_binary_writer(stream, False):
+ return stream
+
+ buf = getattr(stream, "buffer", None)
+
+ # Same situation here; this time we assume that the buffer is
+ # actually binary in case it's closed.
+ if buf is not None and _is_binary_writer(buf, True):
+ return buf
+
+
+def _stream_is_misconfigured(stream):
+ """A stream is misconfigured if its encoding is ASCII."""
+ # If the stream does not have an encoding set, we assume it's set
+ # to ASCII. This appears to happen in certain unittest
+ # environments. It's not quite clear what the correct behavior is
+ # but this at least will force Click to recover somehow.
+ return is_ascii_encoding(getattr(stream, "encoding", None) or "ascii")
+
+
+def _is_compat_stream_attr(stream, attr, value):
+ """A stream attribute is compatible if it is equal to the
+ desired value or the desired value is unset and the attribute
+ has a value.
+ """
+ stream_value = getattr(stream, attr, None)
+ return stream_value == value or (value is None and stream_value is not None)
+
+
+def _is_compatible_text_stream(stream, encoding, errors):
+ """Check if a stream's encoding and errors attributes are
+ compatible with the desired values.
+ """
+ return _is_compat_stream_attr(
+ stream, "encoding", encoding
+ ) and _is_compat_stream_attr(stream, "errors", errors)
+
+
+def _force_correct_text_stream(
+ text_stream,
+ encoding,
+ errors,
+ is_binary,
+ find_binary,
+ force_readable=False,
+ force_writable=False,
+):
+ if is_binary(text_stream, False):
+ binary_reader = text_stream
+ else:
+ # If the stream looks compatible, and won't default to a
+ # misconfigured ascii encoding, return it as-is.
+ if _is_compatible_text_stream(text_stream, encoding, errors) and not (
+ encoding is None and _stream_is_misconfigured(text_stream)
+ ):
+ return text_stream
+
+ # Otherwise, get the underlying binary reader.
+ binary_reader = find_binary(text_stream)
+
+ # If that's not possible, silently use the original reader
+ # and get mojibake instead of exceptions.
+ if binary_reader is None:
+ return text_stream
+
+ # Default errors to replace instead of strict in order to get
+ # something that works.
+ if errors is None:
+ errors = "replace"
+
+ # Wrap the binary stream in a text stream with the correct
+ # encoding parameters.
+ return _make_text_stream(
+ binary_reader,
+ encoding,
+ errors,
+ force_readable=force_readable,
+ force_writable=force_writable,
+ )
+
+
+def _force_correct_text_reader(text_reader, encoding, errors, force_readable=False):
+ return _force_correct_text_stream(
+ text_reader,
+ encoding,
+ errors,
+ _is_binary_reader,
+ _find_binary_reader,
+ force_readable=force_readable,
+ )
+
+
+def _force_correct_text_writer(text_writer, encoding, errors, force_writable=False):
+ return _force_correct_text_stream(
+ text_writer,
+ encoding,
+ errors,
+ _is_binary_writer,
+ _find_binary_writer,
+ force_writable=force_writable,
+ )
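+
+
+# A minimal illustrative sketch (not used by Click itself) of how the
+# helpers above cooperate: a text stream that silently defaulted to an
+# ASCII encoding is rebuilt on top of its binary buffer with an explicit
+# UTF-8 encoding. The function name is hypothetical.
+def _example_force_utf8_stdout():
+    stream = sys.stdout
+    if _stream_is_misconfigured(stream):
+        # Falls back to the original stream when no binary buffer can
+        # be found; errors defaults to "replace" inside the helper.
+        stream = _force_correct_text_writer(
+            stream, "utf-8", None, force_writable=True
+        )
+    return stream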
+
+
+def get_binary_stdin():
+ reader = _find_binary_reader(sys.stdin)
+ if reader is None:
+ raise RuntimeError("Was not able to determine binary stream for sys.stdin.")
+ return reader
+
+
+def get_binary_stdout():
+ writer = _find_binary_writer(sys.stdout)
+ if writer is None:
+ raise RuntimeError("Was not able to determine binary stream for sys.stdout.")
+ return writer
+
+
+def get_binary_stderr():
+ writer = _find_binary_writer(sys.stderr)
+ if writer is None:
+ raise RuntimeError("Was not able to determine binary stream for sys.stderr.")
+ return writer
+
+
+def get_text_stdin(encoding=None, errors=None):
+ rv = _get_windows_console_stream(sys.stdin, encoding, errors)
+ if rv is not None:
+ return rv
+ return _force_correct_text_reader(sys.stdin, encoding, errors, force_readable=True)
+
+
+def get_text_stdout(encoding=None, errors=None):
+ rv = _get_windows_console_stream(sys.stdout, encoding, errors)
+ if rv is not None:
+ return rv
+ return _force_correct_text_writer(sys.stdout, encoding, errors, force_writable=True)
+
+
+def get_text_stderr(encoding=None, errors=None):
+ rv = _get_windows_console_stream(sys.stderr, encoding, errors)
+ if rv is not None:
+ return rv
+ return _force_correct_text_writer(sys.stderr, encoding, errors, force_writable=True)
+
+
+def filename_to_ui(value):
+ if isinstance(value, bytes):
+ value = value.decode(get_filesystem_encoding(), "replace")
+ else:
+ value = value.encode("utf-8", "surrogateescape").decode("utf-8", "replace")
+ return value
+
+
+def get_strerror(e, default=None):
+ if hasattr(e, "strerror"):
+ msg = e.strerror
+ else:
+ if default is not None:
+ msg = default
+ else:
+ msg = str(e)
+ if isinstance(msg, bytes):
+ msg = msg.decode("utf-8", "replace")
+ return msg
+
+
+def _wrap_io_open(file, mode, encoding, errors):
+ """Handles not passing ``encoding`` and ``errors`` in binary mode."""
+ if "b" in mode:
+ return open(file, mode)
+
+ return open(file, mode, encoding=encoding, errors=errors)
+
+
+def open_stream(filename, mode="r", encoding=None, errors="strict", atomic=False):
+ binary = "b" in mode
+
+ # Standard streams first. These are simple because they don't need
+ # special handling for the atomic flag. It's entirely ignored.
+ if filename == "-":
+ if any(m in mode for m in ["w", "a", "x"]):
+ if binary:
+ return get_binary_stdout(), False
+ return get_text_stdout(encoding=encoding, errors=errors), False
+ if binary:
+ return get_binary_stdin(), False
+ return get_text_stdin(encoding=encoding, errors=errors), False
+
+ # Non-atomic writes directly go out through the regular open functions.
+ if not atomic:
+ return _wrap_io_open(filename, mode, encoding, errors), True
+
+ # Some usability stuff for atomic writes
+ if "a" in mode:
+ raise ValueError(
+ "Appending to an existing file is not supported, because that"
+ " would involve an expensive `copy`-operation to a temporary"
+ " file. Open the file in normal `w`-mode and copy explicitly"
+ " if that's what you're after."
+ )
+ if "x" in mode:
+ raise ValueError("Use the `overwrite`-parameter instead.")
+ if "w" not in mode:
+ raise ValueError("Atomic writes only make sense with `w`-mode.")
+
+ # Atomic writes are more complicated. They work by opening a file
+ # as a proxy in the same folder and then using the fdopen
+ # functionality to wrap it in a Python file. Then we wrap it in an
+ # atomic file that moves the file over on close.
+ import errno
+ import random
+
+ try:
+ perm = os.stat(filename).st_mode
+ except OSError:
+ perm = None
+
+ flags = os.O_RDWR | os.O_CREAT | os.O_EXCL
+
+ if binary:
+ flags |= getattr(os, "O_BINARY", 0)
+
+ while True:
+ tmp_filename = os.path.join(
+ os.path.dirname(filename),
+ f".__atomic-write{random.randrange(1 << 32):08x}",
+ )
+ try:
+ fd = os.open(tmp_filename, flags, 0o666 if perm is None else perm)
+ break
+ except OSError as e:
+ if e.errno == errno.EEXIST or (
+ os.name == "nt"
+ and e.errno == errno.EACCES
+ and os.path.isdir(e.filename)
+ and os.access(e.filename, os.W_OK)
+ ):
+ continue
+ raise
+
+ if perm is not None:
+ os.chmod(tmp_filename, perm) # in case perm includes bits in umask
+
+ f = _wrap_io_open(fd, mode, encoding, errors)
+ return _AtomicFile(f, tmp_filename, os.path.realpath(filename)), True
+
+
+class _AtomicFile:
+ def __init__(self, f, tmp_filename, real_filename):
+ self._f = f
+ self._tmp_filename = tmp_filename
+ self._real_filename = real_filename
+ self.closed = False
+
+ @property
+ def name(self):
+ return self._real_filename
+
+ def close(self, delete=False):
+ if self.closed:
+ return
+ self._f.close()
+ os.replace(self._tmp_filename, self._real_filename)
+ self.closed = True
+
+ def __getattr__(self, name):
+ return getattr(self._f, name)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, tb):
+ self.close(delete=exc_type is not None)
+
+ def __repr__(self):
+ return repr(self._f)
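+
+
+# Illustrative usage sketch: with atomic=True the data lands in a
+# temporary sibling file that is moved over the target on close, so a
+# reader never observes a partially written file. The filename below is
+# hypothetical.
+def _example_atomic_write():
+    f, should_close = open_stream("settings.yaml", "w", atomic=True)
+    try:
+        f.write("debug: true\n")
+    finally:
+        if should_close:
+            f.close()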
+
+
+def strip_ansi(value):
+ return _ansi_re.sub("", value)
+
+
+def _is_jupyter_kernel_output(stream):
+ if WIN:
+        # TODO: Couldn't test on Windows, shouldn't try to support until
+        # someone tests the details wrt colorama.
+        return False
+
+ while isinstance(stream, (_FixupStream, _NonClosingTextIOWrapper)):
+ stream = stream._stream
+
+ return stream.__class__.__module__.startswith("ipykernel.")
+
+
+def should_strip_ansi(stream=None, color=None):
+ if color is None:
+ if stream is None:
+ stream = sys.stdin
+ return not isatty(stream) and not _is_jupyter_kernel_output(stream)
+ return not color
+
+
+# If we're on Windows, we provide transparent integration through
+# colorama. This will make ANSI colors through the echo function
+# work automatically.
+if WIN:
+ # Windows has a smaller terminal
+ DEFAULT_COLUMNS = 79
+
+ from ._winconsole import _get_windows_console_stream
+
+ def _get_argv_encoding():
+ import locale
+
+ return locale.getpreferredencoding()
+
+ try:
+ import colorama
+ except ImportError:
+ pass
+ else:
+ _ansi_stream_wrappers = WeakKeyDictionary()
+
+ def auto_wrap_for_ansi(stream, color=None):
+ """This function wraps a stream so that calls through colorama
+ are issued to the win32 console API to recolor on demand. It
+ also ensures to reset the colors if a write call is interrupted
+ to not destroy the console afterwards.
+ """
+ try:
+ cached = _ansi_stream_wrappers.get(stream)
+ except Exception:
+ cached = None
+ if cached is not None:
+ return cached
+ strip = should_strip_ansi(stream, color)
+ ansi_wrapper = colorama.AnsiToWin32(stream, strip=strip)
+ rv = ansi_wrapper.stream
+ _write = rv.write
+
+ def _safe_write(s):
+ try:
+ return _write(s)
+ except BaseException:
+ ansi_wrapper.reset_all()
+ raise
+
+ rv.write = _safe_write
+ try:
+ _ansi_stream_wrappers[stream] = rv
+ except Exception:
+ pass
+ return rv
+
+ def get_winterm_size():
+ win = colorama.win32.GetConsoleScreenBufferInfo(
+ colorama.win32.STDOUT
+ ).srWindow
+ return win.Right - win.Left, win.Bottom - win.Top
+
+
+else:
+
+ def _get_argv_encoding():
+ return getattr(sys.stdin, "encoding", None) or get_filesystem_encoding()
+
+ def _get_windows_console_stream(f, encoding, errors):
+ return None
+
+
+def term_len(x):
+ return len(strip_ansi(x))
+
+
+def isatty(stream):
+ try:
+ return stream.isatty()
+ except Exception:
+ return False
+
+
+def _make_cached_stream_func(src_func, wrapper_func):
+ cache = WeakKeyDictionary()
+
+ def func():
+ stream = src_func()
+ try:
+ rv = cache.get(stream)
+ except Exception:
+ rv = None
+ if rv is not None:
+ return rv
+ rv = wrapper_func()
+ try:
+ stream = src_func() # In case wrapper_func() modified the stream
+ cache[stream] = rv
+ except Exception:
+ pass
+ return rv
+
+ return func
+
+
+_default_text_stdin = _make_cached_stream_func(lambda: sys.stdin, get_text_stdin)
+_default_text_stdout = _make_cached_stream_func(lambda: sys.stdout, get_text_stdout)
+_default_text_stderr = _make_cached_stream_func(lambda: sys.stderr, get_text_stderr)
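+
+# The factories above hand back the same wrapped stream for as long as
+# sys.stdin/stdout/stderr stay bound to the same objects; the
+# WeakKeyDictionary drops a cache entry automatically once a replaced
+# stream object is garbage collected.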
+
+
+binary_streams = {
+ "stdin": get_binary_stdin,
+ "stdout": get_binary_stdout,
+ "stderr": get_binary_stderr,
+}
+
+text_streams = {
+ "stdin": get_text_stdin,
+ "stdout": get_text_stdout,
+ "stderr": get_text_stderr,
+}
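+
+
+# Illustrative sketch: these mappings back helpers such as
+# click.get_text_stream(name), returning a correctly wrapped stream for
+# "stdin", "stdout" or "stderr".
+def _example_lookup_stream(name="stdout"):
+    opener = text_streams[name]
+    return opener(encoding="utf-8")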
diff --git a/libs/dynaconf/vendor/click/_termui_impl.py b/libs/dynaconf/vendor/click/_termui_impl.py
new file mode 100644
index 000000000..78372503d
--- /dev/null
+++ b/libs/dynaconf/vendor/click/_termui_impl.py
@@ -0,0 +1,667 @@
+"""
+This module contains implementations for the termui module. To keep the
+import time of Click down, some infrequently used functionality is
+placed in this module and only imported as needed.
+"""
+import contextlib
+import math
+import os
+import sys
+import time
+
+from ._compat import _default_text_stdout
+from ._compat import CYGWIN
+from ._compat import get_best_encoding
+from ._compat import isatty
+from ._compat import open_stream
+from ._compat import strip_ansi
+from ._compat import term_len
+from ._compat import WIN
+from .exceptions import ClickException
+from .utils import echo
+
+if os.name == "nt":
+ BEFORE_BAR = "\r"
+ AFTER_BAR = "\n"
+else:
+ BEFORE_BAR = "\r\033[?25l"
+ AFTER_BAR = "\033[?25h\n"
+
+
+def _length_hint(obj):
+ """Returns the length hint of an object."""
+ try:
+ return len(obj)
+ except (AttributeError, TypeError):
+ try:
+ get_hint = type(obj).__length_hint__
+ except AttributeError:
+ return None
+ try:
+ hint = get_hint(obj)
+ except TypeError:
+ return None
+ if hint is NotImplemented or not isinstance(hint, int) or hint < 0:
+ return None
+ return hint
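+
+
+# Illustrative sketch: objects without len() can still advertise an
+# estimated size via __length_hint__ (PEP 424); _length_hint() falls
+# back to that so the progress bar can compute an ETA. The class below
+# is hypothetical.
+class _ExampleSized:
+    def __init__(self, n):
+        self._n = n
+
+    def __length_hint__(self):
+        return self._n  # _length_hint(_ExampleSized(10)) -> 10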
+
+
+class ProgressBar:
+ def __init__(
+ self,
+ iterable,
+ length=None,
+ fill_char="#",
+ empty_char=" ",
+ bar_template="%(bar)s",
+ info_sep=" ",
+ show_eta=True,
+ show_percent=None,
+ show_pos=False,
+ item_show_func=None,
+ label=None,
+ file=None,
+ color=None,
+ width=30,
+ ):
+ self.fill_char = fill_char
+ self.empty_char = empty_char
+ self.bar_template = bar_template
+ self.info_sep = info_sep
+ self.show_eta = show_eta
+ self.show_percent = show_percent
+ self.show_pos = show_pos
+ self.item_show_func = item_show_func
+ self.label = label or ""
+ if file is None:
+ file = _default_text_stdout()
+ self.file = file
+ self.color = color
+ self.width = width
+ self.autowidth = width == 0
+
+ if length is None:
+ length = _length_hint(iterable)
+ if iterable is None:
+ if length is None:
+ raise TypeError("iterable or length is required")
+ iterable = range(length)
+ self.iter = iter(iterable)
+ self.length = length
+ self.length_known = length is not None
+ self.pos = 0
+ self.avg = []
+ self.start = self.last_eta = time.time()
+ self.eta_known = False
+ self.finished = False
+ self.max_width = None
+ self.entered = False
+ self.current_item = None
+ self.is_hidden = not isatty(self.file)
+ self._last_line = None
+ self.short_limit = 0.5
+
+ def __enter__(self):
+ self.entered = True
+ self.render_progress()
+ return self
+
+ def __exit__(self, exc_type, exc_value, tb):
+ self.render_finish()
+
+ def __iter__(self):
+ if not self.entered:
+ raise RuntimeError("You need to use progress bars in a with block.")
+ self.render_progress()
+ return self.generator()
+
+ def __next__(self):
+ # Iteration is defined in terms of a generator function,
+ # returned by iter(self); use that to define next(). This works
+ # because `self.iter` is an iterable consumed by that generator,
+ # so it is re-entry safe. Calling `next(self.generator())`
+ # twice works and does "what you want".
+ return next(iter(self))
+
+ def is_fast(self):
+ return time.time() - self.start <= self.short_limit
+
+ def render_finish(self):
+ if self.is_hidden or self.is_fast():
+ return
+ self.file.write(AFTER_BAR)
+ self.file.flush()
+
+ @property
+ def pct(self):
+ if self.finished:
+ return 1.0
+ return min(self.pos / (float(self.length) or 1), 1.0)
+
+ @property
+ def time_per_iteration(self):
+ if not self.avg:
+ return 0.0
+ return sum(self.avg) / float(len(self.avg))
+
+ @property
+ def eta(self):
+ if self.length_known and not self.finished:
+ return self.time_per_iteration * (self.length - self.pos)
+ return 0.0
+
+ def format_eta(self):
+ if self.eta_known:
+ t = int(self.eta)
+ seconds = t % 60
+ t //= 60
+ minutes = t % 60
+ t //= 60
+ hours = t % 24
+ t //= 24
+ if t > 0:
+ return f"{t}d {hours:02}:{minutes:02}:{seconds:02}"
+ else:
+ return f"{hours:02}:{minutes:02}:{seconds:02}"
+ return ""
+
+ def format_pos(self):
+ pos = str(self.pos)
+ if self.length_known:
+ pos += f"/{self.length}"
+ return pos
+
+ def format_pct(self):
+ return f"{int(self.pct * 100): 4}%"[1:]
+
+ def format_bar(self):
+ if self.length_known:
+ bar_length = int(self.pct * self.width)
+ bar = self.fill_char * bar_length
+ bar += self.empty_char * (self.width - bar_length)
+ elif self.finished:
+ bar = self.fill_char * self.width
+ else:
+ bar = list(self.empty_char * (self.width or 1))
+ if self.time_per_iteration != 0:
+ bar[
+ int(
+ (math.cos(self.pos * self.time_per_iteration) / 2.0 + 0.5)
+ * self.width
+ )
+ ] = self.fill_char
+ bar = "".join(bar)
+ return bar
+
+ def format_progress_line(self):
+ show_percent = self.show_percent
+
+ info_bits = []
+ if self.length_known and show_percent is None:
+ show_percent = not self.show_pos
+
+ if self.show_pos:
+ info_bits.append(self.format_pos())
+ if show_percent:
+ info_bits.append(self.format_pct())
+ if self.show_eta and self.eta_known and not self.finished:
+ info_bits.append(self.format_eta())
+ if self.item_show_func is not None:
+ item_info = self.item_show_func(self.current_item)
+ if item_info is not None:
+ info_bits.append(item_info)
+
+ return (
+ self.bar_template
+ % {
+ "label": self.label,
+ "bar": self.format_bar(),
+ "info": self.info_sep.join(info_bits),
+ }
+ ).rstrip()
+
+ def render_progress(self):
+ from .termui import get_terminal_size
+
+ if self.is_hidden:
+ return
+
+ buf = []
+ # Update width in case the terminal has been resized
+ if self.autowidth:
+ old_width = self.width
+ self.width = 0
+ clutter_length = term_len(self.format_progress_line())
+ new_width = max(0, get_terminal_size()[0] - clutter_length)
+ if new_width < old_width:
+ buf.append(BEFORE_BAR)
+ buf.append(" " * self.max_width)
+ self.max_width = new_width
+ self.width = new_width
+
+ clear_width = self.width
+ if self.max_width is not None:
+ clear_width = self.max_width
+
+ buf.append(BEFORE_BAR)
+ line = self.format_progress_line()
+ line_len = term_len(line)
+ if self.max_width is None or self.max_width < line_len:
+ self.max_width = line_len
+
+ buf.append(line)
+ buf.append(" " * (clear_width - line_len))
+ line = "".join(buf)
+ # Render the line only if it changed.
+
+ if line != self._last_line and not self.is_fast():
+ self._last_line = line
+ echo(line, file=self.file, color=self.color, nl=False)
+ self.file.flush()
+
+ def make_step(self, n_steps):
+ self.pos += n_steps
+ if self.length_known and self.pos >= self.length:
+ self.finished = True
+
+ if (time.time() - self.last_eta) < 1.0:
+ return
+
+ self.last_eta = time.time()
+
+ # self.avg is a rolling list of length <= 7 of steps where steps are
+ # defined as time elapsed divided by the total progress through
+ # self.length.
+ if self.pos:
+ step = (time.time() - self.start) / self.pos
+ else:
+ step = time.time() - self.start
+
+ self.avg = self.avg[-6:] + [step]
+
+ self.eta_known = self.length_known
+
+ def update(self, n_steps, current_item=None):
+ """Update the progress bar by advancing a specified number of
+ steps, and optionally set the ``current_item`` for this new
+ position.
+
+ :param n_steps: Number of steps to advance.
+ :param current_item: Optional item to set as ``current_item``
+ for the updated position.
+
+ .. versionadded:: 8.0
+ Added the ``current_item`` optional parameter.
+ """
+ self.make_step(n_steps)
+ if current_item is not None:
+ self.current_item = current_item
+ self.render_progress()
+
+ def finish(self):
+        self.eta_known = False
+ self.current_item = None
+ self.finished = True
+
+ def generator(self):
+ """Return a generator which yields the items added to the bar
+ during construction, and updates the progress bar *after* the
+ yielded block returns.
+ """
+ # WARNING: the iterator interface for `ProgressBar` relies on
+ # this and only works because this is a simple generator which
+ # doesn't create or manage additional state. If this function
+ # changes, the impact should be evaluated both against
+ # `iter(bar)` and `next(bar)`. `next()` in particular may call
+ # `self.generator()` repeatedly, and this must remain safe in
+ # order for that interface to work.
+ if not self.entered:
+ raise RuntimeError("You need to use progress bars in a with block.")
+
+ if self.is_hidden:
+ yield from self.iter
+ else:
+ for rv in self.iter:
+ self.current_item = rv
+ yield rv
+ self.update(1)
+ self.finish()
+ self.render_progress()
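+
+
+# Illustrative usage sketch (normally reached through the public
+# click.progressbar() helper rather than by instantiating ProgressBar
+# directly):
+def _example_progress():
+    with ProgressBar(range(100), label="processing") as bar:
+        for _ in bar:
+            time.sleep(0.01)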
+
+
+def pager(generator, color=None):
+ """Decide what method to use for paging through text."""
+ stdout = _default_text_stdout()
+ if not isatty(sys.stdin) or not isatty(stdout):
+ return _nullpager(stdout, generator, color)
+ pager_cmd = (os.environ.get("PAGER", None) or "").strip()
+ if pager_cmd:
+ if WIN:
+ return _tempfilepager(generator, pager_cmd, color)
+ return _pipepager(generator, pager_cmd, color)
+ if os.environ.get("TERM") in ("dumb", "emacs"):
+ return _nullpager(stdout, generator, color)
+ if WIN or sys.platform.startswith("os2"):
+ return _tempfilepager(generator, "more <", color)
+ if hasattr(os, "system") and os.system("(less) 2>/dev/null") == 0:
+ return _pipepager(generator, "less", color)
+
+ import tempfile
+
+ fd, filename = tempfile.mkstemp()
+ os.close(fd)
+ try:
+ if hasattr(os, "system") and os.system(f'more "{filename}"') == 0:
+ return _pipepager(generator, "more", color)
+ return _nullpager(stdout, generator, color)
+ finally:
+ os.unlink(filename)
+
+
+def _pipepager(generator, cmd, color):
+ """Page through text by feeding it to another program. Invoking a
+ pager through this might support colors.
+ """
+ import subprocess
+
+ env = dict(os.environ)
+
+    # If we're piping to less we might support colors under the
+    # condition that the -R (or -r) flag is set, either on the pager
+    # command line or through the LESS environment variable.
+ cmd_detail = cmd.rsplit("/", 1)[-1].split()
+ if color is None and cmd_detail[0] == "less":
+ less_flags = f"{os.environ.get('LESS', '')}{' '.join(cmd_detail[1:])}"
+ if not less_flags:
+ env["LESS"] = "-R"
+ color = True
+ elif "r" in less_flags or "R" in less_flags:
+ color = True
+
+ c = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, env=env)
+ encoding = get_best_encoding(c.stdin)
+ try:
+ for text in generator:
+ if not color:
+ text = strip_ansi(text)
+
+ c.stdin.write(text.encode(encoding, "replace"))
+ except (OSError, KeyboardInterrupt):
+ pass
+ else:
+ c.stdin.close()
+
+ # Less doesn't respect ^C, but catches it for its own UI purposes (aborting
+ # search or other commands inside less).
+ #
+ # That means when the user hits ^C, the parent process (click) terminates,
+ # but less is still alive, paging the output and messing up the terminal.
+ #
+ # If the user wants to make the pager exit on ^C, they should set
+ # `LESS='-K'`. It's not our decision to make.
+ while True:
+ try:
+ c.wait()
+ except KeyboardInterrupt:
+ pass
+ else:
+ break
+
+
+def _tempfilepager(generator, cmd, color):
+ """Page through text by invoking a program on a temporary file."""
+ import tempfile
+
+ filename = tempfile.mktemp()
+ # TODO: This never terminates if the passed generator never terminates.
+ text = "".join(generator)
+ if not color:
+ text = strip_ansi(text)
+ encoding = get_best_encoding(sys.stdout)
+ with open_stream(filename, "wb")[0] as f:
+ f.write(text.encode(encoding))
+ try:
+ os.system(f'{cmd} "{filename}"')
+ finally:
+ os.unlink(filename)
+
+
+def _nullpager(stream, generator, color):
+ """Simply print unformatted text. This is the ultimate fallback."""
+ for text in generator:
+ if not color:
+ text = strip_ansi(text)
+ stream.write(text)
+
+
+class Editor:
+ def __init__(self, editor=None, env=None, require_save=True, extension=".txt"):
+ self.editor = editor
+ self.env = env
+ self.require_save = require_save
+ self.extension = extension
+
+ def get_editor(self):
+ if self.editor is not None:
+ return self.editor
+ for key in "VISUAL", "EDITOR":
+ rv = os.environ.get(key)
+ if rv:
+ return rv
+ if WIN:
+ return "notepad"
+ for editor in "sensible-editor", "vim", "nano":
+ if os.system(f"which {editor} >/dev/null 2>&1") == 0:
+ return editor
+ return "vi"
+
+ def edit_file(self, filename):
+ import subprocess
+
+ editor = self.get_editor()
+ if self.env:
+ environ = os.environ.copy()
+ environ.update(self.env)
+ else:
+ environ = None
+ try:
+ c = subprocess.Popen(f'{editor} "{filename}"', env=environ, shell=True)
+ exit_code = c.wait()
+ if exit_code != 0:
+ raise ClickException(f"{editor}: Editing failed!")
+ except OSError as e:
+ raise ClickException(f"{editor}: Editing failed: {e}")
+
+ def edit(self, text):
+ import tempfile
+
+ text = text or ""
+    binary_data = isinstance(text, (bytes, bytearray))
+
+ if not binary_data and text and not text.endswith("\n"):
+ text += "\n"
+
+ fd, name = tempfile.mkstemp(prefix="editor-", suffix=self.extension)
+ try:
+ if not binary_data:
+ if WIN:
+ encoding = "utf-8-sig"
+ text = text.replace("\n", "\r\n")
+ else:
+ encoding = "utf-8"
+ text = text.encode(encoding)
+
+ f = os.fdopen(fd, "wb")
+ f.write(text)
+ f.close()
+ timestamp = os.path.getmtime(name)
+
+ self.edit_file(name)
+
+ if self.require_save and os.path.getmtime(name) == timestamp:
+ return None
+
+ f = open(name, "rb")
+ try:
+ rv = f.read()
+ finally:
+ f.close()
+ if binary_data:
+ return rv
+ else:
+ return rv.decode("utf-8-sig").replace("\r\n", "\n")
+ finally:
+ os.unlink(name)
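+
+
+# Illustrative usage sketch (the public entry point is click.edit()):
+def _example_edit_note():
+    # Returns None when require_save is set and the user exits the
+    # editor without saving.
+    return Editor(extension=".md").edit("# notes\n")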
+
+
+def open_url(url, wait=False, locate=False):
+ import subprocess
+
+    def _unquote_file(url):
+        from urllib.parse import unquote
+
+        if url.startswith("file://"):
+            url = unquote(url[7:])
+        return url
+
+ if sys.platform == "darwin":
+ args = ["open"]
+ if wait:
+ args.append("-W")
+ if locate:
+ args.append("-R")
+ args.append(_unquote_file(url))
+ null = open("/dev/null", "w")
+ try:
+ return subprocess.Popen(args, stderr=null).wait()
+ finally:
+ null.close()
+ elif WIN:
+ if locate:
+ url = _unquote_file(url.replace('"', ""))
+ args = f'explorer /select,"{url}"'
+ else:
+ url = url.replace('"', "")
+ wait = "/WAIT" if wait else ""
+ args = f'start {wait} "" "{url}"'
+ return os.system(args)
+ elif CYGWIN:
+ if locate:
+ url = os.path.dirname(_unquote_file(url).replace('"', ""))
+ args = f'cygstart "{url}"'
+ else:
+ url = url.replace('"', "")
+ wait = "-w" if wait else ""
+ args = f'cygstart {wait} "{url}"'
+ return os.system(args)
+
+ try:
+ if locate:
+ url = os.path.dirname(_unquote_file(url)) or "."
+ else:
+ url = _unquote_file(url)
+ c = subprocess.Popen(["xdg-open", url])
+ if wait:
+ return c.wait()
+ return 0
+ except OSError:
+ if url.startswith(("http://", "https://")) and not locate and not wait:
+ import webbrowser
+
+ webbrowser.open(url)
+ return 0
+ return 1
+
+
+def _translate_ch_to_exc(ch):
+ if ch == "\x03":
+ raise KeyboardInterrupt()
+ if ch == "\x04" and not WIN: # Unix-like, Ctrl+D
+ raise EOFError()
+ if ch == "\x1a" and WIN: # Windows, Ctrl+Z
+ raise EOFError()
+
+
+if WIN:
+ import msvcrt
+
+ @contextlib.contextmanager
+ def raw_terminal():
+ yield
+
+ def getchar(echo):
+ # The function `getch` will return a bytes object corresponding to
+ # the pressed character. Since Windows 10 build 1803, it will also
+ # return \x00 when called a second time after pressing a regular key.
+ #
+ # `getwch` does not share this probably-bugged behavior. Moreover, it
+ # returns a Unicode object by default, which is what we want.
+ #
+ # Either of these functions will return \x00 or \xe0 to indicate
+ # a special key, and you need to call the same function again to get
+ # the "rest" of the code. The fun part is that \u00e0 is
+ # "latin small letter a with grave", so if you type that on a French
+ # keyboard, you _also_ get a \xe0.
+ # E.g., consider the Up arrow. This returns \xe0 and then \x48. The
+ # resulting Unicode string reads as "a with grave" + "capital H".
+ # This is indistinguishable from when the user actually types
+ # "a with grave" and then "capital H".
+ #
+ # When \xe0 is returned, we assume it's part of a special-key sequence
+ # and call `getwch` again, but that means that when the user types
+ # the \u00e0 character, `getchar` doesn't return until a second
+ # character is typed.
+ # The alternative is returning immediately, but that would mess up
+ # cross-platform handling of arrow keys and others that start with
+ # \xe0. Another option is using `getch`, but then we can't reliably
+ # read non-ASCII characters, because return values of `getch` are
+ # limited to the current 8-bit codepage.
+ #
+ # Anyway, Click doesn't claim to do this Right(tm), and using `getwch`
+ # is doing the right thing in more situations than with `getch`.
+ if echo:
+ func = msvcrt.getwche
+ else:
+ func = msvcrt.getwch
+
+ rv = func()
+ if rv in ("\x00", "\xe0"):
+ # \x00 and \xe0 are control characters that indicate special key,
+ # see above.
+ rv += func()
+ _translate_ch_to_exc(rv)
+ return rv
+
+
+else:
+ import tty
+ import termios
+
+ @contextlib.contextmanager
+ def raw_terminal():
+ if not isatty(sys.stdin):
+ f = open("/dev/tty")
+ fd = f.fileno()
+ else:
+ fd = sys.stdin.fileno()
+ f = None
+ try:
+ old_settings = termios.tcgetattr(fd)
+ try:
+ tty.setraw(fd)
+ yield fd
+ finally:
+ termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
+ sys.stdout.flush()
+ if f is not None:
+ f.close()
+ except termios.error:
+ pass
+
+ def getchar(echo):
+ with raw_terminal() as fd:
+ ch = os.read(fd, 32)
+ ch = ch.decode(get_best_encoding(sys.stdin), "replace")
+ if echo and isatty(sys.stdout):
+ sys.stdout.write(ch)
+ _translate_ch_to_exc(ch)
+ return ch
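+
+
+# Illustrative usage sketch (the public wrapper is click.getchar()):
+#
+#     ch = getchar(echo=False)  # blocks until one keypress
+#     # Ctrl+C raises KeyboardInterrupt; Ctrl+D (POSIX) / Ctrl+Z
+#     # (Windows) raise EOFError via _translate_ch_to_exc().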
diff --git a/libs/dynaconf/vendor/click/_textwrap.py b/libs/dynaconf/vendor/click/_textwrap.py
new file mode 100644
index 000000000..7a052b70d
--- /dev/null
+++ b/libs/dynaconf/vendor/click/_textwrap.py
@@ -0,0 +1,37 @@
+import textwrap
+from contextlib import contextmanager
+
+
+class TextWrapper(textwrap.TextWrapper):
+ def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
+ space_left = max(width - cur_len, 1)
+
+ if self.break_long_words:
+ last = reversed_chunks[-1]
+ cut = last[:space_left]
+ res = last[space_left:]
+ cur_line.append(cut)
+ reversed_chunks[-1] = res
+ elif not cur_line:
+ cur_line.append(reversed_chunks.pop())
+
+ @contextmanager
+ def extra_indent(self, indent):
+ old_initial_indent = self.initial_indent
+ old_subsequent_indent = self.subsequent_indent
+ self.initial_indent += indent
+ self.subsequent_indent += indent
+ try:
+ yield
+ finally:
+ self.initial_indent = old_initial_indent
+ self.subsequent_indent = old_subsequent_indent
+
+ def indent_only(self, text):
+ rv = []
+ for idx, line in enumerate(text.splitlines()):
+ indent = self.initial_indent
+ if idx > 0:
+ indent = self.subsequent_indent
+ rv.append(f"{indent}{line}")
+ return "\n".join(rv)
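+
+
+# Illustrative sketch: extra_indent() temporarily widens both the
+# initial and subsequent indents while a nested help section is being
+# wrapped, then restores them.
+def _example_wrap(text):
+    wrapper = TextWrapper(width=40)
+    with wrapper.extra_indent("    "):
+        return wrapper.fill(text)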
diff --git a/libs/dynaconf/vendor/click/_unicodefun.py b/libs/dynaconf/vendor/click/_unicodefun.py
new file mode 100644
index 000000000..53ec9d267
--- /dev/null
+++ b/libs/dynaconf/vendor/click/_unicodefun.py
@@ -0,0 +1,82 @@
+import codecs
+import os
+
+
+def _verify_python_env():
+ """Ensures that the environment is good for Unicode."""
+ try:
+ import locale
+
+ fs_enc = codecs.lookup(locale.getpreferredencoding()).name
+ except Exception:
+ fs_enc = "ascii"
+ if fs_enc != "ascii":
+ return
+
+ extra = ""
+ if os.name == "posix":
+ import subprocess
+
+ try:
+ rv = subprocess.Popen(
+ ["locale", "-a"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
+ ).communicate()[0]
+ except OSError:
+ rv = b""
+ good_locales = set()
+ has_c_utf8 = False
+
+ # Make sure we're operating on text here.
+ if isinstance(rv, bytes):
+ rv = rv.decode("ascii", "replace")
+
+ for line in rv.splitlines():
+ locale = line.strip()
+ if locale.lower().endswith((".utf-8", ".utf8")):
+ good_locales.add(locale)
+ if locale.lower() in ("c.utf8", "c.utf-8"):
+ has_c_utf8 = True
+
+ extra += "\n\n"
+ if not good_locales:
+ extra += (
+ "Additional information: on this system no suitable"
+ " UTF-8 locales were discovered. This most likely"
+ " requires resolving by reconfiguring the locale"
+ " system."
+ )
+ elif has_c_utf8:
+ extra += (
+ "This system supports the C.UTF-8 locale which is"
+ " recommended. You might be able to resolve your issue"
+ " by exporting the following environment variables:\n\n"
+ " export LC_ALL=C.UTF-8\n"
+ " export LANG=C.UTF-8"
+ )
+ else:
+ extra += (
+ "This system lists some UTF-8 supporting locales that"
+ " you can pick from. The following suitable locales"
+ f" were discovered: {', '.join(sorted(good_locales))}"
+ )
+
+ bad_locale = None
+ for locale in os.environ.get("LC_ALL"), os.environ.get("LANG"):
+ if locale and locale.lower().endswith((".utf-8", ".utf8")):
+ bad_locale = locale
+ if locale is not None:
+ break
+ if bad_locale is not None:
+ extra += (
+ "\n\nClick discovered that you exported a UTF-8 locale"
+ " but the locale system could not pick up from it"
+ " because it does not exist. The exported locale is"
+ f" {bad_locale!r} but it is not supported"
+ )
+
+ raise RuntimeError(
+ "Click will abort further execution because Python was"
+ " configured to use ASCII as encoding for the environment."
+ " Consult https://click.palletsprojects.com/unicode-support/"
+ f" for mitigation steps.{extra}"
+ )
diff --git a/libs/dynaconf/vendor/click/_winconsole.py b/libs/dynaconf/vendor/click/_winconsole.py
new file mode 100644
index 000000000..923fdba65
--- /dev/null
+++ b/libs/dynaconf/vendor/click/_winconsole.py
@@ -0,0 +1,308 @@
+# This module is based on the excellent work by Adam Bartoš who
+# provided a lot of what went into the implementation here in
+# the discussion of issue 1602 in the Python bug tracker.
+#
+# There are some general differences in regards to how this works
+# compared to the original patches as we do not need to patch
+# the entire interpreter but just work in our little world of
+# echo and prompt.
+import ctypes
+import io
+import time
+from ctypes import byref
+from ctypes import c_char
+from ctypes import c_char_p
+from ctypes import c_int
+from ctypes import c_ssize_t
+from ctypes import c_ulong
+from ctypes import c_void_p
+from ctypes import POINTER
+from ctypes import py_object
+from ctypes import windll
+from ctypes import WINFUNCTYPE
+from ctypes.wintypes import DWORD
+from ctypes.wintypes import HANDLE
+from ctypes.wintypes import LPCWSTR
+from ctypes.wintypes import LPWSTR
+
+import msvcrt
+
+from ._compat import _NonClosingTextIOWrapper
+
+try:
+ from ctypes import pythonapi
+except ImportError:
+ pythonapi = None
+else:
+ PyObject_GetBuffer = pythonapi.PyObject_GetBuffer
+ PyBuffer_Release = pythonapi.PyBuffer_Release
+
+
+c_ssize_p = POINTER(c_ssize_t)
+
+kernel32 = windll.kernel32
+GetStdHandle = kernel32.GetStdHandle
+ReadConsoleW = kernel32.ReadConsoleW
+WriteConsoleW = kernel32.WriteConsoleW
+GetConsoleMode = kernel32.GetConsoleMode
+GetLastError = kernel32.GetLastError
+GetCommandLineW = WINFUNCTYPE(LPWSTR)(("GetCommandLineW", windll.kernel32))
+CommandLineToArgvW = WINFUNCTYPE(POINTER(LPWSTR), LPCWSTR, POINTER(c_int))(
+ ("CommandLineToArgvW", windll.shell32)
+)
+LocalFree = WINFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p)(
+ ("LocalFree", windll.kernel32)
+)
+
+
+STDIN_HANDLE = GetStdHandle(-10)
+STDOUT_HANDLE = GetStdHandle(-11)
+STDERR_HANDLE = GetStdHandle(-12)
+
+
+PyBUF_SIMPLE = 0
+PyBUF_WRITABLE = 1
+
+ERROR_SUCCESS = 0
+ERROR_NOT_ENOUGH_MEMORY = 8
+ERROR_OPERATION_ABORTED = 995
+
+STDIN_FILENO = 0
+STDOUT_FILENO = 1
+STDERR_FILENO = 2
+
+EOF = b"\x1a"
+MAX_BYTES_WRITTEN = 32767
+
+
+class Py_buffer(ctypes.Structure):
+ _fields_ = [
+ ("buf", c_void_p),
+ ("obj", py_object),
+ ("len", c_ssize_t),
+ ("itemsize", c_ssize_t),
+ ("readonly", c_int),
+ ("ndim", c_int),
+ ("format", c_char_p),
+ ("shape", c_ssize_p),
+ ("strides", c_ssize_p),
+ ("suboffsets", c_ssize_p),
+ ("internal", c_void_p),
+ ]
+
+
+# On PyPy we cannot get buffers so our ability to operate here is
+# severely limited.
+if pythonapi is None:
+ get_buffer = None
+else:
+
+ def get_buffer(obj, writable=False):
+ buf = Py_buffer()
+ flags = PyBUF_WRITABLE if writable else PyBUF_SIMPLE
+ PyObject_GetBuffer(py_object(obj), byref(buf), flags)
+ try:
+ buffer_type = c_char * buf.len
+ return buffer_type.from_address(buf.buf)
+ finally:
+ PyBuffer_Release(byref(buf))
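+
+
+# Illustrative sketch (CPython only; get_buffer is None on PyPy): the
+# returned ctypes char array aliases the bytearray's memory, so writes
+# through it are visible in the original object. This is how
+# ReadConsoleW fills the caller's buffer directly.
+def _example_fill_buffer():
+    data = bytearray(4)
+    buf = get_buffer(data, writable=True)
+    buf[0] = b"A"
+    return bytes(data)  # b"A\x00\x00\x00"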
+
+
+class _WindowsConsoleRawIOBase(io.RawIOBase):
+ def __init__(self, handle):
+ self.handle = handle
+
+ def isatty(self):
+ io.RawIOBase.isatty(self)
+ return True
+
+
+class _WindowsConsoleReader(_WindowsConsoleRawIOBase):
+ def readable(self):
+ return True
+
+ def readinto(self, b):
+ bytes_to_be_read = len(b)
+ if not bytes_to_be_read:
+ return 0
+ elif bytes_to_be_read % 2:
+ raise ValueError(
+ "cannot read odd number of bytes from UTF-16-LE encoded console"
+ )
+
+ buffer = get_buffer(b, writable=True)
+ code_units_to_be_read = bytes_to_be_read // 2
+ code_units_read = c_ulong()
+
+ rv = ReadConsoleW(
+ HANDLE(self.handle),
+ buffer,
+ code_units_to_be_read,
+ byref(code_units_read),
+ None,
+ )
+ if GetLastError() == ERROR_OPERATION_ABORTED:
+ # wait for KeyboardInterrupt
+ time.sleep(0.1)
+ if not rv:
+ raise OSError(f"Windows error: {GetLastError()}")
+
+ if buffer[0] == EOF:
+ return 0
+ return 2 * code_units_read.value
+
+
+class _WindowsConsoleWriter(_WindowsConsoleRawIOBase):
+ def writable(self):
+ return True
+
+ @staticmethod
+ def _get_error_message(errno):
+ if errno == ERROR_SUCCESS:
+ return "ERROR_SUCCESS"
+ elif errno == ERROR_NOT_ENOUGH_MEMORY:
+ return "ERROR_NOT_ENOUGH_MEMORY"
+ return f"Windows error {errno}"
+
+ def write(self, b):
+ bytes_to_be_written = len(b)
+ buf = get_buffer(b)
+ code_units_to_be_written = min(bytes_to_be_written, MAX_BYTES_WRITTEN) // 2
+ code_units_written = c_ulong()
+
+ WriteConsoleW(
+ HANDLE(self.handle),
+ buf,
+ code_units_to_be_written,
+ byref(code_units_written),
+ None,
+ )
+ bytes_written = 2 * code_units_written.value
+
+ if bytes_written == 0 and bytes_to_be_written > 0:
+ raise OSError(self._get_error_message(GetLastError()))
+ return bytes_written
+
+
+class ConsoleStream:
+ def __init__(self, text_stream, byte_stream):
+ self._text_stream = text_stream
+ self.buffer = byte_stream
+
+ @property
+ def name(self):
+ return self.buffer.name
+
+ def write(self, x):
+ if isinstance(x, str):
+ return self._text_stream.write(x)
+ try:
+ self.flush()
+ except Exception:
+ pass
+ return self.buffer.write(x)
+
+ def writelines(self, lines):
+ for line in lines:
+ self.write(line)
+
+ def __getattr__(self, name):
+ return getattr(self._text_stream, name)
+
+ def isatty(self):
+ return self.buffer.isatty()
+
+ def __repr__(self):
+ return f"<ConsoleStream name={self.name!r} encoding={self.encoding!r}>"
+
+
+class WindowsChunkedWriter:
+ """
+ Wraps a stream (such as stdout), acting as a transparent proxy for all
+ attribute access apart from method 'write()' which we wrap to write in
+ limited chunks due to a Windows limitation on binary console streams.
+ """
+
+ def __init__(self, wrapped):
+ # double-underscore everything to prevent clashes with names of
+ # attributes on the wrapped stream object.
+ self.__wrapped = wrapped
+
+ def __getattr__(self, name):
+ return getattr(self.__wrapped, name)
+
+ def write(self, text):
+ total_to_write = len(text)
+ written = 0
+
+ while written < total_to_write:
+ to_write = min(total_to_write - written, MAX_BYTES_WRITTEN)
+ self.__wrapped.write(text[written : written + to_write])
+ written += to_write
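+
+
+# Illustrative sketch: wrapping a console stream keeps each underlying
+# write under MAX_BYTES_WRITTEN, working around the Windows console
+# limitation mentioned above.
+def _example_chunked_stdout():
+    import sys
+
+    return WindowsChunkedWriter(sys.stdout)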
+
+
+def _get_text_stdin(buffer_stream):
+ text_stream = _NonClosingTextIOWrapper(
+ io.BufferedReader(_WindowsConsoleReader(STDIN_HANDLE)),
+ "utf-16-le",
+ "strict",
+ line_buffering=True,
+ )
+ return ConsoleStream(text_stream, buffer_stream)
+
+
+def _get_text_stdout(buffer_stream):
+ text_stream = _NonClosingTextIOWrapper(
+ io.BufferedWriter(_WindowsConsoleWriter(STDOUT_HANDLE)),
+ "utf-16-le",
+ "strict",
+ line_buffering=True,
+ )
+ return ConsoleStream(text_stream, buffer_stream)
+
+
+def _get_text_stderr(buffer_stream):
+ text_stream = _NonClosingTextIOWrapper(
+ io.BufferedWriter(_WindowsConsoleWriter(STDERR_HANDLE)),
+ "utf-16-le",
+ "strict",
+ line_buffering=True,
+ )
+ return ConsoleStream(text_stream, buffer_stream)
+
+
+_stream_factories = {
+ 0: _get_text_stdin,
+ 1: _get_text_stdout,
+ 2: _get_text_stderr,
+}
+
+
+def _is_console(f):
+ if not hasattr(f, "fileno"):
+ return False
+
+ try:
+ fileno = f.fileno()
+ except OSError:
+ return False
+
+ handle = msvcrt.get_osfhandle(fileno)
+ return bool(GetConsoleMode(handle, byref(DWORD())))
+
+
+def _get_windows_console_stream(f, encoding, errors):
+ if (
+ get_buffer is not None
+ and encoding in {"utf-16-le", None}
+ and errors in {"strict", None}
+ and _is_console(f)
+ ):
+ func = _stream_factories.get(f.fileno())
+ if func is not None:
+ f = getattr(f, "buffer", None)
+
+ if f is None:
+ return None
+
+ return func(f)
diff --git a/libs/dynaconf/vendor/click/core.py b/libs/dynaconf/vendor/click/core.py
new file mode 100644
index 000000000..b7124df4f
--- /dev/null
+++ b/libs/dynaconf/vendor/click/core.py
@@ -0,0 +1,2070 @@
+import errno
+import inspect
+import os
+import sys
+from contextlib import contextmanager
+from functools import update_wrapper
+from itertools import repeat
+
+from ._unicodefun import _verify_python_env
+from .exceptions import Abort
+from .exceptions import BadParameter
+from .exceptions import ClickException
+from .exceptions import Exit
+from .exceptions import MissingParameter
+from .exceptions import UsageError
+from .formatting import HelpFormatter
+from .formatting import join_options
+from .globals import pop_context
+from .globals import push_context
+from .parser import OptionParser
+from .parser import split_opt
+from .termui import confirm
+from .termui import prompt
+from .termui import style
+from .types import BOOL
+from .types import convert_type
+from .types import IntRange
+from .utils import echo
+from .utils import make_default_short_help
+from .utils import make_str
+from .utils import PacifyFlushWrapper
+
+_missing = object()
+
+SUBCOMMAND_METAVAR = "COMMAND [ARGS]..."
+SUBCOMMANDS_METAVAR = "COMMAND1 [ARGS]... [COMMAND2 [ARGS]...]..."
+
+DEPRECATED_HELP_NOTICE = " (DEPRECATED)"
+DEPRECATED_INVOKE_NOTICE = "DeprecationWarning: The command {name} is deprecated."
+
+
+def _maybe_show_deprecated_notice(cmd):
+ if cmd.deprecated:
+ echo(style(DEPRECATED_INVOKE_NOTICE.format(name=cmd.name), fg="red"), err=True)
+
+
+def fast_exit(code):
+ """Exit without garbage collection, this speeds up exit by about 10ms for
+ things like bash completion.
+ """
+ sys.stdout.flush()
+ sys.stderr.flush()
+ os._exit(code)
+
+
+def _bashcomplete(cmd, prog_name, complete_var=None):
+ """Internal handler for the bash completion support."""
+ if complete_var is None:
+ complete_var = f"_{prog_name}_COMPLETE".replace("-", "_").upper()
+ complete_instr = os.environ.get(complete_var)
+ if not complete_instr:
+ return
+
+ from ._bashcomplete import bashcomplete
+
+ if bashcomplete(cmd, prog_name, complete_var, complete_instr):
+ fast_exit(1)
+
+
+def _check_multicommand(base_command, cmd_name, cmd, register=False):
+ if not base_command.chain or not isinstance(cmd, MultiCommand):
+ return
+ if register:
+ hint = (
+ "It is not possible to add multi commands as children to"
+ " another multi command that is in chain mode."
+ )
+ else:
+ hint = (
+ "Found a multi command as subcommand to a multi command"
+ " that is in chain mode. This is not supported."
+ )
+ raise RuntimeError(
+ f"{hint}. Command {base_command.name!r} is set to chain and"
+ f" {cmd_name!r} was added as a subcommand but it in itself is a"
+ f" multi command. ({cmd_name!r} is a {type(cmd).__name__}"
+ f" within a chained {type(base_command).__name__} named"
+ f" {base_command.name!r})."
+ )
+
+
+def batch(iterable, batch_size):
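+    # zip() consumes the *same* iterator batch_size times per output
+    # tuple, grouping the input into fixed-size batches; any trailing
+    # partial batch is silently dropped.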
+ return list(zip(*repeat(iter(iterable), batch_size)))
+
+
+@contextmanager
+def augment_usage_errors(ctx, param=None):
+ """Context manager that attaches extra information to exceptions."""
+ try:
+ yield
+ except BadParameter as e:
+ if e.ctx is None:
+ e.ctx = ctx
+ if param is not None and e.param is None:
+ e.param = param
+ raise
+ except UsageError as e:
+ if e.ctx is None:
+ e.ctx = ctx
+ raise
+
+
+def iter_params_for_processing(invocation_order, declaration_order):
+ """Given a sequence of parameters in the order as should be considered
+ for processing and an iterable of parameters that exist, this returns
+ a list in the correct order as they should be processed.
+ """
+
+ def sort_key(item):
+ try:
+ idx = invocation_order.index(item)
+ except ValueError:
+ idx = float("inf")
+ return (not item.is_eager, idx)
+
+ return sorted(declaration_order, key=sort_key)
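+
+
+# Illustrative worked example: an eager declared parameter sorts ahead
+# of an invoked non-eager one (the names below are hypothetical).
+def _example_processing_order():
+    class _Param:
+        def __init__(self, name, is_eager):
+            self.name = name
+            self.is_eager = is_eager
+
+    verbose = _Param("verbose", False)
+    version = _Param("version", True)
+    # Only --verbose appeared on the command line, but --version is
+    # eager, so it is processed first:
+    return iter_params_for_processing([verbose], [verbose, version])
+    # -> [version, verbose]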
+
+
+class ParameterSource:
+ """This is an enum that indicates the source of a command line parameter.
+
+ The enum has one of the following values: COMMANDLINE,
+ ENVIRONMENT, DEFAULT, DEFAULT_MAP. The DEFAULT indicates that the
+ default value in the decorator was used. This class should be
+ converted to an enum when Python 2 support is dropped.
+ """
+
+ COMMANDLINE = "COMMANDLINE"
+ ENVIRONMENT = "ENVIRONMENT"
+ DEFAULT = "DEFAULT"
+ DEFAULT_MAP = "DEFAULT_MAP"
+
+ VALUES = {COMMANDLINE, ENVIRONMENT, DEFAULT, DEFAULT_MAP}
+
+ @classmethod
+ def validate(cls, value):
+ """Validate that the specified value is a valid enum.
+
+ This method will raise a ValueError if the value is
+ not a valid enum.
+
+ :param value: the string value to verify
+ """
+ if value not in cls.VALUES:
+ raise ValueError(
+ f"Invalid ParameterSource value: {value!r}. Valid"
+ f" values are: {','.join(cls.VALUES)}"
+ )
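+
+
+# Illustrative sketch: a context records where each parameter value came
+# from, e.g.
+#
+#     ctx.set_parameter_source("debug", ParameterSource.COMMANDLINE)
+#     ctx.get_parameter_source("debug")  # -> "COMMANDLINE"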
+
+
+class Context:
+ """The context is a special internal object that holds state relevant
+ for the script execution at every single level. It's normally invisible
+ to commands unless they opt-in to getting access to it.
+
+ The context is useful as it can pass internal objects around and can
+ control special execution features such as reading data from
+ environment variables.
+
+ A context can be used as context manager in which case it will call
+ :meth:`close` on teardown.
+
+ .. versionadded:: 2.0
+ Added the `resilient_parsing`, `help_option_names`,
+ `token_normalize_func` parameters.
+
+ .. versionadded:: 3.0
+ Added the `allow_extra_args` and `allow_interspersed_args`
+ parameters.
+
+ .. versionadded:: 4.0
+ Added the `color`, `ignore_unknown_options`, and
+ `max_content_width` parameters.
+
+ .. versionadded:: 7.1
+ Added the `show_default` parameter.
+
+ :param command: the command class for this context.
+ :param parent: the parent context.
+ :param info_name: the info name for this invocation. Generally this
+ is the most descriptive name for the script or
+ command. For the toplevel script it is usually
+                      the name of the script, for commands below it it's
+                      the name of the command.
+ :param obj: an arbitrary object of user data.
+ :param auto_envvar_prefix: the prefix to use for automatic environment
+ variables. If this is `None` then reading
+ from environment variables is disabled. This
+ does not affect manually set environment
+ variables which are always read.
+ :param default_map: a dictionary (like object) with default values
+ for parameters.
+ :param terminal_width: the width of the terminal. The default is
+ inherit from parent context. If no context
+ defines the terminal width then auto
+ detection will be applied.
+ :param max_content_width: the maximum width for content rendered by
+ Click (this currently only affects help
+ pages). This defaults to 80 characters if
+ not overridden. In other words: even if the
+ terminal is larger than that, Click will not
+ format things wider than 80 characters by
+ default. In addition to that, formatters might
+ add some safety mapping on the right.
+ :param resilient_parsing: if this flag is enabled then Click will
+ parse without any interactivity or callback
+ invocation. Default values will also be
+ ignored. This is useful for implementing
+ things such as completion support.
+ :param allow_extra_args: if this is set to `True` then extra arguments
+ at the end will not raise an error and will be
+ kept on the context. The default is to inherit
+ from the command.
+ :param allow_interspersed_args: if this is set to `False` then options
+ and arguments cannot be mixed. The
+ default is to inherit from the command.
+ :param ignore_unknown_options: instructs click to ignore options it does
+                                   not know and keep them for later
+ processing.
+ :param help_option_names: optionally a list of strings that define how
+ the default help parameter is named. The
+ default is ``['--help']``.
+ :param token_normalize_func: an optional function that is used to
+ normalize tokens (options, choices,
+ etc.). This for instance can be used to
+ implement case insensitive behavior.
+ :param color: controls if the terminal supports ANSI colors or not. The
+ default is autodetection. This is only needed if ANSI
+ codes are used in texts that Click prints which is by
+ default not the case. This for instance would affect
+ help output.
+ :param show_default: if True, shows defaults for all options.
+ Even if an option is later created with show_default=False,
+ this command-level setting overrides it.
+ """
+
+ def __init__(
+ self,
+ command,
+ parent=None,
+ info_name=None,
+ obj=None,
+ auto_envvar_prefix=None,
+ default_map=None,
+ terminal_width=None,
+ max_content_width=None,
+ resilient_parsing=False,
+ allow_extra_args=None,
+ allow_interspersed_args=None,
+ ignore_unknown_options=None,
+ help_option_names=None,
+ token_normalize_func=None,
+ color=None,
+ show_default=None,
+ ):
+ #: the parent context or `None` if none exists.
+ self.parent = parent
+ #: the :class:`Command` for this context.
+ self.command = command
+ #: the descriptive information name
+ self.info_name = info_name
+ #: the parsed parameters except if the value is hidden in which
+ #: case it's not remembered.
+ self.params = {}
+ #: the leftover arguments.
+ self.args = []
+ #: protected arguments. These are arguments that are prepended
+ #: to `args` when certain parsing scenarios are encountered but
+        #: must never be propagated to another command. This is used
+ #: to implement nested parsing.
+ self.protected_args = []
+ if obj is None and parent is not None:
+ obj = parent.obj
+ #: the user object stored.
+ self.obj = obj
+ self._meta = getattr(parent, "meta", {})
+
+ #: A dictionary (-like object) with defaults for parameters.
+ if (
+ default_map is None
+ and parent is not None
+ and parent.default_map is not None
+ ):
+ default_map = parent.default_map.get(info_name)
+ self.default_map = default_map
+
+ #: This flag indicates if a subcommand is going to be executed. A
+ #: group callback can use this information to figure out if it's
+ #: being executed directly or because the execution flow passes
+ #: onwards to a subcommand. By default it's None, but it can be
+ #: the name of the subcommand to execute.
+ #:
+ #: If chaining is enabled this will be set to ``'*'`` in case
+ #: any commands are executed. It is however not possible to
+ #: figure out which ones. If you require this knowledge you
+ #: should use a :func:`resultcallback`.
+ self.invoked_subcommand = None
+
+ if terminal_width is None and parent is not None:
+ terminal_width = parent.terminal_width
+ #: The width of the terminal (None is autodetection).
+ self.terminal_width = terminal_width
+
+ if max_content_width is None and parent is not None:
+ max_content_width = parent.max_content_width
+ #: The maximum width of formatted content (None implies a sensible
+ #: default which is 80 for most things).
+ self.max_content_width = max_content_width
+
+ if allow_extra_args is None:
+ allow_extra_args = command.allow_extra_args
+ #: Indicates if the context allows extra args or if it should
+ #: fail on parsing.
+ #:
+ #: .. versionadded:: 3.0
+ self.allow_extra_args = allow_extra_args
+
+ if allow_interspersed_args is None:
+ allow_interspersed_args = command.allow_interspersed_args
+ #: Indicates if the context allows mixing of arguments and
+ #: options or not.
+ #:
+ #: .. versionadded:: 3.0
+ self.allow_interspersed_args = allow_interspersed_args
+
+ if ignore_unknown_options is None:
+ ignore_unknown_options = command.ignore_unknown_options
+        #: Instructs click to ignore options that a command does not
+        #: understand and store them on the context for later
+        #: processing. This is primarily useful for situations where you
+        #: want to call into external programs. Generally this pattern is
+        #: strongly discouraged because it's not possible to losslessly
+ #: forward all arguments.
+ #:
+ #: .. versionadded:: 4.0
+ self.ignore_unknown_options = ignore_unknown_options
+
+ if help_option_names is None:
+ if parent is not None:
+ help_option_names = parent.help_option_names
+ else:
+ help_option_names = ["--help"]
+
+ #: The names for the help options.
+ self.help_option_names = help_option_names
+
+ if token_normalize_func is None and parent is not None:
+ token_normalize_func = parent.token_normalize_func
+
+ #: An optional normalization function for tokens. This is
+ #: options, choices, commands etc.
+ self.token_normalize_func = token_normalize_func
+
+ #: Indicates if resilient parsing is enabled. In that case Click
+ #: will do its best to not cause any failures and default values
+ #: will be ignored. Useful for completion.
+ self.resilient_parsing = resilient_parsing
+
+ # If there is no envvar prefix yet, but the parent has one and
+ # the command on this level has a name, we can expand the envvar
+ # prefix automatically.
+ if auto_envvar_prefix is None:
+ if (
+ parent is not None
+ and parent.auto_envvar_prefix is not None
+ and self.info_name is not None
+ ):
+ auto_envvar_prefix = (
+ f"{parent.auto_envvar_prefix}_{self.info_name.upper()}"
+ )
+ else:
+ auto_envvar_prefix = auto_envvar_prefix.upper()
+ if auto_envvar_prefix is not None:
+ auto_envvar_prefix = auto_envvar_prefix.replace("-", "_")
+ self.auto_envvar_prefix = auto_envvar_prefix
+
+ if color is None and parent is not None:
+ color = parent.color
+
+ #: Controls if styling output is wanted or not.
+ self.color = color
+
+ self.show_default = show_default
+
+ self._close_callbacks = []
+ self._depth = 0
+ self._source_by_paramname = {}
+
+ def __enter__(self):
+ self._depth += 1
+ push_context(self)
+ return self
+
+ def __exit__(self, exc_type, exc_value, tb):
+ self._depth -= 1
+ if self._depth == 0:
+ self.close()
+ pop_context()
+
+ @contextmanager
+ def scope(self, cleanup=True):
+ """This helper method can be used with the context object to promote
+ it to the current thread local (see :func:`get_current_context`).
+ The default behavior of this is to invoke the cleanup functions which
+ can be disabled by setting `cleanup` to `False`. The cleanup
+ functions are typically used for things such as closing file handles.
+
+ If the cleanup is intended the context object can also be directly
+ used as a context manager.
+
+ Example usage::
+
+ with ctx.scope():
+ assert get_current_context() is ctx
+
+ This is equivalent::
+
+ with ctx:
+ assert get_current_context() is ctx
+
+ .. versionadded:: 5.0
+
+ :param cleanup: controls if the cleanup functions should be run or
+ not. The default is to run these functions. In
+ some situations the context only wants to be
+ temporarily pushed in which case this can be disabled.
+ Nested pushes automatically defer the cleanup.
+ """
+ if not cleanup:
+ self._depth += 1
+ try:
+ with self as rv:
+ yield rv
+ finally:
+ if not cleanup:
+ self._depth -= 1
+
+ @property
+ def meta(self):
+ """This is a dictionary which is shared with all the contexts
+ that are nested. It exists so that click utilities can store some
+ state here if they need to. It is however the responsibility of
+ that code to manage this dictionary well.
+
+ The keys are supposed to be unique dotted strings. For instance
+ module paths are a good choice for it. What is stored in there is
+ irrelevant for the operation of click. However what is important is
+ that code that places data here adheres to the general semantics of
+ the system.
+
+ Example usage::
+
+ LANG_KEY = f'{__name__}.lang'
+
+ def set_language(value):
+ ctx = get_current_context()
+ ctx.meta[LANG_KEY] = value
+
+ def get_language():
+ return get_current_context().meta.get(LANG_KEY, 'en_US')
+
+ .. versionadded:: 5.0
+ """
+ return self._meta
+
+ def make_formatter(self):
+ """Creates the formatter for the help and usage output."""
+ return HelpFormatter(
+ width=self.terminal_width, max_width=self.max_content_width
+ )
+
+ def call_on_close(self, f):
+ """This decorator remembers a function as callback that should be
+ executed when the context tears down. This is most useful to bind
+ resource handling to the script execution. For instance, file objects
+ opened by the :class:`File` type will register their close callbacks
+ here.
+
+ :param f: the function to execute on teardown.
+ """
+ self._close_callbacks.append(f)
+ return f
+
+ def close(self):
+ """Invokes all close callbacks."""
+ for cb in self._close_callbacks:
+ cb()
+ self._close_callbacks = []
+
+ @property
+ def command_path(self):
+ """The computed command path. This is used for the ``usage``
+ information on the help page. It's automatically created by
+ combining the info names of the chain of contexts to the root.
+ """
+ rv = ""
+ if self.info_name is not None:
+ rv = self.info_name
+ if self.parent is not None:
+ rv = f"{self.parent.command_path} {rv}"
+ return rv.lstrip()
+
+ def find_root(self):
+ """Finds the outermost context."""
+ node = self
+ while node.parent is not None:
+ node = node.parent
+ return node
+
+ def find_object(self, object_type):
+ """Finds the closest object of a given type."""
+ node = self
+ while node is not None:
+ if isinstance(node.obj, object_type):
+ return node.obj
+ node = node.parent
+
+ def ensure_object(self, object_type):
+ """Like :meth:`find_object` but sets the innermost object to a
+ new instance of `object_type` if it does not exist.
+ """
+ rv = self.find_object(object_type)
+ if rv is None:
+ self.obj = rv = object_type()
+ return rv
+
+ def lookup_default(self, name):
+ """Looks up the default for a parameter name. This by default
+ looks into the :attr:`default_map` if available.
+ """
+ if self.default_map is not None:
+ rv = self.default_map.get(name)
+ if callable(rv):
+ rv = rv()
+ return rv
+
+ def fail(self, message):
+ """Aborts the execution of the program with a specific error
+ message.
+
+ :param message: the error message to fail with.
+ """
+ raise UsageError(message, self)
+
+ def abort(self):
+ """Aborts the script."""
+ raise Abort()
+
+ def exit(self, code=0):
+ """Exits the application with a given exit code."""
+ raise Exit(code)
+
+ def get_usage(self):
+ """Helper method to get formatted usage string for the current
+ context and command.
+ """
+ return self.command.get_usage(self)
+
+ def get_help(self):
+ """Helper method to get formatted help page for the current
+ context and command.
+ """
+ return self.command.get_help(self)
+
+ def invoke(*args, **kwargs): # noqa: B902
+ """Invokes a command callback in exactly the way it expects. There
+ are two ways to invoke this method:
+
+ 1. the first argument can be a callback and all other arguments and
+ keyword arguments are forwarded directly to the function.
+ 2. the first argument is a click command object. In that case all
+ arguments are forwarded as well but proper click parameters
+ (options and click arguments) must be keyword arguments and Click
+ will fill in defaults.
+
+ Note that before Click 3.2 keyword arguments were not properly filled
+ in against the intention of this code and no context was created. For
+ more information about this change and why it was done in a bugfix
+ release see :ref:`upgrade-to-3.2`.
+ """
+ self, callback = args[:2]
+ ctx = self
+
+ # It's also possible to invoke another command which might or
+ # might not have a callback. In that case we also fill
+ # in defaults and make a new context for this command.
+ if isinstance(callback, Command):
+ other_cmd = callback
+ callback = other_cmd.callback
+ ctx = Context(other_cmd, info_name=other_cmd.name, parent=self)
+ if callback is None:
+ raise TypeError(
+ "The given command does not have a callback that can be invoked."
+ )
+
+ for param in other_cmd.params:
+ if param.name not in kwargs and param.expose_value:
+ kwargs[param.name] = param.get_default(ctx)
+
+ args = args[2:]
+ with augment_usage_errors(self):
+ with ctx:
+ return callback(*args, **kwargs)
+
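+ # Illustrative sketch (not upstream code) of the two call styles,
+ # assuming ``my_callback`` and ``other_cmd`` exist elsewhere:
+ #
+ #     ctx.invoke(my_callback, 1, kw=2)   # plain callback: args forwarded
+ #     ctx.invoke(other_cmd, count=3)     # command: missing params get defaults
+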
+ def forward(*args, **kwargs): # noqa: B902
+ """Similar to :meth:`invoke` but fills in default keyword
+ arguments from the current context if the other command expects
+ it. This cannot invoke callbacks directly, only other commands.
+ """
+ self, cmd = args[:2]
+
+ # It's also possible to invoke another command which might or
+ # might not have a callback.
+ if not isinstance(cmd, Command):
+ raise TypeError("Callback is not a command.")
+
+ for param in self.params:
+ if param not in kwargs:
+ kwargs[param] = self.params[param]
+
+ return self.invoke(cmd, **kwargs)
+
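+ # Illustrative sketch (not upstream code): unlike ``invoke``, ``forward``
+ # reuses the current context's parameter values for the other command
+ # (``other`` is an assumed, already-registered command):
+ #
+ #     @cli.command()
+ #     @click.option("--count", default=1)
+ #     @click.pass_context
+ #     def repeat(ctx, count):
+ #         ctx.forward(other)  # ``other`` also receives count=<current value>
+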
+ def set_parameter_source(self, name, source):
+ """Set the source of a parameter.
+
+ This indicates the location from which the value of the
+ parameter was obtained.
+
+ :param name: the name of the command line parameter
+ :param source: the source of the command line parameter, which
+ should be a valid ParameterSource value
+ """
+ ParameterSource.validate(source)
+ self._source_by_paramname[name] = source
+
+ def get_parameter_source(self, name):
+ """Get the source of a parameter.
+
+ This indicates the location from which the value of the
+ parameter was obtained. This can be useful for determining
+ when a user specified an option on the command line that is
+ the same as the default. In that case, the source would be
+ ParameterSource.COMMANDLINE, even though the value of the
+ parameter was equivalent to the default.
+
+ :param name: the name of the command line parameter
+ :returns: the source
+ :rtype: ParameterSource
+ """
+ return self._source_by_paramname[name]
+
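+ # Illustrative sketch (not upstream code): distinguishing an explicit
+ # ``--count 1`` from the untouched default of 1 inside a callback.
+ #
+ #     src = ctx.get_parameter_source("count")
+ #     if src == ParameterSource.COMMANDLINE:
+ #         ...  # the user typed it, even if it equals the default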
+
+class BaseCommand:
+ """The base command implements the minimal API contract of commands.
+ Most code will never use this as it does not implement a lot of useful
+ functionality, but it can serve as a base class for alternative
+ parsing approaches that do not depend on the Click parser.
+
+ For instance, this can be used to bridge Click and other systems like
+ argparse or docopt.
+
+ Because base commands do not implement a lot of the API that other
+ parts of Click take for granted, they are not supported for all
+ operations. For instance, they typically cannot be used with the
+ decorators, and they have no built-in callback system.
+
+ .. versionchanged:: 2.0
+ Added the `context_settings` parameter.
+
+ :param name: the name of the command to use unless a group overrides it.
+ :param context_settings: an optional dictionary with defaults that are
+ passed to the context object.
+ """
+
+ #: the default for the :attr:`Context.allow_extra_args` flag.
+ allow_extra_args = False
+ #: the default for the :attr:`Context.allow_interspersed_args` flag.
+ allow_interspersed_args = True
+ #: the default for the :attr:`Context.ignore_unknown_options` flag.
+ ignore_unknown_options = False
+
+ def __init__(self, name, context_settings=None):
+ #: the name the command thinks it has. Upon registering a command
+ #: on a :class:`Group` the group will use this as the default command
+ #: name. You should instead use the
+ #: :class:`Context`\'s :attr:`~Context.info_name` attribute.
+ self.name = name
+ if context_settings is None:
+ context_settings = {}
+ #: an optional dictionary with defaults passed to the context.
+ self.context_settings = context_settings
+
+ def __repr__(self):
+ return f"<{self.__class__.__name__} {self.name}>"
+
+ def get_usage(self, ctx):
+ raise NotImplementedError("Base commands cannot get usage")
+
+ def get_help(self, ctx):
+ raise NotImplementedError("Base commands cannot get help")
+
+ def make_context(self, info_name, args, parent=None, **extra):
+ """This function when given an info name and arguments will kick
+ off the parsing and create a new :class:`Context`. It does not
+ invoke the actual command callback though.
+
+ :param info_name: the info name for this invocation. Generally this
+ is the most descriptive name for the script or
+ command. For the toplevel script it's usually
+ the name of the script, for commands below it it's
+ the name of the command.
+ :param args: the arguments to parse as list of strings.
+ :param parent: the parent context if available.
+ :param extra: extra keyword arguments forwarded to the context
+ constructor.
+ """
+ for key, value in self.context_settings.items():
+ if key not in extra:
+ extra[key] = value
+ ctx = Context(self, info_name=info_name, parent=parent, **extra)
+ with ctx.scope(cleanup=False):
+ self.parse_args(ctx, args)
+ return ctx
+
+ def parse_args(self, ctx, args):
+ """Given a context and a list of arguments this creates the parser
+ and parses the arguments, then modifies the context as necessary.
+ This is automatically invoked by :meth:`make_context`.
+ """
+ raise NotImplementedError("Base commands do not know how to parse arguments.")
+
+ def invoke(self, ctx):
+ """Given a context, this invokes the command. The default
+ implementation is raising a not implemented error.
+ """
+ raise NotImplementedError("Base commands are not invokable by default")
+
+ def main(
+ self,
+ args=None,
+ prog_name=None,
+ complete_var=None,
+ standalone_mode=True,
+ **extra,
+ ):
+ """This is the way to invoke a script with all the bells and
+ whistles as a command line application. This will always terminate
+ the application after a call. If this is not wanted, ``SystemExit``
+ needs to be caught.
+
+ This method is also available by directly calling the instance of
+ a :class:`Command`.
+
+ .. versionadded:: 3.0
+ Added the `standalone_mode` flag to control the standalone mode.
+
+ :param args: the arguments that should be used for parsing. If not
+ provided, ``sys.argv[1:]`` is used.
+ :param prog_name: the program name that should be used. By default
+ the program name is constructed by taking the file
+ name from ``sys.argv[0]``.
+ :param complete_var: the environment variable that controls the
+ bash completion support. The default is
+ ``"_<prog_name>_COMPLETE"`` with prog_name in
+ uppercase.
+ :param standalone_mode: the default behavior is to invoke the script
+ in standalone mode. Click will then
+ handle exceptions and convert them into
+ error messages and the function will never
+ return but shut down the interpreter. If
+ this is set to `False` they will be
+ propagated to the caller and the return
+ value of this function is the return value
+ of :meth:`invoke`.
+ :param extra: extra keyword arguments are forwarded to the context
+ constructor. See :class:`Context` for more information.
+ """
+ # Verify that the environment is configured correctly, or reject
+ # further execution to avoid a broken script.
+ _verify_python_env()
+
+ if args is None:
+ args = sys.argv[1:]
+ else:
+ args = list(args)
+
+ if prog_name is None:
+ prog_name = make_str(
+ os.path.basename(sys.argv[0] if sys.argv else __file__)
+ )
+
+ # Hook for the Bash completion. This only activates if the Bash
+ # completion is actually enabled; otherwise it's quite a fast
+ # no-op.
+ _bashcomplete(self, prog_name, complete_var)
+
+ try:
+ try:
+ with self.make_context(prog_name, args, **extra) as ctx:
+ rv = self.invoke(ctx)
+ if not standalone_mode:
+ return rv
+ # it's not safe to `ctx.exit(rv)` here!
+ # note that `rv` may actually contain data like "1" which
+ # has obvious effects
+ # more subtle case: `rv=[None, None]` can come out of
+ # chained commands which all returned `None` -- so it's not
+ # even always obvious that `rv` indicates success/failure
+ # by its truthiness/falsiness
+ ctx.exit()
+ except (EOFError, KeyboardInterrupt):
+ echo(file=sys.stderr)
+ raise Abort()
+ except ClickException as e:
+ if not standalone_mode:
+ raise
+ e.show()
+ sys.exit(e.exit_code)
+ except OSError as e:
+ if e.errno == errno.EPIPE:
+ sys.stdout = PacifyFlushWrapper(sys.stdout)
+ sys.stderr = PacifyFlushWrapper(sys.stderr)
+ sys.exit(1)
+ else:
+ raise
+ except Exit as e:
+ if standalone_mode:
+ sys.exit(e.exit_code)
+ else:
+ # in non-standalone mode, return the exit code
+ # note that this is only reached if `self.invoke` above raises
+ # an Exit explicitly -- thus bypassing the check there which
+ # would return its result
+ # the results of non-standalone execution may therefore be
+ # somewhat ambiguous: if there are codepaths which lead to
+ # `ctx.exit(1)` and to `return 1`, the caller won't be able to
+ # tell the difference between the two
+ return e.exit_code
+ except Abort:
+ if not standalone_mode:
+ raise
+ echo("Aborted!", file=sys.stderr)
+ sys.exit(1)
+
+ def __call__(self, *args, **kwargs):
+ """Alias for :meth:`main`."""
+ return self.main(*args, **kwargs)
+
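+ # Illustrative sketch (not upstream code): running a command without
+ # letting Click terminate the interpreter.
+ #
+ #     rv = cli.main(["--name", "x"], standalone_mode=False)
+ #     # ClickException/Abort propagate; ``rv`` is the callback's return
+ #     # value, or the exit code if the command raised Exit explicitly.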
+
+class Command(BaseCommand):
+ """Commands are the basic building block of command line interfaces in
+ Click. A basic command handles command line parsing and might dispatch
+ more parsing to commands nested below it.
+
+ .. versionchanged:: 2.0
+ Added the `context_settings` parameter.
+ .. versionchanged:: 8.0
+ Added repr showing the command name
+ .. versionchanged:: 7.1
+ Added the `no_args_is_help` parameter.
+
+ :param name: the name of the command to use unless a group overrides it.
+ :param context_settings: an optional dictionary with defaults that are
+ passed to the context object.
+ :param callback: the callback to invoke. This is optional.
+ :param params: the parameters to register with this command. This can
+ be either :class:`Option` or :class:`Argument` objects.
+ :param help: the help string to use for this command.
+ :param epilog: like the help string but it's printed at the end of the
+ help page after everything else.
+ :param short_help: the short help to use for this command. This is
+ shown on the command listing of the parent command.
+ :param add_help_option: by default each command registers a ``--help``
+ option. This can be disabled by this parameter.
+ :param no_args_is_help: this controls what happens if no arguments are
+ provided. This option is disabled by default.
+ If enabled, ``--help`` is added as the argument
+ if no arguments are passed.
+ :param hidden: hide this command from help outputs.
+
+ :param deprecated: issues a message indicating that
+ the command is deprecated.
+ """
+
+ def __init__(
+ self,
+ name,
+ context_settings=None,
+ callback=None,
+ params=None,
+ help=None,
+ epilog=None,
+ short_help=None,
+ options_metavar="[OPTIONS]",
+ add_help_option=True,
+ no_args_is_help=False,
+ hidden=False,
+ deprecated=False,
+ ):
+ BaseCommand.__init__(self, name, context_settings)
+ #: the callback to execute when the command fires. This might be
+ #: `None` in which case nothing happens.
+ self.callback = callback
+ #: the list of parameters for this command in the order they
+ #: should show up in the help page and execute. Eager parameters
+ #: will automatically be handled before non eager ones.
+ self.params = params or []
+ # if a form feed (page break) is found in the help text, truncate help
+ # text to the content preceding the first form feed
+ if help and "\f" in help:
+ help = help.split("\f", 1)[0]
+ self.help = help
+ self.epilog = epilog
+ self.options_metavar = options_metavar
+ self.short_help = short_help
+ self.add_help_option = add_help_option
+ self.no_args_is_help = no_args_is_help
+ self.hidden = hidden
+ self.deprecated = deprecated
+
+ def __repr__(self):
+ return f"<{self.__class__.__name__} {self.name}>"
+
+ def get_usage(self, ctx):
+ """Formats the usage line into a string and returns it.
+
+ Calls :meth:`format_usage` internally.
+ """
+ formatter = ctx.make_formatter()
+ self.format_usage(ctx, formatter)
+ return formatter.getvalue().rstrip("\n")
+
+ def get_params(self, ctx):
+ rv = self.params
+ help_option = self.get_help_option(ctx)
+ if help_option is not None:
+ rv = rv + [help_option]
+ return rv
+
+ def format_usage(self, ctx, formatter):
+ """Writes the usage line into the formatter.
+
+ This is a low-level method called by :meth:`get_usage`.
+ """
+ pieces = self.collect_usage_pieces(ctx)
+ formatter.write_usage(ctx.command_path, " ".join(pieces))
+
+ def collect_usage_pieces(self, ctx):
+ """Returns all the pieces that go into the usage line and returns
+ it as a list of strings.
+ """
+ rv = [self.options_metavar]
+ for param in self.get_params(ctx):
+ rv.extend(param.get_usage_pieces(ctx))
+ return rv
+
+ def get_help_option_names(self, ctx):
+ """Returns the names for the help option."""
+ all_names = set(ctx.help_option_names)
+ for param in self.params:
+ all_names.difference_update(param.opts)
+ all_names.difference_update(param.secondary_opts)
+ return all_names
+
+ def get_help_option(self, ctx):
+ """Returns the help option object."""
+ help_options = self.get_help_option_names(ctx)
+ if not help_options or not self.add_help_option:
+ return
+
+ def show_help(ctx, param, value):
+ if value and not ctx.resilient_parsing:
+ echo(ctx.get_help(), color=ctx.color)
+ ctx.exit()
+
+ return Option(
+ help_options,
+ is_flag=True,
+ is_eager=True,
+ expose_value=False,
+ callback=show_help,
+ help="Show this message and exit.",
+ )
+
+ def make_parser(self, ctx):
+ """Creates the underlying option parser for this command."""
+ parser = OptionParser(ctx)
+ for param in self.get_params(ctx):
+ param.add_to_parser(parser, ctx)
+ return parser
+
+ def get_help(self, ctx):
+ """Formats the help into a string and returns it.
+
+ Calls :meth:`format_help` internally.
+ """
+ formatter = ctx.make_formatter()
+ self.format_help(ctx, formatter)
+ return formatter.getvalue().rstrip("\n")
+
+ def get_short_help_str(self, limit=45):
+ """Gets short help for the command or makes it by shortening the
+ long help string.
+ """
+ return (
+ self.short_help
+ or self.help
+ and make_default_short_help(self.help, limit)
+ or ""
+ )
+
+ def format_help(self, ctx, formatter):
+ """Writes the help into the formatter if it exists.
+
+ This is a low-level method called by :meth:`get_help`.
+
+ This calls the following methods:
+
+ - :meth:`format_usage`
+ - :meth:`format_help_text`
+ - :meth:`format_options`
+ - :meth:`format_epilog`
+ """
+ self.format_usage(ctx, formatter)
+ self.format_help_text(ctx, formatter)
+ self.format_options(ctx, formatter)
+ self.format_epilog(ctx, formatter)
+
+ def format_help_text(self, ctx, formatter):
+ """Writes the help text to the formatter if it exists."""
+ if self.help:
+ formatter.write_paragraph()
+ with formatter.indentation():
+ help_text = self.help
+ if self.deprecated:
+ help_text += DEPRECATED_HELP_NOTICE
+ formatter.write_text(help_text)
+ elif self.deprecated:
+ formatter.write_paragraph()
+ with formatter.indentation():
+ formatter.write_text(DEPRECATED_HELP_NOTICE)
+
+ def format_options(self, ctx, formatter):
+ """Writes all the options into the formatter if they exist."""
+ opts = []
+ for param in self.get_params(ctx):
+ rv = param.get_help_record(ctx)
+ if rv is not None:
+ opts.append(rv)
+
+ if opts:
+ with formatter.section("Options"):
+ formatter.write_dl(opts)
+
+ def format_epilog(self, ctx, formatter):
+ """Writes the epilog into the formatter if it exists."""
+ if self.epilog:
+ formatter.write_paragraph()
+ with formatter.indentation():
+ formatter.write_text(self.epilog)
+
+ def parse_args(self, ctx, args):
+ if not args and self.no_args_is_help and not ctx.resilient_parsing:
+ echo(ctx.get_help(), color=ctx.color)
+ ctx.exit()
+
+ parser = self.make_parser(ctx)
+ opts, args, param_order = parser.parse_args(args=args)
+
+ for param in iter_params_for_processing(param_order, self.get_params(ctx)):
+ value, args = param.handle_parse_result(ctx, opts, args)
+
+ if args and not ctx.allow_extra_args and not ctx.resilient_parsing:
+ ctx.fail(
+ "Got unexpected extra"
+ f" argument{'s' if len(args) != 1 else ''}"
+ f" ({' '.join(map(make_str, args))})"
+ )
+
+ ctx.args = args
+ return args
+
+ def invoke(self, ctx):
+ """Given a context, this invokes the attached callback (if it exists)
+ in the right way.
+ """
+ _maybe_show_deprecated_notice(self)
+ if self.callback is not None:
+ return ctx.invoke(self.callback, **ctx.params)
+
+
+class MultiCommand(Command):
+ """A multi command is the basic implementation of a command that
+ dispatches to subcommands. The most common version is the
+ :class:`Group`.
+
+ :param invoke_without_command: this controls how the multi command itself
+ is invoked. By default it's only invoked
+ if a subcommand is provided.
+ :param no_args_is_help: this controls what happens if no arguments are
+ provided. This option is enabled by default if
+ `invoke_without_command` is disabled, and
+ disabled otherwise. If enabled, ``--help`` is
+ added as the argument when no arguments are
+ passed.
+ :param subcommand_metavar: the string that is used in the documentation
+ to indicate the subcommand place.
+ :param chain: if this is set to `True` chaining of multiple subcommands
+ is enabled. This restricts the form of commands in that
+ they cannot have optional arguments but it allows
+ multiple commands to be chained together.
+ :param result_callback: the result callback to attach to this multi
+ command.
+ """
+
+ allow_extra_args = True
+ allow_interspersed_args = False
+
+ def __init__(
+ self,
+ name=None,
+ invoke_without_command=False,
+ no_args_is_help=None,
+ subcommand_metavar=None,
+ chain=False,
+ result_callback=None,
+ **attrs,
+ ):
+ Command.__init__(self, name, **attrs)
+ if no_args_is_help is None:
+ no_args_is_help = not invoke_without_command
+ self.no_args_is_help = no_args_is_help
+ self.invoke_without_command = invoke_without_command
+ if subcommand_metavar is None:
+ if chain:
+ subcommand_metavar = SUBCOMMANDS_METAVAR
+ else:
+ subcommand_metavar = SUBCOMMAND_METAVAR
+ self.subcommand_metavar = subcommand_metavar
+ self.chain = chain
+ #: The result callback that is stored. This can be set or
+ #: overridden with the :func:`resultcallback` decorator.
+ self.result_callback = result_callback
+
+ if self.chain:
+ for param in self.params:
+ if isinstance(param, Argument) and not param.required:
+ raise RuntimeError(
+ "Multi commands in chain mode cannot have"
+ " optional arguments."
+ )
+
+ def collect_usage_pieces(self, ctx):
+ rv = Command.collect_usage_pieces(self, ctx)
+ rv.append(self.subcommand_metavar)
+ return rv
+
+ def format_options(self, ctx, formatter):
+ Command.format_options(self, ctx, formatter)
+ self.format_commands(ctx, formatter)
+
+ def resultcallback(self, replace=False):
+ """Adds a result callback to the chain command. By default if a
+ result callback is already registered this will chain them but
+ this can be disabled with the `replace` parameter. The result
+ callback is invoked with the return value of the subcommand
+ (or the list of return values from all subcommands if chaining
+ is enabled) as well as the parameters as they would be passed
+ to the main callback.
+
+ Example::
+
+ @click.group()
+ @click.option('-i', '--input', default=23)
+ def cli(input):
+ return 42
+
+ @cli.resultcallback()
+ def process_result(result, input):
+ return result + input
+
+ .. versionadded:: 3.0
+
+ :param replace: if set to `True` an already existing result
+ callback will be removed.
+ """
+
+ def decorator(f):
+ old_callback = self.result_callback
+ if old_callback is None or replace:
+ self.result_callback = f
+ return f
+
+ def function(__value, *args, **kwargs):
+ return f(old_callback(__value, *args, **kwargs), *args, **kwargs)
+
+ self.result_callback = rv = update_wrapper(function, f)
+ return rv
+
+ return decorator
+
+ def format_commands(self, ctx, formatter):
+ """Extra format methods for multi methods that adds all the commands
+ after the options.
+ """
+ commands = []
+ for subcommand in self.list_commands(ctx):
+ cmd = self.get_command(ctx, subcommand)
+ # list_commands() advertised a name that get_command() cannot resolve; skip it.
+ if cmd is None:
+ continue
+ if cmd.hidden:
+ continue
+
+ commands.append((subcommand, cmd))
+
+ # allow for 3 times the default spacing
+ if len(commands):
+ limit = formatter.width - 6 - max(len(cmd[0]) for cmd in commands)
+
+ rows = []
+ for subcommand, cmd in commands:
+ help = cmd.get_short_help_str(limit)
+ rows.append((subcommand, help))
+
+ if rows:
+ with formatter.section("Commands"):
+ formatter.write_dl(rows)
+
+ def parse_args(self, ctx, args):
+ if not args and self.no_args_is_help and not ctx.resilient_parsing:
+ echo(ctx.get_help(), color=ctx.color)
+ ctx.exit()
+
+ rest = Command.parse_args(self, ctx, args)
+ if self.chain:
+ ctx.protected_args = rest
+ ctx.args = []
+ elif rest:
+ ctx.protected_args, ctx.args = rest[:1], rest[1:]
+
+ return ctx.args
+
+ def invoke(self, ctx):
+ def _process_result(value):
+ if self.result_callback is not None:
+ value = ctx.invoke(self.result_callback, value, **ctx.params)
+ return value
+
+ if not ctx.protected_args:
+ # If we are invoked without command the chain flag controls
+ # how this happens. If we are not in chain mode, the return
+ # value here is the return value of the command.
+ # If however we are in chain mode, the return value is the
+ # return value of the result processor invoked with an empty
+ # list (which means that no subcommand actually was executed).
+ if self.invoke_without_command:
+ if not self.chain:
+ return Command.invoke(self, ctx)
+ with ctx:
+ Command.invoke(self, ctx)
+ return _process_result([])
+ ctx.fail("Missing command.")
+
+ # Fetch args back out
+ args = ctx.protected_args + ctx.args
+ ctx.args = []
+ ctx.protected_args = []
+
+ # If we're not in chain mode, we only allow the invocation of a
+ # single command but we also inform the current context about the
+ # name of the command to invoke.
+ if not self.chain:
+ # Make sure the context is entered so we do not clean up
+ # resources until the result processor has worked.
+ with ctx:
+ cmd_name, cmd, args = self.resolve_command(ctx, args)
+ ctx.invoked_subcommand = cmd_name
+ Command.invoke(self, ctx)
+ sub_ctx = cmd.make_context(cmd_name, args, parent=ctx)
+ with sub_ctx:
+ return _process_result(sub_ctx.command.invoke(sub_ctx))
+
+ # In chain mode we create the contexts step by step, but after the
+ # base command has been invoked. Because at that point we do not
+ # know the subcommands yet, the invoked subcommand attribute is
+ # set to ``*`` to inform the command that subcommands are executed
+ # but nothing else.
+ with ctx:
+ ctx.invoked_subcommand = "*" if args else None
+ Command.invoke(self, ctx)
+
+ # Otherwise we make every single context and invoke them in a
+ # chain. In that case the return value to the result processor
+ # is the list of all invoked subcommand's results.
+ contexts = []
+ while args:
+ cmd_name, cmd, args = self.resolve_command(ctx, args)
+ sub_ctx = cmd.make_context(
+ cmd_name,
+ args,
+ parent=ctx,
+ allow_extra_args=True,
+ allow_interspersed_args=False,
+ )
+ contexts.append(sub_ctx)
+ args, sub_ctx.args = sub_ctx.args, []
+
+ rv = []
+ for sub_ctx in contexts:
+ with sub_ctx:
+ rv.append(sub_ctx.command.invoke(sub_ctx))
+ return _process_result(rv)
+
+ def resolve_command(self, ctx, args):
+ cmd_name = make_str(args[0])
+ original_cmd_name = cmd_name
+
+ # Get the command
+ cmd = self.get_command(ctx, cmd_name)
+
+ # If we can't find the command but there is a normalization
+ # function available, we try with that one.
+ if cmd is None and ctx.token_normalize_func is not None:
+ cmd_name = ctx.token_normalize_func(cmd_name)
+ cmd = self.get_command(ctx, cmd_name)
+
+ # If we don't find the command we want to show an error message
+ # to the user that it was not provided. However, there is
+ # something else we should do: if the first argument looks like
+ # an option we want to kick off parsing again for arguments to
+ # resolve things like --help which now should go to the main
+ # place.
+ if cmd is None and not ctx.resilient_parsing:
+ if split_opt(cmd_name)[0]:
+ self.parse_args(ctx, ctx.args)
+ ctx.fail(f"No such command '{original_cmd_name}'.")
+
+ return cmd_name, cmd, args[1:]
+
+ def get_command(self, ctx, cmd_name):
+ """Given a context and a command name, this returns a
+ :class:`Command` object if it exists or returns `None`.
+ """
+ raise NotImplementedError()
+
+ def list_commands(self, ctx):
+ """Returns a list of subcommand names in the order they should
+ appear.
+ """
+ return []
+
+
+class Group(MultiCommand):
+ """A group allows a command to have subcommands attached. This is the
+ most common way to implement nesting in Click.
+
+ :param commands: a dictionary of commands.
+ """
+
+ def __init__(self, name=None, commands=None, **attrs):
+ MultiCommand.__init__(self, name, **attrs)
+ #: the registered subcommands by their exported names.
+ self.commands = commands or {}
+
+ def add_command(self, cmd, name=None):
+ """Registers another :class:`Command` with this group. If the name
+ is not provided, the name of the command is used.
+ """
+ name = name or cmd.name
+ if name is None:
+ raise TypeError("Command has no name.")
+ _check_multicommand(self, name, cmd, register=True)
+ self.commands[name] = cmd
+
+ def command(self, *args, **kwargs):
+ """A shortcut decorator for declaring and attaching a command to
+ the group. This takes the same arguments as :func:`command` but
+ immediately registers the created command with this instance by
+ calling into :meth:`add_command`.
+ """
+ from .decorators import command
+
+ def decorator(f):
+ cmd = command(*args, **kwargs)(f)
+ self.add_command(cmd)
+ return cmd
+
+ return decorator
+
+ def group(self, *args, **kwargs):
+ """A shortcut decorator for declaring and attaching a group to
+ the group. This takes the same arguments as :func:`group` but
+ immediately registers the created command with this instance by
+ calling into :meth:`add_command`.
+ """
+ from .decorators import group
+
+ def decorator(f):
+ cmd = group(*args, **kwargs)(f)
+ self.add_command(cmd)
+ return cmd
+
+ return decorator
+
+ def get_command(self, ctx, cmd_name):
+ return self.commands.get(cmd_name)
+
+ def list_commands(self, ctx):
+ return sorted(self.commands)
+
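+ # Illustrative sketch (not upstream code): the two equivalent ways to
+ # attach a subcommand to a Group.
+ #
+ #     @click.group()
+ #     def cli():
+ #         pass
+ #
+ #     @cli.command()          # declares and registers in one step
+ #     def sync():
+ #         pass
+ #
+ #     cli.add_command(sync, "sync-again")   # explicit registration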
+
+class CommandCollection(MultiCommand):
+ """A command collection is a multi command that merges multiple multi
+ commands together into one. This is a straightforward implementation
+ that accepts a list of different multi commands as sources and
+ provides all the commands for each of them.
+ """
+
+ def __init__(self, name=None, sources=None, **attrs):
+ MultiCommand.__init__(self, name, **attrs)
+ #: The list of registered multi commands.
+ self.sources = sources or []
+
+ def add_source(self, multi_cmd):
+ """Adds a new multi command to the chain dispatcher."""
+ self.sources.append(multi_cmd)
+
+ def get_command(self, ctx, cmd_name):
+ for source in self.sources:
+ rv = source.get_command(ctx, cmd_name)
+ if rv is not None:
+ if self.chain:
+ _check_multicommand(self, cmd_name, rv)
+ return rv
+
+ def list_commands(self, ctx):
+ rv = set()
+ for source in self.sources:
+ rv.update(source.list_commands(ctx))
+ return sorted(rv)
+
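+ # Illustrative sketch (not upstream code): merging two assumed groups so
+ # their commands appear as one CLI.
+ #
+ #     cli = CommandCollection(sources=[group_a, group_b])
+ #     # get_command() asks each source in order; list_commands() is the
+ #     # sorted union of both groups' command names.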
+
+class Parameter:
+ r"""A parameter to a command comes in two versions: they are either
+ :class:`Option`\s or :class:`Argument`\s. Other subclasses are currently
+ not supported by design as some of the internals for parsing are
+ intentionally not finalized.
+
+ Some settings are supported by both options and arguments.
+
+ :param param_decls: the parameter declarations for this option or
+ argument. This is a list of flags or argument
+ names.
+ :param type: the type that should be used. Either a :class:`ParamType`
+ or a Python type. The latter is converted into the former
+ automatically if supported.
+ :param required: controls if this is optional or not.
+ :param default: the default value if omitted. This can also be a callable,
+ in which case it's invoked when the default is needed
+ without any arguments.
+ :param callback: a callback that should be executed after the parameter
+ was matched. This is called as ``fn(ctx, param,
+ value)`` and needs to return the value.
+ :param nargs: the number of arguments to match. If not ``1`` the return
+ value is a tuple instead of a single value. The default for
+ nargs is ``1`` (except if the type is a tuple, then it's
+ the arity of the tuple). If ``nargs=-1``, all remaining
+ parameters are collected.
+ :param metavar: how the value is represented in the help page.
+ :param expose_value: if this is `True` then the value is passed onwards
+ to the command callback and stored on the context,
+ otherwise it's skipped.
+ :param is_eager: eager values are processed before non eager ones. This
+ should not be set for arguments or it will invert the
+ order of processing.
+ :param envvar: a string or list of strings that are environment variables
+ that should be checked.
+
+ .. versionchanged:: 7.1
+ Empty environment variables are ignored rather than taking the
+ empty string value. This makes it possible for scripts to clear
+ variables if they can't unset them.
+
+ .. versionchanged:: 2.0
+ Changed signature for parameter callback to also be passed the
+ parameter. The old callback format will still work, but it will
+ raise a warning to give you a chance to migrate the code easier.
+ """
+ param_type_name = "parameter"
+
+ def __init__(
+ self,
+ param_decls=None,
+ type=None,
+ required=False,
+ default=None,
+ callback=None,
+ nargs=None,
+ metavar=None,
+ expose_value=True,
+ is_eager=False,
+ envvar=None,
+ autocompletion=None,
+ ):
+ self.name, self.opts, self.secondary_opts = self._parse_decls(
+ param_decls or (), expose_value
+ )
+
+ self.type = convert_type(type, default)
+
+ # Default nargs to what the type tells us if we have that
+ # information available.
+ if nargs is None:
+ if self.type.is_composite:
+ nargs = self.type.arity
+ else:
+ nargs = 1
+
+ self.required = required
+ self.callback = callback
+ self.nargs = nargs
+ self.multiple = False
+ self.expose_value = expose_value
+ self.default = default
+ self.is_eager = is_eager
+ self.metavar = metavar
+ self.envvar = envvar
+ self.autocompletion = autocompletion
+
+ def __repr__(self):
+ return f"<{self.__class__.__name__} {self.name}>"
+
+ @property
+ def human_readable_name(self):
+ """Returns the human readable name of this parameter. This is the
+ same as the name for options, but the metavar for arguments.
+ """
+ return self.name
+
+ def make_metavar(self):
+ if self.metavar is not None:
+ return self.metavar
+ metavar = self.type.get_metavar(self)
+ if metavar is None:
+ metavar = self.type.name.upper()
+ if self.nargs != 1:
+ metavar += "..."
+ return metavar
+
+ def get_default(self, ctx):
+ """Given a context variable this calculates the default value."""
+ # Otherwise go with the regular default.
+ if callable(self.default):
+ rv = self.default()
+ else:
+ rv = self.default
+ return self.type_cast_value(ctx, rv)
+
+ def add_to_parser(self, parser, ctx):
+ pass
+
+ def consume_value(self, ctx, opts):
+ value = opts.get(self.name)
+ source = ParameterSource.COMMANDLINE
+ if value is None:
+ value = self.value_from_envvar(ctx)
+ source = ParameterSource.ENVIRONMENT
+ if value is None:
+ value = ctx.lookup_default(self.name)
+ source = ParameterSource.DEFAULT_MAP
+ if value is not None:
+ ctx.set_parameter_source(self.name, source)
+ return value
+
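+ # Illustrative note (not upstream code): as implemented above, lookup
+ # precedence is command line, then environment variable, then
+ # ``ctx.default_map``; ``full_process_value`` falls back to the
+ # parameter's own default last. E.g. with an assumed option declared as
+ # ``@click.option("--debug", envvar="APP_DEBUG")``:
+ #
+ #     app --debug 1      ->  ParameterSource.COMMANDLINE
+ #     APP_DEBUG=1 app    ->  ParameterSource.ENVIRONMENT
+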
+ def type_cast_value(self, ctx, value):
+ """Given a value this runs it properly through the type system.
+ This automatically handles things like `nargs` and `multiple` as
+ well as composite types.
+ """
+ if self.type.is_composite:
+ if self.nargs <= 1:
+ raise TypeError(
+ "Attempted to invoke composite type but nargs has"
+ f" been set to {self.nargs}. This is not supported;"
+ " nargs needs to be set to a fixed value > 1."
+ )
+ if self.multiple:
+ return tuple(self.type(x or (), self, ctx) for x in value or ())
+ return self.type(value or (), self, ctx)
+
+ def _convert(value, level):
+ if level == 0:
+ return self.type(value, self, ctx)
+ return tuple(_convert(x, level - 1) for x in value or ())
+
+ return _convert(value, (self.nargs != 1) + bool(self.multiple))
+
+ def process_value(self, ctx, value):
+ """Given a value and context this runs the logic to convert the
+ value as necessary.
+ """
+ # If the value we were given is None we do nothing. This way
+ # code that calls this can easily figure out if something was
+ # not provided. Otherwise it would be converted into an empty
+ # tuple for multiple invocations which is inconvenient.
+ if value is not None:
+ return self.type_cast_value(ctx, value)
+
+ def value_is_missing(self, value):
+ if value is None:
+ return True
+ if (self.nargs != 1 or self.multiple) and value == ():
+ return True
+ return False
+
+ def full_process_value(self, ctx, value):
+ value = self.process_value(ctx, value)
+
+ if value is None and not ctx.resilient_parsing:
+ value = self.get_default(ctx)
+ if value is not None:
+ ctx.set_parameter_source(self.name, ParameterSource.DEFAULT)
+
+ if self.required and self.value_is_missing(value):
+ raise MissingParameter(ctx=ctx, param=self)
+
+ return value
+
+ def resolve_envvar_value(self, ctx):
+ if self.envvar is None:
+ return
+ if isinstance(self.envvar, (tuple, list)):
+ for envvar in self.envvar:
+ rv = os.environ.get(envvar)
+ if rv is not None:
+ return rv
+ else:
+ rv = os.environ.get(self.envvar)
+
+ if rv != "":
+ return rv
+
+ def value_from_envvar(self, ctx):
+ rv = self.resolve_envvar_value(ctx)
+ if rv is not None and self.nargs != 1:
+ rv = self.type.split_envvar_value(rv)
+ return rv
+
+ def handle_parse_result(self, ctx, opts, args):
+ with augment_usage_errors(ctx, param=self):
+ value = self.consume_value(ctx, opts)
+ try:
+ value = self.full_process_value(ctx, value)
+ except Exception:
+ if not ctx.resilient_parsing:
+ raise
+ value = None
+ if self.callback is not None:
+ try:
+ value = self.callback(ctx, self, value)
+ except Exception:
+ if not ctx.resilient_parsing:
+ raise
+
+ if self.expose_value:
+ ctx.params[self.name] = value
+ return value, args
+
+ def get_help_record(self, ctx):
+ pass
+
+ def get_usage_pieces(self, ctx):
+ return []
+
+ def get_error_hint(self, ctx):
+ """Get a stringified version of the param for use in error messages to
+ indicate which param caused the error.
+ """
+ hint_list = self.opts or [self.human_readable_name]
+ return " / ".join(repr(x) for x in hint_list)
+
+
+class Option(Parameter):
+ """Options are usually optional values on the command line and
+ have some extra features that arguments don't have.
+
+ All other parameters are passed onwards to the parameter constructor.
+
+ :param show_default: controls if the default value should be shown on the
+ help page. Normally, defaults are not shown. If this
+ value is a string, it shows the string instead of the
+ value. This is particularly useful for dynamic options.
+ :param show_envvar: controls if an environment variable should be shown on
+ the help page. Normally, environment variables
+ are not shown.
+ :param prompt: if set to `True` or a non empty string then the user will be
+ prompted for input. If set to `True` the prompt will be the
+ option name capitalized.
+ :param confirmation_prompt: if set then the value will need to be confirmed
+ if it was prompted for.
+ :param hide_input: if this is `True` then the input on the prompt will be
+ hidden from the user. This is useful for password
+ input.
+ :param is_flag: forces this option to act as a flag. The default is
+ auto detection.
+ :param flag_value: which value should be used for this flag if it's
+ enabled. This is set to a boolean automatically if
+ the option string contains a slash to mark two options.
+ :param multiple: if this is set to `True` then the argument is accepted
+ multiple times and recorded. This is similar to ``nargs``
+ in how it works but supports arbitrary number of
+ arguments.
+ :param count: this flag makes an option increment an integer.
+ :param allow_from_autoenv: if this is enabled then the value of this
+ parameter will be pulled from an environment
+ variable in case a prefix is defined on the
+ context.
+ :param help: the help string.
+ :param hidden: hide this option from help outputs.
+ """
+
+ param_type_name = "option"
+
+ def __init__(
+ self,
+ param_decls=None,
+ show_default=False,
+ prompt=False,
+ confirmation_prompt=False,
+ hide_input=False,
+ is_flag=None,
+ flag_value=None,
+ multiple=False,
+ count=False,
+ allow_from_autoenv=True,
+ type=None,
+ help=None,
+ hidden=False,
+ show_choices=True,
+ show_envvar=False,
+ **attrs,
+ ):
+ default_is_missing = attrs.get("default", _missing) is _missing
+ Parameter.__init__(self, param_decls, type=type, **attrs)
+
+ if prompt is True:
+ prompt_text = self.name.replace("_", " ").capitalize()
+ elif prompt is False:
+ prompt_text = None
+ else:
+ prompt_text = prompt
+ self.prompt = prompt_text
+ self.confirmation_prompt = confirmation_prompt
+ self.hide_input = hide_input
+ self.hidden = hidden
+
+ # Flags
+ if is_flag is None:
+ if flag_value is not None:
+ is_flag = True
+ else:
+ is_flag = bool(self.secondary_opts)
+ if is_flag and default_is_missing:
+ self.default = False
+ if flag_value is None:
+ flag_value = not self.default
+ self.is_flag = is_flag
+ self.flag_value = flag_value
+ if self.is_flag and isinstance(self.flag_value, bool) and type in [None, bool]:
+ self.type = BOOL
+ self.is_bool_flag = True
+ else:
+ self.is_bool_flag = False
+
+ # Counting
+ self.count = count
+ if count:
+ if type is None:
+ self.type = IntRange(min=0)
+ if default_is_missing:
+ self.default = 0
+
+ self.multiple = multiple
+ self.allow_from_autoenv = allow_from_autoenv
+ self.help = help
+ self.show_default = show_default
+ self.show_choices = show_choices
+ self.show_envvar = show_envvar
+
+ # Sanity check for stuff we don't support
+ if __debug__:
+ if self.nargs < 0:
+ raise TypeError("Options cannot have nargs < 0")
+ if self.prompt and self.is_flag and not self.is_bool_flag:
+ raise TypeError("Cannot prompt for flags that are not bools.")
+ if not self.is_bool_flag and self.secondary_opts:
+ raise TypeError("Got secondary option for non boolean flag.")
+ if self.is_bool_flag and self.hide_input and self.prompt is not None:
+ raise TypeError("Hidden input does not work with boolean flag prompts.")
+ if self.count:
+ if self.multiple:
+ raise TypeError(
+ "Options cannot be multiple and count at the same time."
+ )
+ elif self.is_flag:
+ raise TypeError(
+ "Options cannot be count and flags at the same time."
+ )
+
+ def _parse_decls(self, decls, expose_value):
+ opts = []
+ secondary_opts = []
+ name = None
+ possible_names = []
+
+ for decl in decls:
+ if decl.isidentifier():
+ if name is not None:
+ raise TypeError("Name defined twice")
+ name = decl
+ else:
+ split_char = ";" if decl[:1] == "/" else "/"
+ if split_char in decl:
+ first, second = decl.split(split_char, 1)
+ first = first.rstrip()
+ if first:
+ possible_names.append(split_opt(first))
+ opts.append(first)
+ second = second.lstrip()
+ if second:
+ secondary_opts.append(second.lstrip())
+ else:
+ possible_names.append(split_opt(decl))
+ opts.append(decl)
+
+ if name is None and possible_names:
+ possible_names.sort(key=lambda x: -len(x[0])) # group long options first
+ name = possible_names[0][1].replace("-", "_").lower()
+ if not name.isidentifier():
+ name = None
+
+ if name is None:
+ if not expose_value:
+ return None, opts, secondary_opts
+ raise TypeError("Could not determine name for option")
+
+ if not opts and not secondary_opts:
+ raise TypeError(
+ f"No options defined but a name was passed ({name})."
+ " Did you mean to declare an argument instead of an"
+ " option?"
+ )
+
+ return name, opts, secondary_opts
+
+ def add_to_parser(self, parser, ctx):
+ kwargs = {
+ "dest": self.name,
+ "nargs": self.nargs,
+ "obj": self,
+ }
+
+ if self.multiple:
+ action = "append"
+ elif self.count:
+ action = "count"
+ else:
+ action = "store"
+
+ if self.is_flag:
+ kwargs.pop("nargs", None)
+ action_const = f"{action}_const"
+ if self.is_bool_flag and self.secondary_opts:
+ parser.add_option(self.opts, action=action_const, const=True, **kwargs)
+ parser.add_option(
+ self.secondary_opts, action=action_const, const=False, **kwargs
+ )
+ else:
+ parser.add_option(
+ self.opts, action=action_const, const=self.flag_value, **kwargs
+ )
+ else:
+ kwargs["action"] = action
+ parser.add_option(self.opts, **kwargs)
+
+ def get_help_record(self, ctx):
+ if self.hidden:
+ return
+ any_prefix_is_slash = []
+
+ def _write_opts(opts):
+ rv, any_slashes = join_options(opts)
+ if any_slashes:
+ any_prefix_is_slash[:] = [True]
+ if not self.is_flag and not self.count:
+ rv += f" {self.make_metavar()}"
+ return rv
+
+ rv = [_write_opts(self.opts)]
+ if self.secondary_opts:
+ rv.append(_write_opts(self.secondary_opts))
+
+ help = self.help or ""
+ extra = []
+ if self.show_envvar:
+ envvar = self.envvar
+ if envvar is None:
+ if self.allow_from_autoenv and ctx.auto_envvar_prefix is not None:
+ envvar = f"{ctx.auto_envvar_prefix}_{self.name.upper()}"
+ if envvar is not None:
+ var_str = (
+ ", ".join(str(d) for d in envvar)
+ if isinstance(envvar, (list, tuple))
+ else envvar
+ )
+ extra.append(f"env var: {var_str}")
+ if self.default is not None and (self.show_default or ctx.show_default):
+ if isinstance(self.show_default, str):
+ default_string = f"({self.show_default})"
+ elif isinstance(self.default, (list, tuple)):
+ default_string = ", ".join(str(d) for d in self.default)
+ elif inspect.isfunction(self.default):
+ default_string = "(dynamic)"
+ else:
+ default_string = self.default
+ extra.append(f"default: {default_string}")
+
+ if self.required:
+ extra.append("required")
+ if extra:
+ extra_str = ";".join(extra)
+ help = f"{help} [{extra_str}]" if help else f"[{extra_str}]"
+
+ return ("; " if any_prefix_is_slash else " / ").join(rv), help
+
+ def get_default(self, ctx):
+ # If we're a non boolean flag, our default is more complex because
+ # we need to look at all flags in the same group to figure out
+ # if we're the default one, in which case we return the flag
+ # value as default.
+ if self.is_flag and not self.is_bool_flag:
+ for param in ctx.command.params:
+ if param.name == self.name and param.default:
+ return param.flag_value
+ return None
+ return Parameter.get_default(self, ctx)
+
+ def prompt_for_value(self, ctx):
+ """This is an alternative flow that can be activated in the full
+ value processing if a value does not exist. It will prompt the
+ user until a valid value exists and then returns the processed
+ value as result.
+ """
+ # Calculate the default before prompting anything to be stable.
+ default = self.get_default(ctx)
+
+ # If this is a prompt for a flag we need to handle this
+ # differently.
+ if self.is_bool_flag:
+ return confirm(self.prompt, default)
+
+ return prompt(
+ self.prompt,
+ default=default,
+ type=self.type,
+ hide_input=self.hide_input,
+ show_choices=self.show_choices,
+ confirmation_prompt=self.confirmation_prompt,
+ value_proc=lambda x: self.process_value(ctx, x),
+ )
+
+ def resolve_envvar_value(self, ctx):
+ rv = Parameter.resolve_envvar_value(self, ctx)
+ if rv is not None:
+ return rv
+ if self.allow_from_autoenv and ctx.auto_envvar_prefix is not None:
+ envvar = f"{ctx.auto_envvar_prefix}_{self.name.upper()}"
+ return os.environ.get(envvar)
+
+ def value_from_envvar(self, ctx):
+ rv = self.resolve_envvar_value(ctx)
+ if rv is None:
+ return None
+ value_depth = (self.nargs != 1) + bool(self.multiple)
+ if value_depth > 0 and rv is not None:
+ rv = self.type.split_envvar_value(rv)
+ if self.multiple and self.nargs != 1:
+ rv = batch(rv, self.nargs)
+ return rv
+
+ def full_process_value(self, ctx, value):
+ if value is None and self.prompt is not None and not ctx.resilient_parsing:
+ return self.prompt_for_value(ctx)
+ return Parameter.full_process_value(self, ctx, value)
+
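+ # Illustrative sketch (not upstream code): common Option shapes handled
+ # by the constructor above.
+ #
+ #     @click.option("--debug/--no-debug", default=False)   # bool flag pair
+ #     @click.option("-v", "--verbose", count=True)         # counting flag
+ #     @click.option("--password", prompt=True, hide_input=True)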
+
+class Argument(Parameter):
+ """Arguments are positional parameters to a command. They generally
+ provide fewer features than options but can have infinite ``nargs``
+ and are required by default.
+
+ All parameters are passed onwards to the parameter constructor.
+ """
+
+ param_type_name = "argument"
+
+ def __init__(self, param_decls, required=None, **attrs):
+ if required is None:
+ if attrs.get("default") is not None:
+ required = False
+ else:
+ required = attrs.get("nargs", 1) > 0
+ Parameter.__init__(self, param_decls, required=required, **attrs)
+ if self.default is not None and self.nargs < 0:
+ raise TypeError(
+ "nargs=-1 in combination with a default value is not supported."
+ )
+
+ @property
+ def human_readable_name(self):
+ if self.metavar is not None:
+ return self.metavar
+ return self.name.upper()
+
+ def make_metavar(self):
+ if self.metavar is not None:
+ return self.metavar
+ var = self.type.get_metavar(self)
+ if not var:
+ var = self.name.upper()
+ if not self.required:
+ var = f"[{var}]"
+ if self.nargs != 1:
+ var += "..."
+ return var
+
+ def _parse_decls(self, decls, expose_value):
+ if not decls:
+ if not expose_value:
+ return None, [], []
+ raise TypeError("Could not determine name for argument")
+ if len(decls) == 1:
+ name = arg = decls[0]
+ name = name.replace("-", "_").lower()
+ else:
+ raise TypeError(
+ "Arguments take exactly one parameter declaration, got"
+ f" {len(decls)}."
+ )
+ return name, [arg], []
+
+ def get_usage_pieces(self, ctx):
+ return [self.make_metavar()]
+
+ def get_error_hint(self, ctx):
+ return repr(self.make_metavar())
+
+ def add_to_parser(self, parser, ctx):
+ parser.add_argument(dest=self.name, nargs=self.nargs, obj=self)
diff --git a/libs/dynaconf/vendor/click/decorators.py b/libs/dynaconf/vendor/click/decorators.py
new file mode 100644
index 000000000..30133051a
--- /dev/null
+++ b/libs/dynaconf/vendor/click/decorators.py
@@ -0,0 +1,331 @@
+import inspect
+import sys
+from functools import update_wrapper
+
+from .core import Argument
+from .core import Command
+from .core import Group
+from .core import Option
+from .globals import get_current_context
+from .utils import echo
+
+
+def pass_context(f):
+ """Marks a callback as wanting to receive the current context
+ object as first argument.
+ """
+
+ def new_func(*args, **kwargs):
+ return f(get_current_context(), *args, **kwargs)
+
+ return update_wrapper(new_func, f)
+
+
+def pass_obj(f):
+ """Similar to :func:`pass_context`, but only pass the object on the
+ context onwards (:attr:`Context.obj`). This is useful if that object
+ represents the state of a nested system.
+ """
+
+ def new_func(*args, **kwargs):
+ return f(get_current_context().obj, *args, **kwargs)
+
+ return update_wrapper(new_func, f)
+
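+ # Illustrative sketch (not upstream code):
+ #
+ #     @click.command()
+ #     @click.pass_context
+ #     def status(ctx):
+ #         click.echo(ctx.command_path)
+ #
+ # ``pass_obj`` instead hands the callback ``ctx.obj`` directly, which is
+ # handy when a parent group stored shared state there.
+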
+
+def make_pass_decorator(object_type, ensure=False):
+ """Given an object type this creates a decorator that will work
+ similar to :func:`pass_obj` but instead of passing the object of the
+ current context, it will find the innermost context of type
+ :func:`object_type`.
+
+ This generates a decorator that works roughly like this::
+
+ from functools import update_wrapper
+
+ def decorator(f):
+ @pass_context
+ def new_func(ctx, *args, **kwargs):
+ obj = ctx.find_object(object_type)
+ return ctx.invoke(f, obj, *args, **kwargs)
+ return update_wrapper(new_func, f)
+ return decorator
+
+ :param object_type: the type of the object to pass.
+ :param ensure: if set to `True`, a new object will be created and
+ remembered on the context if it's not there yet.
+ """
+
+ def decorator(f):
+ def new_func(*args, **kwargs):
+ ctx = get_current_context()
+ if ensure:
+ obj = ctx.ensure_object(object_type)
+ else:
+ obj = ctx.find_object(object_type)
+ if obj is None:
+ raise RuntimeError(
+ "Managed to invoke callback without a context"
+ f" object of type {object_type.__name__!r}"
+ " existing."
+ )
+ return ctx.invoke(f, obj, *args, **kwargs)
+
+ return update_wrapper(new_func, f)
+
+ return decorator
+
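+ # Illustrative sketch (not upstream code): a decorator that injects the
+ # closest ``Repo`` object, creating one on demand (``Repo`` is an
+ # assumed application class).
+ #
+ #     class Repo:
+ #         pass
+ #
+ #     pass_repo = make_pass_decorator(Repo, ensure=True)
+ #
+ #     @click.command()
+ #     @pass_repo
+ #     def show(repo):
+ #         ...
+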
+
+def _make_command(f, name, attrs, cls):
+ if isinstance(f, Command):
+ raise TypeError("Attempted to convert a callback into a command twice.")
+ try:
+ params = f.__click_params__
+ params.reverse()
+ del f.__click_params__
+ except AttributeError:
+ params = []
+ help = attrs.get("help")
+ if help is None:
+ help = inspect.getdoc(f)
+ if isinstance(help, bytes):
+ help = help.decode("utf-8")
+ else:
+ help = inspect.cleandoc(help)
+ attrs["help"] = help
+ return cls(
+ name=name or f.__name__.lower().replace("_", "-"),
+ callback=f,
+ params=params,
+ **attrs,
+ )
+
+
+def command(name=None, cls=None, **attrs):
+ r"""Creates a new :class:`Command` and uses the decorated function as
+ callback. This will also automatically attach all decorated
+ :func:`option`\s and :func:`argument`\s as parameters to the command.
+
+ The name of the command defaults to the name of the function with
+ underscores replaced by dashes. If you want to change that, you can
+ pass the intended name as the first argument.
+
+ All keyword arguments are forwarded to the underlying command class.
+
+ Once decorated the function turns into a :class:`Command` instance
+ that can be invoked as a command line utility or be attached to a
+ command :class:`Group`.
+
+ :param name: the name of the command. This defaults to the function
+ name with underscores replaced by dashes.
+ :param cls: the command class to instantiate. This defaults to
+ :class:`Command`.
+ """
+ if cls is None:
+ cls = Command
+
+ def decorator(f):
+ cmd = _make_command(f, name, attrs, cls)
+ cmd.__doc__ = f.__doc__
+ return cmd
+
+ return decorator
+
+
+def group(name=None, **attrs):
+ """Creates a new :class:`Group` with a function as callback. This
+ works otherwise the same as :func:`command` just that the `cls`
+ parameter is set to :class:`Group`.
+ """
+ attrs.setdefault("cls", Group)
+ return command(name, **attrs)
+
+
+def _param_memo(f, param):
+ if isinstance(f, Command):
+ f.params.append(param)
+ else:
+ if not hasattr(f, "__click_params__"):
+ f.__click_params__ = []
+ f.__click_params__.append(param)
+
+
+def argument(*param_decls, **attrs):
+ """Attaches an argument to the command. All positional arguments are
+ passed as parameter declarations to :class:`Argument`; all keyword
+ arguments are forwarded unchanged (except ``cls``).
+ This is equivalent to creating an :class:`Argument` instance manually
+ and attaching it to the :attr:`Command.params` list.
+
+ :param cls: the argument class to instantiate. This defaults to
+ :class:`Argument`.
+ """
+
+ def decorator(f):
+ ArgumentClass = attrs.pop("cls", Argument)
+ _param_memo(f, ArgumentClass(param_decls, **attrs))
+ return f
+
+ return decorator
+
+
+def option(*param_decls, **attrs):
+ """Attaches an option to the command. All positional arguments are
+ passed as parameter declarations to :class:`Option`; all keyword
+ arguments are forwarded unchanged (except ``cls``).
+ This is equivalent to creating an :class:`Option` instance manually
+ and attaching it to the :attr:`Command.params` list.
+
+ :param cls: the option class to instantiate. This defaults to
+ :class:`Option`.
+ """
+
+ def decorator(f):
+ # Issue 926, copy attrs, so pre-defined options can re-use the same cls=
+ option_attrs = attrs.copy()
+
+ if "help" in option_attrs:
+ option_attrs["help"] = inspect.cleandoc(option_attrs["help"])
+ OptionClass = option_attrs.pop("cls", Option)
+ _param_memo(f, OptionClass(param_decls, **option_attrs))
+ return f
+
+ return decorator
+
+
+def confirmation_option(*param_decls, **attrs):
+ """Shortcut for confirmation prompts that can be ignored by passing
+ ``--yes`` as parameter.
+
+ This is equivalent to decorating a function with :func:`option` with
+ the following parameters::
+
+ def callback(ctx, param, value):
+ if not value:
+ ctx.abort()
+
+ @click.command()
+ @click.option('--yes', is_flag=True, callback=callback,
+ expose_value=False, prompt='Do you want to continue?')
+ def dropdb():
+ pass
+ """
+
+ def decorator(f):
+ def callback(ctx, param, value):
+ if not value:
+ ctx.abort()
+
+ attrs.setdefault("is_flag", True)
+ attrs.setdefault("callback", callback)
+ attrs.setdefault("expose_value", False)
+ attrs.setdefault("prompt", "Do you want to continue?")
+ attrs.setdefault("help", "Confirm the action without prompting.")
+ return option(*(param_decls or ("--yes",)), **attrs)(f)
+
+ return decorator
+
+
+def password_option(*param_decls, **attrs):
+ """Shortcut for password prompts.
+
+ This is equivalent to decorating a function with :func:`option` with
+ the following parameters::
+
+ @click.command()
+ @click.option('--password', prompt=True, confirmation_prompt=True,
+ hide_input=True)
+ def changeadmin(password):
+ pass
+ """
+
+ def decorator(f):
+ attrs.setdefault("prompt", True)
+ attrs.setdefault("confirmation_prompt", True)
+ attrs.setdefault("hide_input", True)
+ return option(*(param_decls or ("--password",)), **attrs)(f)
+
+ return decorator
+
+
+def version_option(version=None, *param_decls, **attrs):
+ """Adds a ``--version`` option which immediately ends the program
+ printing out the version number. This is implemented as an eager
+ option that prints the version and exits the program in the callback.
+
+ :param version: the version number to show. If not provided Click
+ attempts an auto discovery via setuptools.
+ :param prog_name: the name of the program (defaults to autodetection)
+ :param message: custom message to show instead of the default
+ (``'%(prog)s, version %(version)s'``)
+ :param others: everything else is forwarded to :func:`option`.
+ """
+ if version is None:
+ if hasattr(sys, "_getframe"):
+ module = sys._getframe(1).f_globals.get("__name__")
+ else:
+ module = ""
+
+ def decorator(f):
+ prog_name = attrs.pop("prog_name", None)
+ message = attrs.pop("message", "%(prog)s, version %(version)s")
+
+ def callback(ctx, param, value):
+ if not value or ctx.resilient_parsing:
+ return
+ prog = prog_name
+ if prog is None:
+ prog = ctx.find_root().info_name
+ ver = version
+ if ver is None:
+ try:
+ import pkg_resources
+ except ImportError:
+ pass
+ else:
+ for dist in pkg_resources.working_set:
+ scripts = dist.get_entry_map().get("console_scripts") or {}
+ for entry_point in scripts.values():
+ if entry_point.module_name == module:
+ ver = dist.version
+ break
+ if ver is None:
+ raise RuntimeError("Could not determine version")
+ echo(message % {"prog": prog, "version": ver}, color=ctx.color)
+ ctx.exit()
+
+ attrs.setdefault("is_flag", True)
+ attrs.setdefault("expose_value", False)
+ attrs.setdefault("is_eager", True)
+ attrs.setdefault("help", "Show the version and exit.")
+ attrs["callback"] = callback
+ return option(*(param_decls or ("--version",)), **attrs)(f)
+
+ return decorator
+
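+ # Illustrative sketch (not upstream code): pinning the version explicitly
+ # avoids the pkg_resources scan in the callback above (names are examples).
+ #
+ #     @click.command()
+ #     @click.version_option("1.3.2", prog_name="bazarr")
+ #     def cli():
+ #         pass
+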
+
+def help_option(*param_decls, **attrs):
+ """Adds a ``--help`` option which immediately ends the program
+ printing out the help page. This is usually unnecessary to add as
+ this is added by default to all commands unless suppressed.
+
+ Like :func:`version_option`, this is implemented as an eager option that
+ prints in the callback and exits.
+
+ All arguments are forwarded to :func:`option`.
+ """
+
+ def decorator(f):
+ def callback(ctx, param, value):
+ if value and not ctx.resilient_parsing:
+ echo(ctx.get_help(), color=ctx.color)
+ ctx.exit()
+
+ attrs.setdefault("is_flag", True)
+ attrs.setdefault("expose_value", False)
+ attrs.setdefault("help", "Show this message and exit.")
+ attrs.setdefault("is_eager", True)
+ attrs["callback"] = callback
+ return option(*(param_decls or ("--help",)), **attrs)(f)
+
+ return decorator
diff --git a/libs/dynaconf/vendor/click/exceptions.py b/libs/dynaconf/vendor/click/exceptions.py
new file mode 100644
index 000000000..25b02bb0c
--- /dev/null
+++ b/libs/dynaconf/vendor/click/exceptions.py
@@ -0,0 +1,233 @@
+from ._compat import filename_to_ui
+from ._compat import get_text_stderr
+from .utils import echo
+
+
+def _join_param_hints(param_hint):
+ if isinstance(param_hint, (tuple, list)):
+ return " / ".join(repr(x) for x in param_hint)
+ return param_hint
+
+
+class ClickException(Exception):
+ """An exception that Click can handle and show to the user."""
+
+ #: The exit code for this exception.
+ exit_code = 1
+
+ def __init__(self, message):
+ super().__init__(message)
+ self.message = message
+
+ def format_message(self):
+ return self.message
+
+ def __str__(self):
+ return self.message
+
+ def show(self, file=None):
+ if file is None:
+ file = get_text_stderr()
+ echo(f"Error: {self.format_message()}", file=file)
+
+
+class UsageError(ClickException):
+ """An internal exception that signals a usage error. This typically
+ aborts any further handling.
+
+ :param message: the error message to display.
+ :param ctx: optionally the context that caused this error. Click will
+ fill in the context automatically in some situations.
+ """
+
+ exit_code = 2
+
+ def __init__(self, message, ctx=None):
+ ClickException.__init__(self, message)
+ self.ctx = ctx
+ self.cmd = self.ctx.command if self.ctx else None
+
+ def show(self, file=None):
+ if file is None:
+ file = get_text_stderr()
+ color = None
+ hint = ""
+ if self.cmd is not None and self.cmd.get_help_option(self.ctx) is not None:
+ hint = (
+ f"Try '{self.ctx.command_path}"
+ f" {self.ctx.help_option_names[0]}' for help.\n"
+ )
+ if self.ctx is not None:
+ color = self.ctx.color
+ echo(f"{self.ctx.get_usage()}\n{hint}", file=file, color=color)
+ echo(f"Error: {self.format_message()}", file=file, color=color)
+
+
+class BadParameter(UsageError):
+ """An exception that formats out a standardized error message for a
+ bad parameter. This is useful when thrown from a callback or type as
+ Click will attach contextual information to it (for instance, which
+ parameter it is).
+
+ .. versionadded:: 2.0
+
+ :param param: the parameter object that caused this error. This can
+ be left out, and Click will attach this info itself
+ if possible.
+    :param param_hint: a string that shows up as the parameter name. This
+                       can be used as an alternative to `param` in cases
+                       where custom validation should happen. If it is
+                       a string it's used as such; if it's a list then
+                       each item is quoted and separated.
+ """
+
+ def __init__(self, message, ctx=None, param=None, param_hint=None):
+ UsageError.__init__(self, message, ctx)
+ self.param = param
+ self.param_hint = param_hint
+
+ def format_message(self):
+ if self.param_hint is not None:
+ param_hint = self.param_hint
+ elif self.param is not None:
+ param_hint = self.param.get_error_hint(self.ctx)
+ else:
+ return f"Invalid value: {self.message}"
+ param_hint = _join_param_hints(param_hint)
+
+ return f"Invalid value for {param_hint}: {self.message}"
+
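+# A minimal usage sketch (illustrative only): raising ``BadParameter`` from
+# a parameter callback lets Click attach the offending parameter's hint:
+#
+#     def validate_port(ctx, param, value):
+#         if not 0 < value < 65536:
+#             raise BadParameter("port must be between 1 and 65535")
+#         return value
+#
+# With the hint attached, format_message() renders something like
+# "Invalid value for '--port': port must be between 1 and 65535".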
+
+class MissingParameter(BadParameter):
+ """Raised if click required an option or argument but it was not
+ provided when invoking the script.
+
+ .. versionadded:: 4.0
+
+ :param param_type: a string that indicates the type of the parameter.
+ The default is to inherit the parameter type from
+ the given `param`. Valid values are ``'parameter'``,
+ ``'option'`` or ``'argument'``.
+ """
+
+ def __init__(
+ self, message=None, ctx=None, param=None, param_hint=None, param_type=None
+ ):
+ BadParameter.__init__(self, message, ctx, param, param_hint)
+ self.param_type = param_type
+
+ def format_message(self):
+ if self.param_hint is not None:
+ param_hint = self.param_hint
+ elif self.param is not None:
+ param_hint = self.param.get_error_hint(self.ctx)
+ else:
+ param_hint = None
+ param_hint = _join_param_hints(param_hint)
+
+ param_type = self.param_type
+ if param_type is None and self.param is not None:
+ param_type = self.param.param_type_name
+
+ msg = self.message
+ if self.param is not None:
+ msg_extra = self.param.type.get_missing_message(self.param)
+ if msg_extra:
+ if msg:
+ msg += f". {msg_extra}"
+ else:
+ msg = msg_extra
+
+ hint_str = f" {param_hint}" if param_hint else ""
+ return f"Missing {param_type}{hint_str}.{' ' if msg else ''}{msg or ''}"
+
+ def __str__(self):
+ if self.message is None:
+ param_name = self.param.name if self.param else None
+ return f"missing parameter: {param_name}"
+ else:
+ return self.message
+
+
+class NoSuchOption(UsageError):
+ """Raised if click attempted to handle an option that does not
+ exist.
+
+ .. versionadded:: 4.0
+ """
+
+ def __init__(self, option_name, message=None, possibilities=None, ctx=None):
+ if message is None:
+ message = f"no such option: {option_name}"
+ UsageError.__init__(self, message, ctx)
+ self.option_name = option_name
+ self.possibilities = possibilities
+
+ def format_message(self):
+ bits = [self.message]
+ if self.possibilities:
+ if len(self.possibilities) == 1:
+ bits.append(f"Did you mean {self.possibilities[0]}?")
+ else:
+ possibilities = sorted(self.possibilities)
+ bits.append(f"(Possible options: {', '.join(possibilities)})")
+ return " ".join(bits)
+
+
+class BadOptionUsage(UsageError):
+ """Raised if an option is generally supplied but the use of the option
+ was incorrect. This is for instance raised if the number of arguments
+ for an option is not correct.
+
+ .. versionadded:: 4.0
+
+ :param option_name: the name of the option being used incorrectly.
+ """
+
+ def __init__(self, option_name, message, ctx=None):
+ UsageError.__init__(self, message, ctx)
+ self.option_name = option_name
+
+
+class BadArgumentUsage(UsageError):
+ """Raised if an argument is generally supplied but the use of the argument
+ was incorrect. This is for instance raised if the number of values
+ for an argument is not correct.
+
+ .. versionadded:: 6.0
+ """
+
+ def __init__(self, message, ctx=None):
+ UsageError.__init__(self, message, ctx)
+
+
+class FileError(ClickException):
+ """Raised if a file cannot be opened."""
+
+ def __init__(self, filename, hint=None):
+ ui_filename = filename_to_ui(filename)
+ if hint is None:
+ hint = "unknown error"
+ ClickException.__init__(self, hint)
+ self.ui_filename = ui_filename
+ self.filename = filename
+
+ def format_message(self):
+ return f"Could not open file {self.ui_filename}: {self.message}"
+
+
+class Abort(RuntimeError):
+ """An internal signalling exception that signals Click to abort."""
+
+
+class Exit(RuntimeError):
+ """An exception that indicates that the application should exit with some
+ status code.
+
+ :param code: the status code to exit with.
+ """
+
+ __slots__ = ("exit_code",)
+
+ def __init__(self, code=0):
+ self.exit_code = code
diff --git a/libs/dynaconf/vendor/click/formatting.py b/libs/dynaconf/vendor/click/formatting.py
new file mode 100644
index 000000000..a298c2e65
--- /dev/null
+++ b/libs/dynaconf/vendor/click/formatting.py
@@ -0,0 +1,279 @@
+from contextlib import contextmanager
+
+from ._compat import term_len
+from .parser import split_opt
+from .termui import get_terminal_size
+
+# Can force a width. This is used by the test system
+FORCED_WIDTH = None
+
+
+def measure_table(rows):
+ widths = {}
+ for row in rows:
+ for idx, col in enumerate(row):
+ widths[idx] = max(widths.get(idx, 0), term_len(col))
+ return tuple(y for x, y in sorted(widths.items()))
+
+
+def iter_rows(rows, col_count):
+ for row in rows:
+ row = tuple(row)
+ yield row + ("",) * (col_count - len(row))
+
+
+def wrap_text(
+ text, width=78, initial_indent="", subsequent_indent="", preserve_paragraphs=False
+):
+ """A helper function that intelligently wraps text. By default, it
+ assumes that it operates on a single paragraph of text but if the
+ `preserve_paragraphs` parameter is provided it will intelligently
+ handle paragraphs (defined by two empty lines).
+
+ If paragraphs are handled, a paragraph can be prefixed with an empty
+ line containing the ``\\b`` character (``\\x08``) to indicate that
+ no rewrapping should happen in that block.
+
+ :param text: the text that should be rewrapped.
+ :param width: the maximum width for the text.
+ :param initial_indent: the initial indent that should be placed on the
+ first line as a string.
+ :param subsequent_indent: the indent string that should be placed on
+ each consecutive line.
+ :param preserve_paragraphs: if this flag is set then the wrapping will
+ intelligently handle paragraphs.
+ """
+ from ._textwrap import TextWrapper
+
+ text = text.expandtabs()
+ wrapper = TextWrapper(
+ width,
+ initial_indent=initial_indent,
+ subsequent_indent=subsequent_indent,
+ replace_whitespace=False,
+ )
+ if not preserve_paragraphs:
+ return wrapper.fill(text)
+
+ p = []
+ buf = []
+ indent = None
+
+ def _flush_par():
+ if not buf:
+ return
+ if buf[0].strip() == "\b":
+ p.append((indent or 0, True, "\n".join(buf[1:])))
+ else:
+ p.append((indent or 0, False, " ".join(buf)))
+ del buf[:]
+
+ for line in text.splitlines():
+ if not line:
+ _flush_par()
+ indent = None
+ else:
+ if indent is None:
+ orig_len = term_len(line)
+ line = line.lstrip()
+ indent = orig_len - term_len(line)
+ buf.append(line)
+ _flush_par()
+
+ rv = []
+ for indent, raw, text in p:
+ with wrapper.extra_indent(" " * indent):
+ if raw:
+ rv.append(wrapper.indent_only(text))
+ else:
+ rv.append(wrapper.fill(text))
+
+ return "\n\n".join(rv)
+
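+# A small illustration of the paragraph handling (a sketch; exact output
+# depends on the vendored TextWrapper):
+#
+#     wrap_text("first paragraph\n\n\x08\nraw one\nraw two",
+#               width=20, preserve_paragraphs=True)
+#
+# should return "first paragraph\n\nraw one\nraw two": the second block is
+# preceded by a ``\b`` line, so it is emitted without rewrapping.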
+
+class HelpFormatter:
+ """This class helps with formatting text-based help pages. It's
+ usually just needed for very special internal cases, but it's also
+ exposed so that developers can write their own fancy outputs.
+
+ At present, it always writes into memory.
+
+ :param indent_increment: the additional increment for each level.
+ :param width: the width for the text. This defaults to the terminal
+ width clamped to a maximum of 78.
+ """
+
+ def __init__(self, indent_increment=2, width=None, max_width=None):
+ self.indent_increment = indent_increment
+ if max_width is None:
+ max_width = 80
+ if width is None:
+ width = FORCED_WIDTH
+ if width is None:
+ width = max(min(get_terminal_size()[0], max_width) - 2, 50)
+ self.width = width
+ self.current_indent = 0
+ self.buffer = []
+
+ def write(self, string):
+ """Writes a unicode string into the internal buffer."""
+ self.buffer.append(string)
+
+ def indent(self):
+ """Increases the indentation."""
+ self.current_indent += self.indent_increment
+
+ def dedent(self):
+ """Decreases the indentation."""
+ self.current_indent -= self.indent_increment
+
+ def write_usage(self, prog, args="", prefix="Usage: "):
+ """Writes a usage line into the buffer.
+
+ :param prog: the program name.
+ :param args: whitespace separated list of arguments.
+ :param prefix: the prefix for the first line.
+ """
+ usage_prefix = f"{prefix:>{self.current_indent}}{prog} "
+ text_width = self.width - self.current_indent
+
+ if text_width >= (term_len(usage_prefix) + 20):
+ # The arguments will fit to the right of the prefix.
+ indent = " " * term_len(usage_prefix)
+ self.write(
+ wrap_text(
+ args,
+ text_width,
+ initial_indent=usage_prefix,
+ subsequent_indent=indent,
+ )
+ )
+ else:
+ # The prefix is too long, put the arguments on the next line.
+ self.write(usage_prefix)
+ self.write("\n")
+ indent = " " * (max(self.current_indent, term_len(prefix)) + 4)
+ self.write(
+ wrap_text(
+ args, text_width, initial_indent=indent, subsequent_indent=indent
+ )
+ )
+
+ self.write("\n")
+
+ def write_heading(self, heading):
+ """Writes a heading into the buffer."""
+ self.write(f"{'':>{self.current_indent}}{heading}:\n")
+
+ def write_paragraph(self):
+ """Writes a paragraph into the buffer."""
+ if self.buffer:
+ self.write("\n")
+
+ def write_text(self, text):
+ """Writes re-indented text into the buffer. This rewraps and
+ preserves paragraphs.
+ """
+ text_width = max(self.width - self.current_indent, 11)
+ indent = " " * self.current_indent
+ self.write(
+ wrap_text(
+ text,
+ text_width,
+ initial_indent=indent,
+ subsequent_indent=indent,
+ preserve_paragraphs=True,
+ )
+ )
+ self.write("\n")
+
+ def write_dl(self, rows, col_max=30, col_spacing=2):
+ """Writes a definition list into the buffer. This is how options
+ and commands are usually formatted.
+
+ :param rows: a list of two item tuples for the terms and values.
+ :param col_max: the maximum width of the first column.
+ :param col_spacing: the number of spaces between the first and
+ second column.
+ """
+ rows = list(rows)
+ widths = measure_table(rows)
+ if len(widths) != 2:
+ raise TypeError("Expected two columns for definition list")
+
+ first_col = min(widths[0], col_max) + col_spacing
+
+ for first, second in iter_rows(rows, len(widths)):
+ self.write(f"{'':>{self.current_indent}}{first}")
+ if not second:
+ self.write("\n")
+ continue
+ if term_len(first) <= first_col - col_spacing:
+ self.write(" " * (first_col - term_len(first)))
+ else:
+ self.write("\n")
+ self.write(" " * (first_col + self.current_indent))
+
+ text_width = max(self.width - first_col - 2, 10)
+ wrapped_text = wrap_text(second, text_width, preserve_paragraphs=True)
+ lines = wrapped_text.splitlines()
+
+ if lines:
+ self.write(f"{lines[0]}\n")
+
+ for line in lines[1:]:
+ self.write(f"{'':>{first_col + self.current_indent}}{line}\n")
+
+ if len(lines) > 1:
+ # separate long help from next option
+ self.write("\n")
+ else:
+ self.write("\n")
+
+ @contextmanager
+ def section(self, name):
+ """Helpful context manager that writes a paragraph, a heading,
+ and the indents.
+
+ :param name: the section name that is written as heading.
+ """
+ self.write_paragraph()
+ self.write_heading(name)
+ self.indent()
+ try:
+ yield
+ finally:
+ self.dedent()
+
+ @contextmanager
+ def indentation(self):
+ """A context manager that increases the indentation."""
+ self.indent()
+ try:
+ yield
+ finally:
+ self.dedent()
+
+ def getvalue(self):
+ """Returns the buffer contents."""
+ return "".join(self.buffer)
+
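+# A minimal usage sketch (illustrative only) of how the higher-level classes
+# typically drive this formatter:
+#
+#     formatter = HelpFormatter(width=40)
+#     formatter.write_usage("mytool", "[OPTIONS] SRC")
+#     with formatter.section("Options"):
+#         formatter.write_dl([("--help", "Show this message and exit.")])
+#     help_page = formatter.getvalue()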
+
+def join_options(options):
+ """Given a list of option strings this joins them in the most appropriate
+ way and returns them in the form ``(formatted_string,
+ any_prefix_is_slash)`` where the second item in the tuple is a flag that
+ indicates if any of the option prefixes was a slash.
+ """
+ rv = []
+ any_prefix_is_slash = False
+ for opt in options:
+ prefix = split_opt(opt)[0]
+ if prefix == "/":
+ any_prefix_is_slash = True
+ rv.append((len(prefix), opt))
+
+ rv.sort(key=lambda x: x[0])
+
+ rv = ", ".join(x[1] for x in rv)
+ return rv, any_prefix_is_slash
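+# For example, join_options(["--file", "-f"]) sorts the short option first
+# and returns ("-f, --file", False); a Windows-style "/f" prefix would make
+# the second element True.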
diff --git a/libs/dynaconf/vendor/click/globals.py b/libs/dynaconf/vendor/click/globals.py
new file mode 100644
index 000000000..1649f9a0b
--- /dev/null
+++ b/libs/dynaconf/vendor/click/globals.py
@@ -0,0 +1,47 @@
+from threading import local
+
+_local = local()
+
+
+def get_current_context(silent=False):
+ """Returns the current click context. This can be used as a way to
+ access the current context object from anywhere. This is a more implicit
+ alternative to the :func:`pass_context` decorator. This function is
+ primarily useful for helpers such as :func:`echo` which might be
+    interested in changing their behavior based on the current context.
+
+ To push the current context, :meth:`Context.scope` can be used.
+
+ .. versionadded:: 5.0
+
+ :param silent: if set to `True` the return value is `None` if no context
+ is available. The default behavior is to raise a
+ :exc:`RuntimeError`.
+ """
+ try:
+ return _local.stack[-1]
+ except (AttributeError, IndexError):
+ if not silent:
+ raise RuntimeError("There is no active click context.")
+
+
+def push_context(ctx):
+ """Pushes a new context to the current stack."""
+ _local.__dict__.setdefault("stack", []).append(ctx)
+
+
+def pop_context():
+ """Removes the top level from the stack."""
+ _local.stack.pop()
+
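+# A minimal usage sketch (illustrative only): code running outside a normal
+# Click invocation can make a context current by hand, where ``ctx`` is an
+# existing click Context:
+#
+#     push_context(ctx)
+#     try:
+#         assert get_current_context() is ctx
+#     finally:
+#         pop_context()
+#
+# In application code, ``Context.scope`` wraps this push/pop pairing.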
+
+def resolve_color_default(color=None):
+ """"Internal helper to get the default value of the color flag. If a
+ value is passed it's returned unchanged, otherwise it's looked up from
+ the current context.
+ """
+ if color is not None:
+ return color
+ ctx = get_current_context(silent=True)
+ if ctx is not None:
+ return ctx.color
diff --git a/libs/dynaconf/vendor/click/parser.py b/libs/dynaconf/vendor/click/parser.py
new file mode 100644
index 000000000..158abb0de
--- /dev/null
+++ b/libs/dynaconf/vendor/click/parser.py
@@ -0,0 +1,431 @@
+"""
+This module started out as largely a copy paste from the stdlib's
+optparse module with the features removed that we do not need from
+optparse because we implement them in Click on a higher level (for
+instance type handling, help formatting and a lot more).
+
+The plan is to remove more and more from here over time.
+
+The reason this is a different module and not optparse from the stdlib
+is that there are differences between 2.x and 3.x in the error messages
+generated, and that optparse in the stdlib uses gettext for no good
+reason, which might cause us issues.
+
+Click uses parts of optparse written by Gregory P. Ward and maintained
+by the Python Software Foundation. This is limited to code in parser.py.
+
+Copyright 2001-2006 Gregory P. Ward. All rights reserved.
+Copyright 2002-2006 Python Software Foundation. All rights reserved.
+"""
+# This code uses parts of optparse written by Gregory P. Ward and
+# maintained by the Python Software Foundation.
+# Copyright 2001-2006 Gregory P. Ward
+# Copyright 2002-2006 Python Software Foundation
+import re
+from collections import deque
+
+from .exceptions import BadArgumentUsage
+from .exceptions import BadOptionUsage
+from .exceptions import NoSuchOption
+from .exceptions import UsageError
+
+
+def _unpack_args(args, nargs_spec):
+ """Given an iterable of arguments and an iterable of nargs specifications,
+ it returns a tuple with all the unpacked arguments at the first index
+ and all remaining arguments as the second.
+
+ The nargs specification is the number of arguments that should be consumed
+ or `-1` to indicate that this position should eat up all the remainders.
+
+ Missing items are filled with `None`.
+ """
+ args = deque(args)
+ nargs_spec = deque(nargs_spec)
+ rv = []
+ spos = None
+
+ def _fetch(c):
+ try:
+ if spos is None:
+ return c.popleft()
+ else:
+ return c.pop()
+ except IndexError:
+ return None
+
+ while nargs_spec:
+ nargs = _fetch(nargs_spec)
+ if nargs == 1:
+ rv.append(_fetch(args))
+ elif nargs > 1:
+ x = [_fetch(args) for _ in range(nargs)]
+ # If we're reversed, we're pulling in the arguments in reverse,
+ # so we need to turn them around.
+ if spos is not None:
+ x.reverse()
+ rv.append(tuple(x))
+ elif nargs < 0:
+ if spos is not None:
+ raise TypeError("Cannot have two nargs < 0")
+ spos = len(rv)
+ rv.append(None)
+
+ # spos is the position of the wildcard (star). If it's not `None`,
+ # we fill it with the remainder.
+ if spos is not None:
+ rv[spos] = tuple(args)
+ args = []
+ rv[spos + 1 :] = reversed(rv[spos + 1 :])
+
+ return tuple(rv), list(args)
+
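+# For example, with one leading argument, a greedy middle, and one trailing
+# argument, the remainder lands in the wildcard slot:
+#
+#     _unpack_args(["a", "b", "c", "d"], [1, -1, 1])
+#     # -> (("a", ("b", "c"), "d"), [])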
+
+def _error_opt_args(nargs, opt):
+ if nargs == 1:
+ raise BadOptionUsage(opt, f"{opt} option requires an argument")
+ raise BadOptionUsage(opt, f"{opt} option requires {nargs} arguments")
+
+
+def split_opt(opt):
+ first = opt[:1]
+ if first.isalnum():
+ return "", opt
+ if opt[1:2] == first:
+ return opt[:2], opt[2:]
+ return first, opt[1:]
+
+
+def normalize_opt(opt, ctx):
+ if ctx is None or ctx.token_normalize_func is None:
+ return opt
+ prefix, opt = split_opt(opt)
+ return f"{prefix}{ctx.token_normalize_func(opt)}"
+
+
+def split_arg_string(string):
+ """Given an argument string this attempts to split it into small parts."""
+ rv = []
+ for match in re.finditer(
+ r"('([^'\\]*(?:\\.[^'\\]*)*)'|\"([^\"\\]*(?:\\.[^\"\\]*)*)\"|\S+)\s*",
+ string,
+ re.S,
+ ):
+ arg = match.group().strip()
+ if arg[:1] == arg[-1:] and arg[:1] in "\"'":
+ arg = arg[1:-1].encode("ascii", "backslashreplace").decode("unicode-escape")
+ try:
+ arg = type(string)(arg)
+ except UnicodeError:
+ pass
+ rv.append(arg)
+ return rv
+
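+# For example, split_arg_string("pip install 'foo bar'") returns
+# ["pip", "install", "foo bar"]: the quoted token is unwrapped and kept as
+# a single argument.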
+
+class Option:
+ def __init__(self, opts, dest, action=None, nargs=1, const=None, obj=None):
+ self._short_opts = []
+ self._long_opts = []
+ self.prefixes = set()
+
+ for opt in opts:
+ prefix, value = split_opt(opt)
+ if not prefix:
+ raise ValueError(f"Invalid start character for option ({opt})")
+ self.prefixes.add(prefix[0])
+ if len(prefix) == 1 and len(value) == 1:
+ self._short_opts.append(opt)
+ else:
+ self._long_opts.append(opt)
+ self.prefixes.add(prefix)
+
+ if action is None:
+ action = "store"
+
+ self.dest = dest
+ self.action = action
+ self.nargs = nargs
+ self.const = const
+ self.obj = obj
+
+ @property
+ def takes_value(self):
+ return self.action in ("store", "append")
+
+ def process(self, value, state):
+ if self.action == "store":
+ state.opts[self.dest] = value
+ elif self.action == "store_const":
+ state.opts[self.dest] = self.const
+ elif self.action == "append":
+ state.opts.setdefault(self.dest, []).append(value)
+ elif self.action == "append_const":
+ state.opts.setdefault(self.dest, []).append(self.const)
+ elif self.action == "count":
+ state.opts[self.dest] = state.opts.get(self.dest, 0) + 1
+ else:
+ raise ValueError(f"unknown action '{self.action}'")
+ state.order.append(self.obj)
+
+
+class Argument:
+ def __init__(self, dest, nargs=1, obj=None):
+ self.dest = dest
+ self.nargs = nargs
+ self.obj = obj
+
+ def process(self, value, state):
+ if self.nargs > 1:
+ holes = sum(1 for x in value if x is None)
+ if holes == len(value):
+ value = None
+ elif holes != 0:
+ raise BadArgumentUsage(
+ f"argument {self.dest} takes {self.nargs} values"
+ )
+ state.opts[self.dest] = value
+ state.order.append(self.obj)
+
+
+class ParsingState:
+ def __init__(self, rargs):
+ self.opts = {}
+ self.largs = []
+ self.rargs = rargs
+ self.order = []
+
+
+class OptionParser:
+ """The option parser is an internal class that is ultimately used to
+ parse options and arguments. It's modelled after optparse and brings
+ a similar but vastly simplified API. It should generally not be used
+ directly as the high level Click classes wrap it for you.
+
+ It's not nearly as extensible as optparse or argparse as it does not
+ implement features that are implemented on a higher level (such as
+ types or defaults).
+
+    :param ctx: optionally the :class:`~click.Context` that this parser
+                is associated with.
+ """
+
+ def __init__(self, ctx=None):
+ #: The :class:`~click.Context` for this parser. This might be
+ #: `None` for some advanced use cases.
+ self.ctx = ctx
+ #: This controls how the parser deals with interspersed arguments.
+ #: If this is set to `False`, the parser will stop on the first
+ #: non-option. Click uses this to implement nested subcommands
+ #: safely.
+ self.allow_interspersed_args = True
+ #: This tells the parser how to deal with unknown options. By
+ #: default it will error out (which is sensible), but there is a
+ #: second mode where it will ignore it and continue processing
+ #: after shifting all the unknown options into the resulting args.
+ self.ignore_unknown_options = False
+ if ctx is not None:
+ self.allow_interspersed_args = ctx.allow_interspersed_args
+ self.ignore_unknown_options = ctx.ignore_unknown_options
+ self._short_opt = {}
+ self._long_opt = {}
+ self._opt_prefixes = {"-", "--"}
+ self._args = []
+
+ def add_option(self, opts, dest, action=None, nargs=1, const=None, obj=None):
+ """Adds a new option named `dest` to the parser. The destination
+ is not inferred (unlike with optparse) and needs to be explicitly
+ provided. Action can be any of ``store``, ``store_const``,
+        ``append``, ``append_const`` or ``count``.
+
+ The `obj` can be used to identify the option in the order list
+ that is returned from the parser.
+ """
+ if obj is None:
+ obj = dest
+ opts = [normalize_opt(opt, self.ctx) for opt in opts]
+ option = Option(opts, dest, action=action, nargs=nargs, const=const, obj=obj)
+ self._opt_prefixes.update(option.prefixes)
+ for opt in option._short_opts:
+ self._short_opt[opt] = option
+ for opt in option._long_opts:
+ self._long_opt[opt] = option
+
+ def add_argument(self, dest, nargs=1, obj=None):
+ """Adds a positional argument named `dest` to the parser.
+
+ The `obj` can be used to identify the option in the order list
+ that is returned from the parser.
+ """
+ if obj is None:
+ obj = dest
+ self._args.append(Argument(dest=dest, nargs=nargs, obj=obj))
+
+ def parse_args(self, args):
+ """Parses positional arguments and returns ``(values, args, order)``
+ for the parsed options and arguments as well as the leftover
+ arguments if there are any. The order is a list of objects as they
+ appear on the command line. If arguments appear multiple times they
+ will be memorized multiple times as well.
+ """
+ state = ParsingState(args)
+ try:
+ self._process_args_for_options(state)
+ self._process_args_for_args(state)
+ except UsageError:
+ if self.ctx is None or not self.ctx.resilient_parsing:
+ raise
+ return state.opts, state.largs, state.order
+
+ def _process_args_for_args(self, state):
+ pargs, args = _unpack_args(
+ state.largs + state.rargs, [x.nargs for x in self._args]
+ )
+
+ for idx, arg in enumerate(self._args):
+ arg.process(pargs[idx], state)
+
+ state.largs = args
+ state.rargs = []
+
+ def _process_args_for_options(self, state):
+ while state.rargs:
+ arg = state.rargs.pop(0)
+ arglen = len(arg)
+ # Double dashes always handled explicitly regardless of what
+ # prefixes are valid.
+ if arg == "--":
+ return
+ elif arg[:1] in self._opt_prefixes and arglen > 1:
+ self._process_opts(arg, state)
+ elif self.allow_interspersed_args:
+ state.largs.append(arg)
+ else:
+ state.rargs.insert(0, arg)
+ return
+
+ # Say this is the original argument list:
+ # [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)]
+ # ^
+ # (we are about to process arg(i)).
+ #
+ # Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of
+ # [arg0, ..., arg(i-1)] (any options and their arguments will have
+ # been removed from largs).
+ #
+ # The while loop will usually consume 1 or more arguments per pass.
+ # If it consumes 1 (eg. arg is an option that takes no arguments),
+ # then after _process_arg() is done the situation is:
+ #
+ # largs = subset of [arg0, ..., arg(i)]
+ # rargs = [arg(i+1), ..., arg(N-1)]
+ #
+ # If allow_interspersed_args is false, largs will always be
+ # *empty* -- still a subset of [arg0, ..., arg(i-1)], but
+ # not a very interesting subset!
+
+ def _match_long_opt(self, opt, explicit_value, state):
+ if opt not in self._long_opt:
+ possibilities = [word for word in self._long_opt if word.startswith(opt)]
+ raise NoSuchOption(opt, possibilities=possibilities, ctx=self.ctx)
+
+ option = self._long_opt[opt]
+ if option.takes_value:
+ # At this point it's safe to modify rargs by injecting the
+ # explicit value, because no exception is raised in this
+ # branch. This means that the inserted value will be fully
+ # consumed.
+ if explicit_value is not None:
+ state.rargs.insert(0, explicit_value)
+
+ nargs = option.nargs
+ if len(state.rargs) < nargs:
+ _error_opt_args(nargs, opt)
+ elif nargs == 1:
+ value = state.rargs.pop(0)
+ else:
+ value = tuple(state.rargs[:nargs])
+ del state.rargs[:nargs]
+
+ elif explicit_value is not None:
+ raise BadOptionUsage(opt, f"{opt} option does not take a value")
+
+ else:
+ value = None
+
+ option.process(value, state)
+
+ def _match_short_opt(self, arg, state):
+ stop = False
+ i = 1
+ prefix = arg[0]
+ unknown_options = []
+
+ for ch in arg[1:]:
+ opt = normalize_opt(f"{prefix}{ch}", self.ctx)
+ option = self._short_opt.get(opt)
+ i += 1
+
+ if not option:
+ if self.ignore_unknown_options:
+ unknown_options.append(ch)
+ continue
+ raise NoSuchOption(opt, ctx=self.ctx)
+ if option.takes_value:
+ # Any characters left in arg? Pretend they're the
+ # next arg, and stop consuming characters of arg.
+ if i < len(arg):
+ state.rargs.insert(0, arg[i:])
+ stop = True
+
+ nargs = option.nargs
+ if len(state.rargs) < nargs:
+ _error_opt_args(nargs, opt)
+ elif nargs == 1:
+ value = state.rargs.pop(0)
+ else:
+ value = tuple(state.rargs[:nargs])
+ del state.rargs[:nargs]
+
+ else:
+ value = None
+
+ option.process(value, state)
+
+ if stop:
+ break
+
+        # If we got any unknown options we recombine the string of the
+ # remaining options and re-attach the prefix, then report that
+ # to the state as new larg. This way there is basic combinatorics
+ # that can be achieved while still ignoring unknown arguments.
+ if self.ignore_unknown_options and unknown_options:
+ state.largs.append(f"{prefix}{''.join(unknown_options)}")
+
+ def _process_opts(self, arg, state):
+ explicit_value = None
+ # Long option handling happens in two parts. The first part is
+ # supporting explicitly attached values. In any case, we will try
+ # to long match the option first.
+ if "=" in arg:
+ long_opt, explicit_value = arg.split("=", 1)
+ else:
+ long_opt = arg
+ norm_long_opt = normalize_opt(long_opt, self.ctx)
+
+ # At this point we will match the (assumed) long option through
+ # the long option matching code. Note that this allows options
+ # like "-foo" to be matched as long options.
+ try:
+ self._match_long_opt(norm_long_opt, explicit_value, state)
+ except NoSuchOption:
+ # At this point the long option matching failed, and we need
+ # to try with short options. However there is a special rule
+ # which says, that if we have a two character options prefix
+ # (applies to "--foo" for instance), we do not dispatch to the
+ # short option code and will instead raise the no option
+ # error.
+ if arg[:2] not in self._opt_prefixes:
+ return self._match_short_opt(arg, state)
+ if not self.ignore_unknown_options:
+ raise
+ state.largs.append(arg)
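+# A minimal usage sketch (illustrative only); the high-level Command classes
+# normally build and drive the parser:
+#
+#     parser = OptionParser()
+#     parser.add_option(["-f", "--file"], dest="file")
+#     parser.add_argument("src", nargs=1)
+#     opts, largs, order = parser.parse_args(["-f", "out.txt", "in.csv"])
+#     # opts -> {"file": "out.txt", "src": "in.csv"}, largs -> []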
diff --git a/libs/dynaconf/vendor/click/termui.py b/libs/dynaconf/vendor/click/termui.py
new file mode 100644
index 000000000..a1bdf2ab8
--- /dev/null
+++ b/libs/dynaconf/vendor/click/termui.py
@@ -0,0 +1,688 @@
+import inspect
+import io
+import itertools
+import os
+import struct
+import sys
+
+from ._compat import DEFAULT_COLUMNS
+from ._compat import get_winterm_size
+from ._compat import isatty
+from ._compat import strip_ansi
+from ._compat import WIN
+from .exceptions import Abort
+from .exceptions import UsageError
+from .globals import resolve_color_default
+from .types import Choice
+from .types import convert_type
+from .types import Path
+from .utils import echo
+from .utils import LazyFile
+
+# The prompt functions to use. The doc tools currently override these
+# functions to customize how they work.
+visible_prompt_func = input
+
+_ansi_colors = {
+ "black": 30,
+ "red": 31,
+ "green": 32,
+ "yellow": 33,
+ "blue": 34,
+ "magenta": 35,
+ "cyan": 36,
+ "white": 37,
+ "reset": 39,
+ "bright_black": 90,
+ "bright_red": 91,
+ "bright_green": 92,
+ "bright_yellow": 93,
+ "bright_blue": 94,
+ "bright_magenta": 95,
+ "bright_cyan": 96,
+ "bright_white": 97,
+}
+_ansi_reset_all = "\033[0m"
+
+
+def hidden_prompt_func(prompt):
+ import getpass
+
+ return getpass.getpass(prompt)
+
+
+def _build_prompt(
+ text, suffix, show_default=False, default=None, show_choices=True, type=None
+):
+ prompt = text
+ if type is not None and show_choices and isinstance(type, Choice):
+ prompt += f" ({', '.join(map(str, type.choices))})"
+ if default is not None and show_default:
+ prompt = f"{prompt} [{_format_default(default)}]"
+ return f"{prompt}{suffix}"
+
+
+def _format_default(default):
+ if isinstance(default, (io.IOBase, LazyFile)) and hasattr(default, "name"):
+ return default.name
+
+ return default
+
+
+def prompt(
+ text,
+ default=None,
+ hide_input=False,
+ confirmation_prompt=False,
+ type=None,
+ value_proc=None,
+ prompt_suffix=": ",
+ show_default=True,
+ err=False,
+ show_choices=True,
+):
+ """Prompts a user for input. This is a convenience function that can
+ be used to prompt a user for input later.
+
+    If the user aborts the input by sending an interrupt signal, this
+    function will catch it and raise an :exc:`Abort` exception.
+
+ .. versionadded:: 7.0
+ Added the show_choices parameter.
+
+ .. versionadded:: 6.0
+ Added unicode support for cmd.exe on Windows.
+
+ .. versionadded:: 4.0
+ Added the `err` parameter.
+
+ :param text: the text to show for the prompt.
+ :param default: the default value to use if no input happens. If this
+ is not given it will prompt until it's aborted.
+ :param hide_input: if this is set to true then the input value will
+ be hidden.
+ :param confirmation_prompt: asks for confirmation for the value.
+ :param type: the type to use to check the value against.
+ :param value_proc: if this parameter is provided it's a function that
+ is invoked instead of the type conversion to
+ convert a value.
+ :param prompt_suffix: a suffix that should be added to the prompt.
+ :param show_default: shows or hides the default value in the prompt.
+ :param err: if set to true the file defaults to ``stderr`` instead of
+ ``stdout``, the same as with echo.
+ :param show_choices: Show or hide choices if the passed type is a Choice.
+ For example if type is a Choice of either day or week,
+ show_choices is true and text is "Group by" then the
+ prompt will be "Group by (day, week): ".
+ """
+ result = None
+
+ def prompt_func(text):
+ f = hidden_prompt_func if hide_input else visible_prompt_func
+ try:
+ # Write the prompt separately so that we get nice
+ # coloring through colorama on Windows
+ echo(text, nl=False, err=err)
+ return f("")
+ except (KeyboardInterrupt, EOFError):
+ # getpass doesn't print a newline if the user aborts input with ^C.
+ # Allegedly this behavior is inherited from getpass(3).
+ # A doc bug has been filed at https://bugs.python.org/issue24711
+ if hide_input:
+ echo(None, err=err)
+ raise Abort()
+
+ if value_proc is None:
+ value_proc = convert_type(type, default)
+
+ prompt = _build_prompt(
+ text, prompt_suffix, show_default, default, show_choices, type
+ )
+
+ while 1:
+ while 1:
+ value = prompt_func(prompt)
+ if value:
+ break
+ elif default is not None:
+ if isinstance(value_proc, Path):
+                    # validate Path default value (exists, dir_okay, etc.)
+ value = default
+ break
+ return default
+ try:
+ result = value_proc(value)
+ except UsageError as e:
+ echo(f"Error: {e.message}", err=err) # noqa: B306
+ continue
+ if not confirmation_prompt:
+ return result
+ while 1:
+ value2 = prompt_func("Repeat for confirmation: ")
+ if value2:
+ break
+ if value == value2:
+ return result
+ echo("Error: the two entered values do not match", err=err)
+
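+# A minimal usage sketch (illustrative only), assuming an interactive
+# terminal:
+#
+#     port = prompt("Port", default=8080, type=int)
+#
+# This renders "Port [8080]: ", converts the reply through the int type,
+# and re-prompts on conversion errors; an empty reply returns the default.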
+
+def confirm(
+ text, default=False, abort=False, prompt_suffix=": ", show_default=True, err=False
+):
+ """Prompts for confirmation (yes/no question).
+
+    If the user aborts the input by sending an interrupt signal, this
+    function will catch it and raise an :exc:`Abort` exception.
+
+ .. versionadded:: 4.0
+ Added the `err` parameter.
+
+ :param text: the question to ask.
+ :param default: the default for the prompt.
+    :param abort: if this is set to `True` a negative answer aborts
+                  execution by raising :exc:`Abort`.
+ :param prompt_suffix: a suffix that should be added to the prompt.
+ :param show_default: shows or hides the default value in the prompt.
+ :param err: if set to true the file defaults to ``stderr`` instead of
+ ``stdout``, the same as with echo.
+ """
+ prompt = _build_prompt(
+ text, prompt_suffix, show_default, "Y/n" if default else "y/N"
+ )
+ while 1:
+ try:
+ # Write the prompt separately so that we get nice
+ # coloring through colorama on Windows
+ echo(prompt, nl=False, err=err)
+ value = visible_prompt_func("").lower().strip()
+ except (KeyboardInterrupt, EOFError):
+ raise Abort()
+ if value in ("y", "yes"):
+ rv = True
+ elif value in ("n", "no"):
+ rv = False
+ elif value == "":
+ rv = default
+ else:
+ echo("Error: invalid input", err=err)
+ continue
+ break
+ if abort and not rv:
+ raise Abort()
+ return rv
+
+
+def get_terminal_size():
+ """Returns the current size of the terminal as tuple in the form
+ ``(width, height)`` in columns and rows.
+ """
+ import shutil
+
+ if hasattr(shutil, "get_terminal_size"):
+ return shutil.get_terminal_size()
+
+ # We provide a sensible default for get_winterm_size() when being invoked
+    # inside a subprocess. Without this, it would not return a useful size.
+ if get_winterm_size is not None:
+ size = get_winterm_size()
+ if size == (0, 0):
+ return (79, 24)
+ else:
+ return size
+
+ def ioctl_gwinsz(fd):
+ try:
+ import fcntl
+ import termios
+
+ cr = struct.unpack("hh", fcntl.ioctl(fd, termios.TIOCGWINSZ, "1234"))
+ except Exception:
+ return
+ return cr
+
+ cr = ioctl_gwinsz(0) or ioctl_gwinsz(1) or ioctl_gwinsz(2)
+ if not cr:
+ try:
+ fd = os.open(os.ctermid(), os.O_RDONLY)
+ try:
+ cr = ioctl_gwinsz(fd)
+ finally:
+ os.close(fd)
+ except Exception:
+ pass
+ if not cr or not cr[0] or not cr[1]:
+ cr = (os.environ.get("LINES", 25), os.environ.get("COLUMNS", DEFAULT_COLUMNS))
+ return int(cr[1]), int(cr[0])
+
+
+def echo_via_pager(text_or_generator, color=None):
+ """This function takes a text and shows it via an environment specific
+ pager on stdout.
+
+ .. versionchanged:: 3.0
+ Added the `color` flag.
+
+ :param text_or_generator: the text to page, or alternatively, a
+ generator emitting the text to page.
+ :param color: controls if the pager supports ANSI colors or not. The
+ default is autodetection.
+ """
+ color = resolve_color_default(color)
+
+ if inspect.isgeneratorfunction(text_or_generator):
+ i = text_or_generator()
+ elif isinstance(text_or_generator, str):
+ i = [text_or_generator]
+ else:
+ i = iter(text_or_generator)
+
+ # convert every element of i to a text type if necessary
+ text_generator = (el if isinstance(el, str) else str(el) for el in i)
+
+ from ._termui_impl import pager
+
+ return pager(itertools.chain(text_generator, "\n"), color)
+
+
+def progressbar(
+ iterable=None,
+ length=None,
+ label=None,
+ show_eta=True,
+ show_percent=None,
+ show_pos=False,
+ item_show_func=None,
+ fill_char="#",
+ empty_char="-",
+ bar_template="%(label)s [%(bar)s] %(info)s",
+ info_sep=" ",
+ width=36,
+ file=None,
+ color=None,
+):
+ """This function creates an iterable context manager that can be used
+ to iterate over something while showing a progress bar. It will
+ either iterate over the `iterable` or `length` items (that are counted
+ up). While iteration happens, this function will print a rendered
+ progress bar to the given `file` (defaults to stdout) and will attempt
+ to calculate remaining time and more. By default, this progress bar
+ will not be rendered if the file is not a terminal.
+
+ The context manager creates the progress bar. When the context
+ manager is entered the progress bar is already created. With every
+ iteration over the progress bar, the iterable passed to the bar is
+ advanced and the bar is updated. When the context manager exits,
+ a newline is printed and the progress bar is finalized on screen.
+
+ Note: The progress bar is currently designed for use cases where the
+ total progress can be expected to take at least several seconds.
+ Because of this, the ProgressBar class object won't display
+          progress that is considered too fast, or progress where the time
+ between steps is less than a second.
+
+    No other printing must happen while the bar is active, or the
+    progress bar will be unintentionally destroyed.
+
+ Example usage::
+
+ with progressbar(items) as bar:
+ for item in bar:
+ do_something_with(item)
+
+ Alternatively, if no iterable is specified, one can manually update the
+ progress bar through the `update()` method instead of directly
+ iterating over the progress bar. The update method accepts the number
+ of steps to increment the bar with::
+
+ with progressbar(length=chunks.total_bytes) as bar:
+ for chunk in chunks:
+ process_chunk(chunk)
+ bar.update(chunks.bytes)
+
+ The ``update()`` method also takes an optional value specifying the
+ ``current_item`` at the new position. This is useful when used
+ together with ``item_show_func`` to customize the output for each
+ manual step::
+
+ with click.progressbar(
+ length=total_size,
+ label='Unzipping archive',
+ item_show_func=lambda a: a.filename
+ ) as bar:
+ for archive in zip_file:
+ archive.extract()
+ bar.update(archive.size, archive)
+
+ .. versionadded:: 2.0
+
+ .. versionadded:: 4.0
+       Added the `color` parameter. Added an `update` method to the
+ progressbar object.
+
+ :param iterable: an iterable to iterate over. If not provided the length
+ is required.
+ :param length: the number of items to iterate over. By default the
+ progressbar will attempt to ask the iterator about its
+ length, which might or might not work. If an iterable is
+ also provided this parameter can be used to override the
+ length. If an iterable is not provided the progress bar
+ will iterate over a range of that length.
+ :param label: the label to show next to the progress bar.
+ :param show_eta: enables or disables the estimated time display. This is
+ automatically disabled if the length cannot be
+ determined.
+ :param show_percent: enables or disables the percentage display. The
+ default is `True` if the iterable has a length or
+ `False` if not.
+ :param show_pos: enables or disables the absolute position display. The
+ default is `False`.
+ :param item_show_func: a function called with the current item which
+ can return a string to show the current item
+ next to the progress bar. Note that the current
+ item can be `None`!
+ :param fill_char: the character to use to show the filled part of the
+ progress bar.
+ :param empty_char: the character to use to show the non-filled part of
+ the progress bar.
+ :param bar_template: the format string to use as template for the bar.
+ The parameters in it are ``label`` for the label,
+ ``bar`` for the progress bar and ``info`` for the
+ info section.
+ :param info_sep: the separator between multiple info items (eta etc.)
+ :param width: the width of the progress bar in characters, 0 means full
+ terminal width
+ :param file: the file to write to. If this is not a terminal then
+ only the label is printed.
+ :param color: controls if the terminal supports ANSI colors or not. The
+ default is autodetection. This is only needed if ANSI
+ codes are included anywhere in the progress bar output
+ which is not the case by default.
+ """
+ from ._termui_impl import ProgressBar
+
+ color = resolve_color_default(color)
+ return ProgressBar(
+ iterable=iterable,
+ length=length,
+ show_eta=show_eta,
+ show_percent=show_percent,
+ show_pos=show_pos,
+ item_show_func=item_show_func,
+ fill_char=fill_char,
+ empty_char=empty_char,
+ bar_template=bar_template,
+ info_sep=info_sep,
+ file=file,
+ label=label,
+ width=width,
+ color=color,
+ )
+
+
+def clear():
+ """Clears the terminal screen. This will have the effect of clearing
+ the whole visible space of the terminal and moving the cursor to the
+ top left. This does not do anything if not connected to a terminal.
+
+ .. versionadded:: 2.0
+ """
+ if not isatty(sys.stdout):
+ return
+ # If we're on Windows and we don't have colorama available, then we
+ # clear the screen by shelling out. Otherwise we can use an escape
+ # sequence.
+ if WIN:
+ os.system("cls")
+ else:
+ sys.stdout.write("\033[2J\033[1;1H")
+
+
+def style(
+ text,
+ fg=None,
+ bg=None,
+ bold=None,
+ dim=None,
+ underline=None,
+ blink=None,
+ reverse=None,
+ reset=True,
+):
+ """Styles a text with ANSI styles and returns the new string. By
+ default the styling is self contained which means that at the end
+ of the string a reset code is issued. This can be prevented by
+ passing ``reset=False``.
+
+ Examples::
+
+ click.echo(click.style('Hello World!', fg='green'))
+ click.echo(click.style('ATTENTION!', blink=True))
+ click.echo(click.style('Some things', reverse=True, fg='cyan'))
+
+ Supported color names:
+
+ * ``black`` (might be a gray)
+ * ``red``
+ * ``green``
+ * ``yellow`` (might be an orange)
+ * ``blue``
+ * ``magenta``
+ * ``cyan``
+ * ``white`` (might be light gray)
+ * ``bright_black``
+ * ``bright_red``
+ * ``bright_green``
+ * ``bright_yellow``
+ * ``bright_blue``
+ * ``bright_magenta``
+ * ``bright_cyan``
+ * ``bright_white``
+ * ``reset`` (reset the color code only)
+
+ .. versionadded:: 2.0
+
+ .. versionadded:: 7.0
+ Added support for bright colors.
+
+ :param text: the string to style with ansi codes.
+ :param fg: if provided this will become the foreground color.
+ :param bg: if provided this will become the background color.
+ :param bold: if provided this will enable or disable bold mode.
+ :param dim: if provided this will enable or disable dim mode. This is
+ badly supported.
+ :param underline: if provided this will enable or disable underline.
+ :param blink: if provided this will enable or disable blinking.
+ :param reverse: if provided this will enable or disable inverse
+ rendering (foreground becomes background and the
+ other way round).
+ :param reset: by default a reset-all code is added at the end of the
+ string which means that styles do not carry over. This
+ can be disabled to compose styles.
+ """
+ bits = []
+ if fg:
+ try:
+ bits.append(f"\033[{_ansi_colors[fg]}m")
+ except KeyError:
+ raise TypeError(f"Unknown color {fg!r}")
+ if bg:
+ try:
+ bits.append(f"\033[{_ansi_colors[bg] + 10}m")
+ except KeyError:
+ raise TypeError(f"Unknown color {bg!r}")
+ if bold is not None:
+ bits.append(f"\033[{1 if bold else 22}m")
+ if dim is not None:
+ bits.append(f"\033[{2 if dim else 22}m")
+ if underline is not None:
+ bits.append(f"\033[{4 if underline else 24}m")
+ if blink is not None:
+ bits.append(f"\033[{5 if blink else 25}m")
+ if reverse is not None:
+ bits.append(f"\033[{7 if reverse else 27}m")
+ bits.append(text)
+ if reset:
+ bits.append(_ansi_reset_all)
+ return "".join(bits)
+
+
+def unstyle(text):
+ """Removes ANSI styling information from a string. Usually it's not
+ necessary to use this function as Click's echo function will
+ automatically remove styling if necessary.
+
+ .. versionadded:: 2.0
+
+ :param text: the text to remove style information from.
+ """
+ return strip_ansi(text)
+
+
+def secho(message=None, file=None, nl=True, err=False, color=None, **styles):
+ """This function combines :func:`echo` and :func:`style` into one
+ call. As such the following two calls are the same::
+
+ click.secho('Hello World!', fg='green')
+ click.echo(click.style('Hello World!', fg='green'))
+
+ All keyword arguments are forwarded to the underlying functions
+ depending on which one they go with.
+
+ .. versionadded:: 2.0
+ """
+ if message is not None:
+ message = style(message, **styles)
+ return echo(message, file=file, nl=nl, err=err, color=color)
+
+
+def edit(
+ text=None, editor=None, env=None, require_save=True, extension=".txt", filename=None
+):
+ r"""Edits the given text in the defined editor. If an editor is given
+ (should be the full path to the executable but the regular operating
+ system search path is used for finding the executable) it overrides
+ the detected editor. Optionally, some environment variables can be
+ used. If the editor is closed without changes, `None` is returned. In
+ case a file is edited directly the return value is always `None` and
+ `require_save` and `extension` are ignored.
+
+ If the editor cannot be opened a :exc:`UsageError` is raised.
+
+ Note for Windows: to simplify cross-platform usage, the newlines are
+ automatically converted from POSIX to Windows and vice versa. As such,
+ the message here will have ``\n`` as newline markers.
+
+ :param text: the text to edit.
+ :param editor: optionally the editor to use. Defaults to automatic
+ detection.
+ :param env: environment variables to forward to the editor.
+ :param require_save: if this is true, then not saving in the editor
+ will make the return value become `None`.
+ :param extension: the extension to tell the editor about. This defaults
+ to `.txt` but changing this might change syntax
+ highlighting.
+ :param filename: if provided it will edit this file instead of the
+ provided text contents. It will not use a temporary
+ file as an indirection in that case.
+ """
+ from ._termui_impl import Editor
+
+ editor = Editor(
+ editor=editor, env=env, require_save=require_save, extension=extension
+ )
+ if filename is None:
+ return editor.edit(text)
+ editor.edit_file(filename)
+
+
+def launch(url, wait=False, locate=False):
+ """This function launches the given URL (or filename) in the default
+ viewer application for this file type. If this is an executable, it
+ might launch the executable in a new session. The return value is
+ the exit code of the launched application. Usually, ``0`` indicates
+ success.
+
+ Examples::
+
+ click.launch('https://click.palletsprojects.com/')
+ click.launch('/my/downloaded/file', locate=True)
+
+ .. versionadded:: 2.0
+
+ :param url: URL or filename of the thing to launch.
+ :param wait: waits for the program to stop.
+ :param locate: if this is set to `True` then instead of launching the
+ application associated with the URL it will attempt to
+ launch a file manager with the file located. This
+ might have weird effects if the URL does not point to
+ the filesystem.
+ """
+ from ._termui_impl import open_url
+
+ return open_url(url, wait=wait, locate=locate)
+
+
+# If this is provided, getchar() calls into this instead. This is used
+# for unittesting purposes.
+_getchar = None
+
+
+def getchar(echo=False):
+ """Fetches a single character from the terminal and returns it. This
+ will always return a unicode character and under certain rare
+ circumstances this might return more than one character. The
+    situations in which more than one character is returned are when,
+    for whatever reason, multiple characters end up in the terminal
+    buffer or standard input is not actually a terminal.
+
+ Note that this will always read from the terminal, even if something
+ is piped into the standard input.
+
+ Note for Windows: in rare cases when typing non-ASCII characters, this
+ function might wait for a second character and then return both at once.
+ This is because certain Unicode characters look like special-key markers.
+
+ .. versionadded:: 2.0
+
+ :param echo: if set to `True`, the character read will also show up on
+ the terminal. The default is to not show it.
+ """
+ f = _getchar
+ if f is None:
+ from ._termui_impl import getchar as f
+ return f(echo)
+
+
+def raw_terminal():
+ from ._termui_impl import raw_terminal as f
+
+ return f()
+
+
+def pause(info="Press any key to continue ...", err=False):
+ """This command stops execution and waits for the user to press any
+ key to continue. This is similar to the Windows batch "pause"
+ command. If the program is not run through a terminal, this command
+ will instead do nothing.
+
+ .. versionadded:: 2.0
+
+ .. versionadded:: 4.0
+ Added the `err` parameter.
+
+ :param info: the info string to print before pausing.
+    :param err: if set to true the message goes to ``stderr`` instead of
+ ``stdout``, the same as with echo.
+ """
+ if not isatty(sys.stdin) or not isatty(sys.stdout):
+ return
+ try:
+ if info:
+ echo(info, nl=False, err=err)
+ try:
+ getchar()
+ except (KeyboardInterrupt, EOFError):
+ pass
+ finally:
+ if info:
+ echo(err=err)
diff --git a/libs/dynaconf/vendor/click/testing.py b/libs/dynaconf/vendor/click/testing.py
new file mode 100644
index 000000000..fd6bf61b1
--- /dev/null
+++ b/libs/dynaconf/vendor/click/testing.py
@@ -0,0 +1,362 @@
+import contextlib
+import io
+import os
+import shlex
+import shutil
+import sys
+import tempfile
+
+from . import formatting
+from . import termui
+from . import utils
+from ._compat import _find_binary_reader
+
+
+class EchoingStdin:
+ def __init__(self, input, output):
+ self._input = input
+ self._output = output
+
+ def __getattr__(self, x):
+ return getattr(self._input, x)
+
+ def _echo(self, rv):
+ self._output.write(rv)
+ return rv
+
+ def read(self, n=-1):
+ return self._echo(self._input.read(n))
+
+ def readline(self, n=-1):
+ return self._echo(self._input.readline(n))
+
+ def readlines(self):
+ return [self._echo(x) for x in self._input.readlines()]
+
+ def __iter__(self):
+ return iter(self._echo(x) for x in self._input)
+
+ def __repr__(self):
+ return repr(self._input)
+
+
+def make_input_stream(input, charset):
+ # Is already an input stream.
+ if hasattr(input, "read"):
+ rv = _find_binary_reader(input)
+
+ if rv is not None:
+ return rv
+
+ raise TypeError("Could not find binary reader for input stream.")
+
+ if input is None:
+ input = b""
+ elif not isinstance(input, bytes):
+ input = input.encode(charset)
+
+ return io.BytesIO(input)
+
+
+class Result:
+ """Holds the captured result of an invoked CLI script."""
+
+ def __init__(
+ self, runner, stdout_bytes, stderr_bytes, exit_code, exception, exc_info=None
+ ):
+ #: The runner that created the result
+ self.runner = runner
+ #: The standard output as bytes.
+ self.stdout_bytes = stdout_bytes
+ #: The standard error as bytes, or None if not available
+ self.stderr_bytes = stderr_bytes
+ #: The exit code as integer.
+ self.exit_code = exit_code
+ #: The exception that happened if one did.
+ self.exception = exception
+ #: The traceback
+ self.exc_info = exc_info
+
+ @property
+ def output(self):
+ """The (standard) output as unicode string."""
+ return self.stdout
+
+ @property
+ def stdout(self):
+ """The standard output as unicode string."""
+ return self.stdout_bytes.decode(self.runner.charset, "replace").replace(
+ "\r\n", "\n"
+ )
+
+ @property
+ def stderr(self):
+ """The standard error as unicode string."""
+ if self.stderr_bytes is None:
+ raise ValueError("stderr not separately captured")
+ return self.stderr_bytes.decode(self.runner.charset, "replace").replace(
+ "\r\n", "\n"
+ )
+
+ def __repr__(self):
+ exc_str = repr(self.exception) if self.exception else "okay"
+ return f"<{type(self).__name__} {exc_str}>"
+
+
+class CliRunner:
+ """The CLI runner provides functionality to invoke a Click command line
+    script for unittesting purposes in an isolated environment. This only
+ works in single-threaded systems without any concurrency as it changes the
+ global interpreter state.
+
+ :param charset: the character set for the input and output data.
+ :param env: a dictionary with environment variables for overriding.
+ :param echo_stdin: if this is set to `True`, then reading from stdin writes
+ to stdout. This is useful for showing examples in
+ some circumstances. Note that regular prompts
+ will automatically echo the input.
+ :param mix_stderr: if this is set to `False`, then stdout and stderr are
+ preserved as independent streams. This is useful for
+ Unix-philosophy apps that have predictable stdout and
+ noisy stderr, such that each may be measured
+                       independently.
+ """
+
+ def __init__(self, charset="utf-8", env=None, echo_stdin=False, mix_stderr=True):
+ self.charset = charset
+ self.env = env or {}
+ self.echo_stdin = echo_stdin
+ self.mix_stderr = mix_stderr
+
+ def get_default_prog_name(self, cli):
+ """Given a command object it will return the default program name
+ for it. The default is the `name` attribute or ``"root"`` if not
+ set.
+ """
+ return cli.name or "root"
+
+ def make_env(self, overrides=None):
+ """Returns the environment overrides for invoking a script."""
+ rv = dict(self.env)
+ if overrides:
+ rv.update(overrides)
+ return rv
+
+ @contextlib.contextmanager
+ def isolation(self, input=None, env=None, color=False):
+ """A context manager that sets up the isolation for invoking of a
+ command line tool. This sets up stdin with the given input data
+ and `os.environ` with the overrides from the given dictionary.
+ This also rebinds some internals in Click to be mocked (like the
+ prompt functionality).
+
+ This is automatically done in the :meth:`invoke` method.
+
+ .. versionadded:: 4.0
+ The ``color`` parameter was added.
+
+ :param input: the input stream to put into sys.stdin.
+ :param env: the environment overrides as dictionary.
+ :param color: whether the output should contain color codes. The
+ application can still override this explicitly.
+ """
+ input = make_input_stream(input, self.charset)
+
+ old_stdin = sys.stdin
+ old_stdout = sys.stdout
+ old_stderr = sys.stderr
+ old_forced_width = formatting.FORCED_WIDTH
+ formatting.FORCED_WIDTH = 80
+
+ env = self.make_env(env)
+
+ bytes_output = io.BytesIO()
+
+ if self.echo_stdin:
+ input = EchoingStdin(input, bytes_output)
+
+ input = io.TextIOWrapper(input, encoding=self.charset)
+ sys.stdout = io.TextIOWrapper(bytes_output, encoding=self.charset)
+
+ if not self.mix_stderr:
+ bytes_error = io.BytesIO()
+ sys.stderr = io.TextIOWrapper(bytes_error, encoding=self.charset)
+
+ if self.mix_stderr:
+ sys.stderr = sys.stdout
+
+ sys.stdin = input
+
+ def visible_input(prompt=None):
+ sys.stdout.write(prompt or "")
+ val = input.readline().rstrip("\r\n")
+ sys.stdout.write(f"{val}\n")
+ sys.stdout.flush()
+ return val
+
+ def hidden_input(prompt=None):
+ sys.stdout.write(f"{prompt or ''}\n")
+ sys.stdout.flush()
+ return input.readline().rstrip("\r\n")
+
+ def _getchar(echo):
+ char = sys.stdin.read(1)
+ if echo:
+ sys.stdout.write(char)
+ sys.stdout.flush()
+ return char
+
+ default_color = color
+
+ def should_strip_ansi(stream=None, color=None):
+ if color is None:
+ return not default_color
+ return not color
+
+ old_visible_prompt_func = termui.visible_prompt_func
+ old_hidden_prompt_func = termui.hidden_prompt_func
+ old__getchar_func = termui._getchar
+ old_should_strip_ansi = utils.should_strip_ansi
+ termui.visible_prompt_func = visible_input
+ termui.hidden_prompt_func = hidden_input
+ termui._getchar = _getchar
+ utils.should_strip_ansi = should_strip_ansi
+
+ old_env = {}
+ try:
+ for key, value in env.items():
+ old_env[key] = os.environ.get(key)
+ if value is None:
+ try:
+ del os.environ[key]
+ except Exception:
+ pass
+ else:
+ os.environ[key] = value
+ yield (bytes_output, not self.mix_stderr and bytes_error)
+ finally:
+ for key, value in old_env.items():
+ if value is None:
+ try:
+ del os.environ[key]
+ except Exception:
+ pass
+ else:
+ os.environ[key] = value
+ sys.stdout = old_stdout
+ sys.stderr = old_stderr
+ sys.stdin = old_stdin
+ termui.visible_prompt_func = old_visible_prompt_func
+ termui.hidden_prompt_func = old_hidden_prompt_func
+ termui._getchar = old__getchar_func
+ utils.should_strip_ansi = old_should_strip_ansi
+ formatting.FORCED_WIDTH = old_forced_width
+
+ def invoke(
+ self,
+ cli,
+ args=None,
+ input=None,
+ env=None,
+ catch_exceptions=True,
+ color=False,
+ **extra,
+ ):
+ """Invokes a command in an isolated environment. The arguments are
+ forwarded directly to the command line script, the `extra` keyword
+ arguments are passed to the :meth:`~clickpkg.Command.main` function of
+ the command.
+
+ This returns a :class:`Result` object.
+
+ .. versionadded:: 3.0
+ The ``catch_exceptions`` parameter was added.
+
+ .. versionchanged:: 3.0
+ The result object now has an `exc_info` attribute with the
+ traceback if available.
+
+ .. versionadded:: 4.0
+ The ``color`` parameter was added.
+
+ :param cli: the command to invoke
+ :param args: the arguments to invoke. It may be given as an iterable
+ or a string. When given as string it will be interpreted
+ as a Unix shell command. More details at
+ :func:`shlex.split`.
+ :param input: the input data for `sys.stdin`.
+ :param env: the environment overrides.
+ :param catch_exceptions: Whether to catch any other exceptions than
+ ``SystemExit``.
+ :param extra: the keyword arguments to pass to :meth:`main`.
+ :param color: whether the output should contain color codes. The
+ application can still override this explicitly.
+ """
+ exc_info = None
+ with self.isolation(input=input, env=env, color=color) as outstreams:
+ exception = None
+ exit_code = 0
+
+ if isinstance(args, str):
+ args = shlex.split(args)
+
+ try:
+ prog_name = extra.pop("prog_name")
+ except KeyError:
+ prog_name = self.get_default_prog_name(cli)
+
+ try:
+ cli.main(args=args or (), prog_name=prog_name, **extra)
+ except SystemExit as e:
+ exc_info = sys.exc_info()
+ exit_code = e.code
+ if exit_code is None:
+ exit_code = 0
+
+ if exit_code != 0:
+ exception = e
+
+ if not isinstance(exit_code, int):
+ sys.stdout.write(str(exit_code))
+ sys.stdout.write("\n")
+ exit_code = 1
+
+ except Exception as e:
+ if not catch_exceptions:
+ raise
+ exception = e
+ exit_code = 1
+ exc_info = sys.exc_info()
+ finally:
+ sys.stdout.flush()
+ stdout = outstreams[0].getvalue()
+ if self.mix_stderr:
+ stderr = None
+ else:
+ stderr = outstreams[1].getvalue()
+
+ return Result(
+ runner=self,
+ stdout_bytes=stdout,
+ stderr_bytes=stderr,
+ exit_code=exit_code,
+ exception=exception,
+ exc_info=exc_info,
+ )
+
+ @contextlib.contextmanager
+ def isolated_filesystem(self):
+ """A context manager that creates a temporary folder and changes
+ the current working directory to it for isolated filesystem tests.
+ """
+ cwd = os.getcwd()
+ t = tempfile.mkdtemp()
+ os.chdir(t)
+ try:
+ yield t
+ finally:
+ os.chdir(cwd)
+ try:
+ shutil.rmtree(t)
+ except OSError: # noqa: B014
+ pass
diff --git a/libs/dynaconf/vendor/click/types.py b/libs/dynaconf/vendor/click/types.py
new file mode 100644
index 000000000..93cf70195
--- /dev/null
+++ b/libs/dynaconf/vendor/click/types.py
@@ -0,0 +1,726 @@
+import os
+import stat
+from datetime import datetime
+
+from ._compat import _get_argv_encoding
+from ._compat import filename_to_ui
+from ._compat import get_filesystem_encoding
+from ._compat import get_strerror
+from ._compat import open_stream
+from .exceptions import BadParameter
+from .utils import LazyFile
+from .utils import safecall
+
+
+class ParamType:
+ """Helper for converting values through types. The following is
+ necessary for a valid type:
+
+ * it needs a name
+ * it needs to pass through None unchanged
+ * it needs to convert from a string
+ * it needs to pass values of its result type through unchanged
+ (i.e. it needs to be idempotent)
+ * it needs to be able to deal with param and context being `None`.
+ This can be the case when the object is used with prompt
+ inputs.
+ """
+
+ is_composite = False
+
+ #: the descriptive name of this type
+ name = None
+
+ #: if a list of this type is expected and the value is pulled from a
+ #: string environment variable, this is what splits it up. `None`
+ #: means any whitespace. For all parameters the general rule is that
+ #: whitespace splits them up. The exception are paths and files which
+ #: are split by ``os.path.pathsep`` by default (":" on Unix and ";" on
+ #: Windows).
+ envvar_list_splitter = None
+
+ def __call__(self, value, param=None, ctx=None):
+ if value is not None:
+ return self.convert(value, param, ctx)
+
+ def get_metavar(self, param):
+ """Returns the metavar default for this param if it provides one."""
+
+ def get_missing_message(self, param):
+ """Optionally might return extra information about a missing
+ parameter.
+
+ .. versionadded:: 2.0
+ """
+
+ def convert(self, value, param, ctx):
+ """Converts the value. This is not invoked for values that are
+ `None` (the missing value).
+ """
+ return value
+
+ def split_envvar_value(self, rv):
+ """Given a value from an environment variable this splits it up
+ into small chunks depending on the defined envvar list splitter.
+
+ If the splitter is set to `None`, which means that any whitespace
+ splits, then leading and trailing whitespace is ignored. Otherwise,
+ leading and trailing splitters usually lead to empty items being included.
+ """
+ return (rv or "").split(self.envvar_list_splitter)
+
+ def fail(self, message, param=None, ctx=None):
+ """Helper method to fail with an invalid value message."""
+ raise BadParameter(message, ctx=ctx, param=param)
+
+
+class CompositeParamType(ParamType):
+ is_composite = True
+
+ @property
+ def arity(self):
+ raise NotImplementedError()
+
+
+class FuncParamType(ParamType):
+ def __init__(self, func):
+ self.name = func.__name__
+ self.func = func
+
+ def convert(self, value, param, ctx):
+ try:
+ return self.func(value)
+ except ValueError:
+ try:
+ value = str(value)
+ except UnicodeError:
+ value = value.decode("utf-8", "replace")
+
+ self.fail(value, param, ctx)
+
+
+class UnprocessedParamType(ParamType):
+ name = "text"
+
+ def convert(self, value, param, ctx):
+ return value
+
+ def __repr__(self):
+ return "UNPROCESSED"
+
+
+class StringParamType(ParamType):
+ name = "text"
+
+ def convert(self, value, param, ctx):
+ if isinstance(value, bytes):
+ enc = _get_argv_encoding()
+ try:
+ value = value.decode(enc)
+ except UnicodeError:
+ fs_enc = get_filesystem_encoding()
+ if fs_enc != enc:
+ try:
+ value = value.decode(fs_enc)
+ except UnicodeError:
+ value = value.decode("utf-8", "replace")
+ else:
+ value = value.decode("utf-8", "replace")
+ return value
+ return value
+
+ def __repr__(self):
+ return "STRING"
+
+
+class Choice(ParamType):
+ """The choice type allows a value to be checked against a fixed set
+ of supported values. All of these values have to be strings.
+
+ You should only pass a list or tuple of choices. Other iterables
+ (like generators) may lead to surprising results.
+
+ The resulting value will always be one of the originally passed choices
+ regardless of ``case_sensitive`` or any ``ctx.token_normalize_func``
+ being specified.
+
+ See :ref:`choice-opts` for an example.
+
+ :param case_sensitive: Set to false to make choices case
+ insensitive. Defaults to true.
+ """
+
+ name = "choice"
+
+ def __init__(self, choices, case_sensitive=True):
+ self.choices = choices
+ self.case_sensitive = case_sensitive
+
+ def get_metavar(self, param):
+ return f"[{'|'.join(self.choices)}]"
+
+ def get_missing_message(self, param):
+ choice_str = ",\n\t".join(self.choices)
+ return f"Choose from:\n\t{choice_str}"
+
+ def convert(self, value, param, ctx):
+ # Match through normalization and case sensitivity
+ # first do token_normalize_func, then lowercase
+ # preserve original `value` to produce an accurate message in
+ # `self.fail`
+ normed_value = value
+ normed_choices = {choice: choice for choice in self.choices}
+
+ if ctx is not None and ctx.token_normalize_func is not None:
+ normed_value = ctx.token_normalize_func(value)
+ normed_choices = {
+ ctx.token_normalize_func(normed_choice): original
+ for normed_choice, original in normed_choices.items()
+ }
+
+ if not self.case_sensitive:
+ normed_value = normed_value.casefold()
+ normed_choices = {
+ normed_choice.casefold(): original
+ for normed_choice, original in normed_choices.items()
+ }
+
+ if normed_value in normed_choices:
+ return normed_choices[normed_value]
+
+ self.fail(
+ f"invalid choice: {value}. (choose from {', '.join(self.choices)})",
+ param,
+ ctx,
+ )
+
+ def __repr__(self):
+ return f"Choice({list(self.choices)})"
+
+
+class DateTime(ParamType):
+ """The DateTime type converts date strings into `datetime` objects.
+
+ The format strings which are checked are configurable, but default to some
+ common (non-timezone aware) ISO 8601 formats.
+
+ When specifying *DateTime* formats, you should only pass a list or a tuple.
+ Other iterables, like generators, may lead to surprising results.
+
+ The format strings are processed using ``datetime.strptime``, and this
+ consequently defines the format strings which are allowed.
+
+ Parsing is tried using each format, in order, and the first format which
+ parses successfully is used.
+
+ :param formats: A list or tuple of date format strings, in the order in
+ which they should be tried. Defaults to
+ ``'%Y-%m-%d'``, ``'%Y-%m-%dT%H:%M:%S'``,
+ ``'%Y-%m-%d %H:%M:%S'``.
+ """
+
+ name = "datetime"
+
+ def __init__(self, formats=None):
+ self.formats = formats or ["%Y-%m-%d", "%Y-%m-%dT%H:%M:%S", "%Y-%m-%d %H:%M:%S"]
+
+ def get_metavar(self, param):
+ return f"[{'|'.join(self.formats)}]"
+
+ def _try_to_convert_date(self, value, format):
+ try:
+ return datetime.strptime(value, format)
+ except ValueError:
+ return None
+
+ def convert(self, value, param, ctx):
+ # Exact match
+ for format in self.formats:
+ dtime = self._try_to_convert_date(value, format)
+ if dtime:
+ return dtime
+
+ self.fail(
+ f"invalid datetime format: {value}. (choose from {', '.join(self.formats)})"
+ )
+
+ def __repr__(self):
+ return "DateTime"
+
+
+class IntParamType(ParamType):
+ name = "integer"
+
+ def convert(self, value, param, ctx):
+ try:
+ return int(value)
+ except ValueError:
+ self.fail(f"{value} is not a valid integer", param, ctx)
+
+ def __repr__(self):
+ return "INT"
+
+
+class IntRange(IntParamType):
+ """A parameter that works similar to :data:`click.INT` but restricts
+ the value to fit into a range. The default behavior is to fail if the
+ value falls outside the range, but it can also be silently clamped
+ between the two edges.
+
+ See :ref:`ranges` for an example.
+ """
+
+ name = "integer range"
+
+ def __init__(self, min=None, max=None, clamp=False):
+ self.min = min
+ self.max = max
+ self.clamp = clamp
+
+ def convert(self, value, param, ctx):
+ rv = IntParamType.convert(self, value, param, ctx)
+ if self.clamp:
+ if self.min is not None and rv < self.min:
+ return self.min
+ if self.max is not None and rv > self.max:
+ return self.max
+ if (
+ self.min is not None
+ and rv < self.min
+ or self.max is not None
+ and rv > self.max
+ ):
+ if self.min is None:
+ self.fail(
+ f"{rv} is bigger than the maximum valid value {self.max}.",
+ param,
+ ctx,
+ )
+ elif self.max is None:
+ self.fail(
+ f"{rv} is smaller than the minimum valid value {self.min}.",
+ param,
+ ctx,
+ )
+ else:
+ self.fail(
+ f"{rv} is not in the valid range of {self.min} to {self.max}.",
+ param,
+ ctx,
+ )
+ return rv
+
+ def __repr__(self):
+ return f"IntRange({self.min}, {self.max})"
+
+
+class FloatParamType(ParamType):
+ name = "float"
+
+ def convert(self, value, param, ctx):
+ try:
+ return float(value)
+ except ValueError:
+ self.fail(f"{value} is not a valid floating point value", param, ctx)
+
+ def __repr__(self):
+ return "FLOAT"
+
+
+class FloatRange(FloatParamType):
+ """A parameter that works similar to :data:`click.FLOAT` but restricts
+ the value to fit into a range. The default behavior is to fail if the
+ value falls outside the range, but it can also be silently clamped
+ between the two edges.
+
+ See :ref:`ranges` for an example.
+ """
+
+ name = "float range"
+
+ def __init__(self, min=None, max=None, clamp=False):
+ self.min = min
+ self.max = max
+ self.clamp = clamp
+
+ def convert(self, value, param, ctx):
+ rv = FloatParamType.convert(self, value, param, ctx)
+ if self.clamp:
+ if self.min is not None and rv < self.min:
+ return self.min
+ if self.max is not None and rv > self.max:
+ return self.max
+ if (
+ self.min is not None
+ and rv < self.min
+ or self.max is not None
+ and rv > self.max
+ ):
+ if self.min is None:
+ self.fail(
+ f"{rv} is bigger than the maximum valid value {self.max}.",
+ param,
+ ctx,
+ )
+ elif self.max is None:
+ self.fail(
+ f"{rv} is smaller than the minimum valid value {self.min}.",
+ param,
+ ctx,
+ )
+ else:
+ self.fail(
+ f"{rv} is not in the valid range of {self.min} to {self.max}.",
+ param,
+ ctx,
+ )
+ return rv
+
+ def __repr__(self):
+ return f"FloatRange({self.min}, {self.max})"
+
+
+class BoolParamType(ParamType):
+ name = "boolean"
+
+ def convert(self, value, param, ctx):
+ if isinstance(value, bool):
+ return bool(value)
+ value = value.lower()
+ if value in ("true", "t", "1", "yes", "y"):
+ return True
+ elif value in ("false", "f", "0", "no", "n"):
+ return False
+ self.fail(f"{value} is not a valid boolean", param, ctx)
+
+ def __repr__(self):
+ return "BOOL"
+
+
+class UUIDParameterType(ParamType):
+ name = "uuid"
+
+ def convert(self, value, param, ctx):
+ import uuid
+
+ try:
+ return uuid.UUID(value)
+ except ValueError:
+ self.fail(f"{value} is not a valid UUID value", param, ctx)
+
+ def __repr__(self):
+ return "UUID"
+
+
+class File(ParamType):
+ """Declares a parameter to be a file for reading or writing. The file
+ is automatically closed once the context tears down (after the command
+ finished working).
+
+ Files can be opened for reading or writing. The special value ``-``
+ indicates stdin or stdout depending on the mode.
+
+ By default, the file is opened for reading text data, but it can also be
+ opened in binary mode or for writing. The encoding parameter can be used
+ to force a specific encoding.
+
+ The `lazy` flag controls if the file should be opened immediately or upon
+ first IO. The default is to be non-lazy for standard input and output
+ streams as well as files opened for reading, and lazy otherwise. When
+ opening a file lazily for reading, it is still opened temporarily for
+ validation, but will not be held open until first IO. Laziness is mainly
+ useful when opening for writing, to avoid creating the file until it is
+ needed.
+
+ Starting with Click 2.0, files can also be opened atomically in which
+ case all writes go into a separate file in the same folder and upon
+ completion the file will be moved over to the original location. This
+ is useful if a file regularly read by other users is modified.
+
+ See :ref:`file-args` for more information.
+ """
+
+ name = "filename"
+ envvar_list_splitter = os.path.pathsep
+
+ def __init__(
+ self, mode="r", encoding=None, errors="strict", lazy=None, atomic=False
+ ):
+ self.mode = mode
+ self.encoding = encoding
+ self.errors = errors
+ self.lazy = lazy
+ self.atomic = atomic
+
+ def resolve_lazy_flag(self, value):
+ if self.lazy is not None:
+ return self.lazy
+ if value == "-":
+ return False
+ elif "w" in self.mode:
+ return True
+ return False
+
+ def convert(self, value, param, ctx):
+ try:
+ if hasattr(value, "read") or hasattr(value, "write"):
+ return value
+
+ lazy = self.resolve_lazy_flag(value)
+
+ if lazy:
+ f = LazyFile(
+ value, self.mode, self.encoding, self.errors, atomic=self.atomic
+ )
+ if ctx is not None:
+ ctx.call_on_close(f.close_intelligently)
+ return f
+
+ f, should_close = open_stream(
+ value, self.mode, self.encoding, self.errors, atomic=self.atomic
+ )
+ # If a context is provided, we automatically close the file
+ # at the end of the context execution (or flush out). If a
+ # context does not exist, it's the caller's responsibility to
+ # properly close the file. This for instance happens when the
+ # type is used with prompts.
+ if ctx is not None:
+ if should_close:
+ ctx.call_on_close(safecall(f.close))
+ else:
+ ctx.call_on_close(safecall(f.flush))
+ return f
+ except OSError as e: # noqa: B014
+ self.fail(
+ f"Could not open file: {filename_to_ui(value)}: {get_strerror(e)}",
+ param,
+ ctx,
+ )
+
+
+class Path(ParamType):
+ """The path type is similar to the :class:`File` type but it performs
+ different checks. First of all, instead of returning an open file
+ handle it returns just the filename. Secondly, it can perform various
+ basic checks about what the file or directory should be.
+
+ .. versionchanged:: 6.0
+ `allow_dash` was added.
+
+ :param exists: if set to true, the file or directory needs to exist for
+ this value to be valid. If this is not required and a
+ file does indeed not exist, then all further checks are
+ silently skipped.
+ :param file_okay: controls if a file is a possible value.
+ :param dir_okay: controls if a directory is a possible value.
+ :param writable: if true, a writable check is performed.
+ :param readable: if true, a readable check is performed.
+ :param resolve_path: if this is true, then the path is fully resolved
+ before the value is passed onwards. This means
+ that it's absolute and symlinks are resolved. It
+ will not expand a tilde-prefix, as this is
+ supposed to be done by the shell only.
+ :param allow_dash: If this is set to `True`, a single dash to indicate
+ standard streams is permitted.
+ :param path_type: optionally a string type that should be used to
+ represent the path. The default is `None` which
+ means the return value will be either bytes or
+ unicode depending on what makes most sense given the
+ input data Click deals with.
+ """
+
+ envvar_list_splitter = os.path.pathsep
+
+ def __init__(
+ self,
+ exists=False,
+ file_okay=True,
+ dir_okay=True,
+ writable=False,
+ readable=True,
+ resolve_path=False,
+ allow_dash=False,
+ path_type=None,
+ ):
+ self.exists = exists
+ self.file_okay = file_okay
+ self.dir_okay = dir_okay
+ self.writable = writable
+ self.readable = readable
+ self.resolve_path = resolve_path
+ self.allow_dash = allow_dash
+ self.type = path_type
+
+ if self.file_okay and not self.dir_okay:
+ self.name = "file"
+ self.path_type = "File"
+ elif self.dir_okay and not self.file_okay:
+ self.name = "directory"
+ self.path_type = "Directory"
+ else:
+ self.name = "path"
+ self.path_type = "Path"
+
+ def coerce_path_result(self, rv):
+ if self.type is not None and not isinstance(rv, self.type):
+ if self.type is str:
+ rv = rv.decode(get_filesystem_encoding())
+ else:
+ rv = rv.encode(get_filesystem_encoding())
+ return rv
+
+ def convert(self, value, param, ctx):
+ rv = value
+
+ is_dash = self.file_okay and self.allow_dash and rv in (b"-", "-")
+
+ if not is_dash:
+ if self.resolve_path:
+ rv = os.path.realpath(rv)
+
+ try:
+ st = os.stat(rv)
+ except OSError:
+ if not self.exists:
+ return self.coerce_path_result(rv)
+ self.fail(
+ f"{self.path_type} {filename_to_ui(value)!r} does not exist.",
+ param,
+ ctx,
+ )
+
+ if not self.file_okay and stat.S_ISREG(st.st_mode):
+ self.fail(
+ f"{self.path_type} {filename_to_ui(value)!r} is a file.",
+ param,
+ ctx,
+ )
+ if not self.dir_okay and stat.S_ISDIR(st.st_mode):
+ self.fail(
+ f"{self.path_type} {filename_to_ui(value)!r} is a directory.",
+ param,
+ ctx,
+ )
+ if self.writable and not os.access(value, os.W_OK):
+ self.fail(
+ f"{self.path_type} {filename_to_ui(value)!r} is not writable.",
+ param,
+ ctx,
+ )
+ if self.readable and not os.access(value, os.R_OK):
+ self.fail(
+ f"{self.path_type} {filename_to_ui(value)!r} is not readable.",
+ param,
+ ctx,
+ )
+
+ return self.coerce_path_result(rv)
+
+
+class Tuple(CompositeParamType):
+ """The default behavior of Click is to apply a type on a value directly.
+ This works well in most cases, except for when `nargs` is set to a fixed
+ count and different types should be used for different items. In this
+ case the :class:`Tuple` type can be used. This type can only be used
+ if `nargs` is set to a fixed number.
+
+ For more information see :ref:`tuple-type`.
+
+ This can be selected by using a Python tuple literal as a type.
+
+ :param types: a list of types that should be used for the tuple items.
+ """
+
+ def __init__(self, types):
+ self.types = [convert_type(ty) for ty in types]
+
+ @property
+ def name(self):
+ return f"<{' '.join(ty.name for ty in self.types)}>"
+
+ @property
+ def arity(self):
+ return len(self.types)
+
+ def convert(self, value, param, ctx):
+ if len(value) != len(self.types):
+ raise TypeError(
+ "It would appear that nargs is set to conflict with the"
+ " composite type arity."
+ )
+ return tuple(ty(x, param, ctx) for ty, x in zip(self.types, value))
+
+
+def convert_type(ty, default=None):
+ """Converts a callable or python type into the most appropriate
+ param type.
+ """
+ guessed_type = False
+ if ty is None and default is not None:
+ if isinstance(default, tuple):
+ ty = tuple(map(type, default))
+ else:
+ ty = type(default)
+ guessed_type = True
+
+ if isinstance(ty, tuple):
+ return Tuple(ty)
+ if isinstance(ty, ParamType):
+ return ty
+ if ty is str or ty is None:
+ return STRING
+ if ty is int:
+ return INT
+ # Booleans are only okay if not guessed. This is done because for
+ # flags the default value is actually a bit of a lie in that it
+ # indicates which of the flags is the one we want. See get_default()
+ # for more information.
+ if ty is bool and not guessed_type:
+ return BOOL
+ if ty is float:
+ return FLOAT
+ if guessed_type:
+ return STRING
+
+ # Catch a common mistake
+ if __debug__:
+ try:
+ if issubclass(ty, ParamType):
+ raise AssertionError(
+ f"Attempted to use an uninstantiated parameter type ({ty})."
+ )
+ except TypeError:
+ pass
+ return FuncParamType(ty)
+
+
+#: A dummy parameter type that just does nothing. From a user's
+#: perspective this appears to just be the same as `STRING` but
+#: internally no string conversion takes place if the input was bytes.
+#: This is usually useful when working with file paths as they can
+#: appear in bytes and unicode.
+#:
+#: For path related uses the :class:`Path` type is a better choice but
+#: there are situations where an unprocessed type is useful which is why
+ #: it is provided.
+#:
+#: .. versionadded:: 4.0
+UNPROCESSED = UnprocessedParamType()
+
+#: A unicode string parameter type which is the implicit default. This
+#: can also be selected by using ``str`` as type.
+STRING = StringParamType()
+
+#: An integer parameter. This can also be selected by using ``int`` as
+#: type.
+INT = IntParamType()
+
+#: A floating point value parameter. This can also be selected by using
+#: ``float`` as type.
+FLOAT = FloatParamType()
+
+#: A boolean parameter. This is the default for boolean flags. This can
+#: also be selected by using ``bool`` as a type.
+BOOL = BoolParamType()
+
+#: A UUID parameter.
+UUID = UUIDParameterType()
diff --git a/libs/dynaconf/vendor/click/utils.py b/libs/dynaconf/vendor/click/utils.py
new file mode 100644
index 000000000..bd9dd8e7a
--- /dev/null
+++ b/libs/dynaconf/vendor/click/utils.py
@@ -0,0 +1,440 @@
+import os
+import sys
+
+from ._compat import _default_text_stderr
+from ._compat import _default_text_stdout
+from ._compat import _find_binary_writer
+from ._compat import auto_wrap_for_ansi
+from ._compat import binary_streams
+from ._compat import filename_to_ui
+from ._compat import get_filesystem_encoding
+from ._compat import get_strerror
+from ._compat import is_bytes
+from ._compat import open_stream
+from ._compat import should_strip_ansi
+from ._compat import strip_ansi
+from ._compat import text_streams
+from ._compat import WIN
+from .globals import resolve_color_default
+
+
+echo_native_types = (str, bytes, bytearray)
+
+
+def _posixify(name):
+ return "-".join(name.split()).lower()
+
+
+def safecall(func):
+ """Wraps a function so that it swallows exceptions."""
+
+ def wrapper(*args, **kwargs):
+ try:
+ return func(*args, **kwargs)
+ except Exception:
+ pass
+
+ return wrapper
+
+
+def make_str(value):
+ """Converts a value into a valid string."""
+ if isinstance(value, bytes):
+ try:
+ return value.decode(get_filesystem_encoding())
+ except UnicodeError:
+ return value.decode("utf-8", "replace")
+ return str(value)
+
+
+def make_default_short_help(help, max_length=45):
+ """Return a condensed version of help string."""
+ words = help.split()
+ total_length = 0
+ result = []
+ done = False
+
+ for word in words:
+ if word[-1:] == ".":
+ done = True
+ new_length = 1 + len(word) if result else len(word)
+ if total_length + new_length > max_length:
+ result.append("...")
+ done = True
+ else:
+ if result:
+ result.append(" ")
+ result.append(word)
+ if done:
+ break
+ total_length += new_length
+
+ return "".join(result)
+
+
+class LazyFile:
+ """A lazy file works like a regular file but it does not fully open
+ the file but it does perform some basic checks early to see if the
+ filename parameter does make sense. This is useful for safely opening
+ files for writing.
+ """
+
+ def __init__(
+ self, filename, mode="r", encoding=None, errors="strict", atomic=False
+ ):
+ self.name = filename
+ self.mode = mode
+ self.encoding = encoding
+ self.errors = errors
+ self.atomic = atomic
+
+ if filename == "-":
+ self._f, self.should_close = open_stream(filename, mode, encoding, errors)
+ else:
+ if "r" in mode:
+ # Open and close the file in case we're opening it for
+ # reading so that we can catch at least some errors in
+ # some cases early.
+ open(filename, mode).close()
+ self._f = None
+ self.should_close = True
+
+ def __getattr__(self, name):
+ return getattr(self.open(), name)
+
+ def __repr__(self):
+ if self._f is not None:
+ return repr(self._f)
+ return f"<unopened file '{self.name}' {self.mode}>"
+
+ def open(self):
+ """Opens the file if it's not yet open. This call might fail with
+ a :exc:`FileError`. Not handling this error will produce an error
+ that Click shows.
+ """
+ if self._f is not None:
+ return self._f
+ try:
+ rv, self.should_close = open_stream(
+ self.name, self.mode, self.encoding, self.errors, atomic=self.atomic
+ )
+ except OSError as e: # noqa: E402
+ from .exceptions import FileError
+
+ raise FileError(self.name, hint=get_strerror(e))
+ self._f = rv
+ return rv
+
+ def close(self):
+ """Closes the underlying file, no matter what."""
+ if self._f is not None:
+ self._f.close()
+
+ def close_intelligently(self):
+ """This function only closes the file if it was opened by the lazy
+ file wrapper. For instance this will never close stdin.
+ """
+ if self.should_close:
+ self.close()
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, tb):
+ self.close_intelligently()
+
+ def __iter__(self):
+ self.open()
+ return iter(self._f)
+
+
+class KeepOpenFile:
+ def __init__(self, file):
+ self._file = file
+
+ def __getattr__(self, name):
+ return getattr(self._file, name)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, tb):
+ pass
+
+ def __repr__(self):
+ return repr(self._file)
+
+ def __iter__(self):
+ return iter(self._file)
+
+
+def echo(message=None, file=None, nl=True, err=False, color=None):
+ """Prints a message plus a newline to the given file or stdout. On
+ first sight, this looks like the print function, but it has improved
+ support for handling Unicode and binary data that does not fail no
+ matter how badly configured the system is.
+
+ Primarily it means that you can print binary data as well as Unicode
+ data on both 2.x and 3.x to the given file in the most appropriate way
+ possible. This is a very carefree function in that it will try its
+ best to not fail. As of Click 6.0 this includes support for unicode
+ output on the Windows console.
+
+ In addition to that, if `colorama`_ is installed, the echo function will
+ also support clever handling of ANSI codes. Essentially it will then
+ do the following:
+
+ - add transparent handling of ANSI color codes on Windows.
+ - hide ANSI codes automatically if the destination file is not a
+ terminal.
+
+ .. _colorama: https://pypi.org/project/colorama/
+
+ .. versionchanged:: 6.0
+ As of Click 6.0 the echo function will properly support unicode
+ output on the windows console. Note that click does not modify
+ the interpreter in any way which means that `sys.stdout` or the
+ print statement or function will still not provide unicode support.
+
+ .. versionchanged:: 2.0
+ Starting with version 2.0 of Click, the echo function will work
+ with colorama if it's installed.
+
+ .. versionadded:: 3.0
+ The `err` parameter was added.
+
+ .. versionchanged:: 4.0
+ Added the `color` flag.
+
+ :param message: the message to print
+ :param file: the file to write to (defaults to ``stdout``)
+ :param err: if set to true the file defaults to ``stderr`` instead of
+ ``stdout``. This is faster and easier than calling
+ :func:`get_text_stderr` yourself.
+ :param nl: if set to `True` (the default) a newline is printed afterwards.
+ :param color: controls if the terminal supports ANSI colors or not. The
+ default is autodetection.
+ """
+ if file is None:
+ if err:
+ file = _default_text_stderr()
+ else:
+ file = _default_text_stdout()
+
+ # Convert non bytes/text into the native string type.
+ if message is not None and not isinstance(message, echo_native_types):
+ message = str(message)
+
+ if nl:
+ message = message or ""
+ if isinstance(message, str):
+ message += "\n"
+ else:
+ message += b"\n"
+
+ # If there is a message and the value looks like bytes, we manually
+ # need to find the binary stream and write the message in there.
+ # This is done separately so that most stream types will work as you
+ # would expect. Eg: you can write to StringIO for other cases.
+ if message and is_bytes(message):
+ binary_file = _find_binary_writer(file)
+ if binary_file is not None:
+ file.flush()
+ binary_file.write(message)
+ binary_file.flush()
+ return
+
+ # ANSI-style support. If there is no message or we are dealing with
+ # bytes nothing is happening. If we are connected to a file we want
+ # to strip colors. If we are on windows we either wrap the stream
+ # to strip the color or we use the colorama support to translate the
+ # ansi codes to API calls.
+ if message and not is_bytes(message):
+ color = resolve_color_default(color)
+ if should_strip_ansi(file, color):
+ message = strip_ansi(message)
+ elif WIN:
+ if auto_wrap_for_ansi is not None:
+ file = auto_wrap_for_ansi(file)
+ elif not color:
+ message = strip_ansi(message)
+
+ if message:
+ file.write(message)
+ file.flush()
+
+
+def get_binary_stream(name):
+ """Returns a system stream for byte processing.
+
+ :param name: the name of the stream to open. Valid names are ``'stdin'``,
+ ``'stdout'`` and ``'stderr'``
+ """
+ opener = binary_streams.get(name)
+ if opener is None:
+ raise TypeError(f"Unknown standard stream '{name}'")
+ return opener()
+
+
+def get_text_stream(name, encoding=None, errors="strict"):
+ """Returns a system stream for text processing. This usually returns
+ a wrapped stream around a binary stream returned from
+ :func:`get_binary_stream` but it also can take shortcuts for already
+ correctly configured streams.
+
+ :param name: the name of the stream to open. Valid names are ``'stdin'``,
+ ``'stdout'`` and ``'stderr'``
+ :param encoding: overrides the detected default encoding.
+ :param errors: overrides the default error mode.
+ """
+ opener = text_streams.get(name)
+ if opener is None:
+ raise TypeError(f"Unknown standard stream '{name}'")
+ return opener(encoding, errors)
+
+
+def open_file(
+ filename, mode="r", encoding=None, errors="strict", lazy=False, atomic=False
+):
+ """This is similar to how the :class:`File` works but for manual
+ usage. Files are opened non-lazily by default. This can open regular
+ files as well as stdin/stdout if ``'-'`` is passed.
+
+ If stdin/stdout is returned the stream is wrapped so that the context
+ manager will not close the stream accidentally. This makes it possible
+ to always use the function like this without having to worry about
+ accidentally closing a standard stream::
+
+ with open_file(filename) as f:
+ ...
+
+ .. versionadded:: 3.0
+
+ :param filename: the name of the file to open (or ``'-'`` for stdin/stdout).
+ :param mode: the mode in which to open the file.
+ :param encoding: the encoding to use.
+ :param errors: the error handling for this file.
+ :param lazy: can be flipped to true to open the file lazily.
+ :param atomic: in atomic mode writes go into a temporary file and it's
+ moved on close.
+ """
+ if lazy:
+ return LazyFile(filename, mode, encoding, errors, atomic=atomic)
+ f, should_close = open_stream(filename, mode, encoding, errors, atomic=atomic)
+ if not should_close:
+ f = KeepOpenFile(f)
+ return f
+
+
+def get_os_args():
+ """Returns the argument part of ``sys.argv``, removing the first
+ value which is the name of the script.
+
+ .. deprecated:: 8.0
+ Will be removed in 8.1. Access ``sys.argv[1:]`` directly
+ instead.
+ """
+ import warnings
+
+ warnings.warn(
+ "'get_os_args' is deprecated and will be removed in 8.1. Access"
+ " 'sys.argv[1:]' directly instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return sys.argv[1:]
+
+
+def format_filename(filename, shorten=False):
+ """Formats a filename for user display. The main purpose of this
+ function is to ensure that the filename can be displayed at all. This
+ will decode the filename to unicode if necessary in a way that it will
+ not fail. Optionally, it can shorten the filename to not include the
+ full path to the filename.
+
+ :param filename: formats a filename for UI display. This will also convert
+ the filename into unicode without failing.
+ :param shorten: this optionally shortens the filename to strip off the
+ path that leads up to it.
+ """
+ if shorten:
+ filename = os.path.basename(filename)
+ return filename_to_ui(filename)
+
+
+def get_app_dir(app_name, roaming=True, force_posix=False):
+ r"""Returns the config folder for the application. The default behavior
+ is to return whatever is most appropriate for the operating system.
+
+ To give you an idea, for an app called ``"Foo Bar"``, something like
+ the following folders could be returned:
+
+ Mac OS X:
+ ``~/Library/Application Support/Foo Bar``
+ Mac OS X (POSIX):
+ ``~/.foo-bar``
+ Unix:
+ ``~/.config/foo-bar``
+ Unix (POSIX):
+ ``~/.foo-bar``
+ Win XP (roaming):
+ ``C:\Documents and Settings\<user>\Local Settings\Application Data\Foo Bar``
+ Win XP (not roaming):
+ ``C:\Documents and Settings\<user>\Application Data\Foo Bar``
+ Win 7 (roaming):
+ ``C:\Users\<user>\AppData\Roaming\Foo Bar``
+ Win 7 (not roaming):
+ ``C:\Users\<user>\AppData\Local\Foo Bar``
+
+ .. versionadded:: 2.0
+
+ :param app_name: the application name. This should be properly capitalized
+ and can contain whitespace.
+ :param roaming: controls if the folder should be roaming or not on Windows.
+ Has no effect otherwise.
+ :param force_posix: if this is set to `True` then on any POSIX system the
+ folder will be stored in the home folder with a leading
+ dot instead of the XDG config home or darwin's
+ application support folder.
+ """
+ if WIN:
+ key = "APPDATA" if roaming else "LOCALAPPDATA"
+ folder = os.environ.get(key)
+ if folder is None:
+ folder = os.path.expanduser("~")
+ return os.path.join(folder, app_name)
+ if force_posix:
+ return os.path.join(os.path.expanduser(f"~/.{_posixify(app_name)}"))
+ if sys.platform == "darwin":
+ return os.path.join(
+ os.path.expanduser("~/Library/Application Support"), app_name
+ )
+ return os.path.join(
+ os.environ.get("XDG_CONFIG_HOME", os.path.expanduser("~/.config")),
+ _posixify(app_name),
+ )
+
+
+class PacifyFlushWrapper:
+ """This wrapper is used to catch and suppress BrokenPipeErrors resulting
+ from ``.flush()`` being called on broken pipe during the shutdown/final-GC
+ of the Python interpreter. Notably ``.flush()`` is always called on
+ ``sys.stdout`` and ``sys.stderr``. So as to have minimal impact on any
+ other cleanup code, and the case where the underlying file is not a broken
+ pipe, all calls and attributes are proxied.
+ """
+
+ def __init__(self, wrapped):
+ self.wrapped = wrapped
+
+ def flush(self):
+ try:
+ self.wrapped.flush()
+ except OSError as e:
+ import errno
+
+ if e.errno != errno.EPIPE:
+ raise
+
+ def __getattr__(self, attr):
+ return getattr(self.wrapped, attr)
diff --git a/libs/dynaconf/vendor/dotenv/__init__.py b/libs/dynaconf/vendor/dotenv/__init__.py
new file mode 100644
index 000000000..b88d9bc27
--- /dev/null
+++ b/libs/dynaconf/vendor/dotenv/__init__.py
@@ -0,0 +1,46 @@
+from .compat import IS_TYPE_CHECKING
+from .main import load_dotenv, get_key, set_key, unset_key, find_dotenv, dotenv_values
+
+if IS_TYPE_CHECKING:
+ from typing import Any, Optional
+
+
+def load_ipython_extension(ipython):
+ # type: (Any) -> None
+ from .ipython import load_ipython_extension
+ load_ipython_extension(ipython)
+
+
+def get_cli_string(path=None, action=None, key=None, value=None, quote=None):
+ # type: (Optional[str], Optional[str], Optional[str], Optional[str], Optional[str]) -> str
+ """Returns a string suitable for running as a shell script.
+
+ Useful for converting arguments passed to a fabric task
+ to be passed to a `local` or `run` command.
+ """
+ command = ['dotenv']
+ if quote:
+ command.append('-q %s' % quote)
+ if path:
+ command.append('-f %s' % path)
+ if action:
+ command.append(action)
+ if key:
+ command.append(key)
+ if value:
+ if ' ' in value:
+ command.append('"%s"' % value)
+ else:
+ command.append(value)
+
+ return ' '.join(command).strip()
+
+
+__all__ = ['get_cli_string',
+ 'load_dotenv',
+ 'dotenv_values',
+ 'get_key',
+ 'set_key',
+ 'unset_key',
+ 'find_dotenv',
+ 'load_ipython_extension']
diff --git a/libs/dynaconf/vendor/dotenv/cli.py b/libs/dynaconf/vendor/dotenv/cli.py
new file mode 100644
index 000000000..269b093a3
--- /dev/null
+++ b/libs/dynaconf/vendor/dotenv/cli.py
@@ -0,0 +1,145 @@
+import os
+import sys
+from subprocess import Popen
+
+try:
+ from dynaconf.vendor import click
+except ImportError:
+ sys.stderr.write('It seems python-dotenv is not installed with cli option. \n'
+ 'Run pip install "python-dotenv[cli]" to fix this.')
+ sys.exit(1)
+
+from .compat import IS_TYPE_CHECKING, to_env
+from .main import dotenv_values, get_key, set_key, unset_key
+from .version import __version__
+
+if IS_TYPE_CHECKING:
+ from typing import Any, List, Dict
+
+
+@click.group()
+@click.option('-f', '--file', default=os.path.join(os.getcwd(), '.env'),
+ type=click.Path(exists=True),
+ help="Location of the .env file, defaults to .env file in current working directory.")
+@click.option('-q', '--quote', default='always',
+ type=click.Choice(['always', 'never', 'auto']),
+ help="Whether to quote or not the variable values. Default mode is always. This does not affect parsing.")
+@click.version_option(version=__version__)
+@click.pass_context
+def cli(ctx, file, quote):
+ # type: (click.Context, Any, Any) -> None
+ '''This script is used to set, get or unset values from a .env file.'''
+ ctx.obj = {}
+ ctx.obj['FILE'] = file
+ ctx.obj['QUOTE'] = quote
+
+
[email protected]()
[email protected]_context
+def list(ctx):
+ # type: (click.Context) -> None
+ '''Display all the stored key/value pairs.'''
+ file = ctx.obj['FILE']
+ dotenv_as_dict = dotenv_values(file)
+ for k, v in dotenv_as_dict.items():
+ click.echo('%s=%s' % (k, v))
+
+
[email protected]()
[email protected]_context
[email protected]('key', required=True)
[email protected]('value', required=True)
+def set(ctx, key, value):
+ # type: (click.Context, Any, Any) -> None
+ '''Store the given key/value.'''
+ file = ctx.obj['FILE']
+ quote = ctx.obj['QUOTE']
+ success, key, value = set_key(file, key, value, quote)
+ if success:
+ click.echo('%s=%s' % (key, value))
+ else:
+ exit(1)
+
+
[email protected]()
[email protected]_context
[email protected]('key', required=True)
+def get(ctx, key):
+ # type: (click.Context, Any) -> None
+ '''Retrieve the value for the given key.'''
+ file = ctx.obj['FILE']
+ stored_value = get_key(file, key)
+ if stored_value:
+ click.echo('%s=%s' % (key, stored_value))
+ else:
+ exit(1)
+
+
[email protected]()
[email protected]_context
[email protected]('key', required=True)
+def unset(ctx, key):
+ # type: (click.Context, Any) -> None
+ '''Removes the given key.'''
+ file = ctx.obj['FILE']
+ quote = ctx.obj['QUOTE']
+ success, key = unset_key(file, key, quote)
+ if success:
+ click.echo("Successfully removed %s" % key)
+ else:
+ exit(1)
+
+
[email protected](context_settings={'ignore_unknown_options': True})
[email protected]_context
[email protected]('commandline', nargs=-1, type=click.UNPROCESSED)
+def run(ctx, commandline):
+ # type: (click.Context, List[str]) -> None
+ """Run command with environment variables present."""
+ file = ctx.obj['FILE']
+ dotenv_as_dict = {to_env(k): to_env(v) for (k, v) in dotenv_values(file).items() if v is not None}
+
+ if not commandline:
+ click.echo('No command given.')
+ exit(1)
+ ret = run_command(commandline, dotenv_as_dict)
+ exit(ret)
+
+
+def run_command(command, env):
+ # type: (List[str], Dict[str, str]) -> int
+ """Run command in sub process.
+
+ Runs the command in a sub process with the variables from `env`
+ added in the current environment variables.
+
+ Parameters
+ ----------
+ command: List[str]
+ The command and its parameters
+ env: Dict
+ The additional environment variables
+
+ Returns
+ -------
+ int
+ The return code of the command
+
+ """
+ # copy the current environment variables and add the values from
+ # `env`
+ cmd_env = os.environ.copy()
+ cmd_env.update(env)
+
+ p = Popen(command,
+ universal_newlines=True,
+ bufsize=0,
+ shell=False,
+ env=cmd_env)
+ _, _ = p.communicate()
+
+ return p.returncode
+
+
+if __name__ == "__main__":
+ cli()
diff --git a/libs/dynaconf/vendor/dotenv/compat.py b/libs/dynaconf/vendor/dotenv/compat.py
new file mode 100644
index 000000000..f8089bf4c
--- /dev/null
+++ b/libs/dynaconf/vendor/dotenv/compat.py
@@ -0,0 +1,49 @@
+import sys
+
+PY2 = sys.version_info[0] == 2 # type: bool
+
+if PY2:
+ from StringIO import StringIO # noqa
+else:
+ from io import StringIO # noqa
+
+
+def is_type_checking():
+ # type: () -> bool
+ try:
+ from typing import TYPE_CHECKING
+ except ImportError:
+ return False
+ return TYPE_CHECKING
+
+
+IS_TYPE_CHECKING = is_type_checking()
+
+
+if IS_TYPE_CHECKING:
+ from typing import Text
+
+
+def to_env(text):
+ # type: (Text) -> str
+ """
+ Encode a string the same way whether it comes from the environment or a `.env` file.
+ """
+ if PY2:
+ return text.encode(sys.getfilesystemencoding() or "utf-8")
+ else:
+ return text
+
+
+def to_text(string):
+ # type: (str) -> Text
+ """
+ Make a string Unicode if it isn't already.
+
+ This is useful for defining raw unicode strings because `ur"foo"` isn't valid in
+ Python 3.
+ """
+ if PY2:
+ return string.decode("utf-8")
+ else:
+ return string
diff --git a/libs/dynaconf/vendor/dotenv/ipython.py b/libs/dynaconf/vendor/dotenv/ipython.py
new file mode 100644
index 000000000..7f1b13d6c
--- /dev/null
+++ b/libs/dynaconf/vendor/dotenv/ipython.py
@@ -0,0 +1,41 @@
+from __future__ import print_function
+
+from IPython.core.magic import Magics, line_magic, magics_class # type: ignore
+from IPython.core.magic_arguments import (argument, magic_arguments, # type: ignore
+ parse_argstring) # type: ignore
+
+from .main import find_dotenv, load_dotenv
+
+
+@magics_class
+class IPythonDotEnv(Magics):
+
+ @magic_arguments()
+ @argument(
+ '-o', '--override', action='store_true',
+ help="Indicate to override existing variables"
+ )
+ @argument(
+ '-v', '--verbose', action='store_true',
+ help="Indicate function calls to be verbose"
+ )
+ @argument('dotenv_path', nargs='?', type=str, default='.env',
+ help='Search in increasingly higher folders for the `dotenv_path`')
+ @line_magic
+ def dotenv(self, line):
+ args = parse_argstring(self.dotenv, line)
+ # Locate the .env file
+ dotenv_path = args.dotenv_path
+ try:
+ dotenv_path = find_dotenv(dotenv_path, True, True)
+ except IOError:
+ print("cannot find .env file")
+ return
+
+ # Load the .env file
+ load_dotenv(dotenv_path, verbose=args.verbose, override=args.override)
+
+
+def load_ipython_extension(ipython):
+ """Register the %dotenv magic."""
+ ipython.register_magics(IPythonDotEnv)
diff --git a/libs/dynaconf/vendor/dotenv/main.py b/libs/dynaconf/vendor/dotenv/main.py
new file mode 100644
index 000000000..c821ef73d
--- /dev/null
+++ b/libs/dynaconf/vendor/dotenv/main.py
@@ -0,0 +1,323 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import, print_function, unicode_literals
+
+import io
+import logging
+import os
+import re
+import shutil
+import sys
+import tempfile
+from collections import OrderedDict
+from contextlib import contextmanager
+
+from .compat import IS_TYPE_CHECKING, PY2, StringIO, to_env
+from .parser import Binding, parse_stream
+
+logger = logging.getLogger(__name__)
+
+if IS_TYPE_CHECKING:
+ from typing import (
+ Dict, Iterator, Match, Optional, Pattern, Union, Text, IO, Tuple
+ )
+ if sys.version_info >= (3, 6):
+ _PathLike = os.PathLike
+ else:
+ _PathLike = Text
+
+ if sys.version_info >= (3, 0):
+ _StringIO = StringIO
+ else:
+ _StringIO = StringIO[Text]
+
+__posix_variable = re.compile(
+ r"""
+ \$\{
+ (?P<name>[^\}:]*)
+ (?::-
+ (?P<default>[^\}]*)
+ )?
+ \}
+ """,
+ re.VERBOSE,
+) # type: Pattern[Text]
+
+
+def with_warn_for_invalid_lines(mappings):
+ # type: (Iterator[Binding]) -> Iterator[Binding]
+ for mapping in mappings:
+ if mapping.error:
+ logger.warning(
+ "Python-dotenv could not parse statement starting at line %s",
+ mapping.original.line,
+ )
+ yield mapping
+
+
+class DotEnv():
+
+ def __init__(self, dotenv_path, verbose=False, encoding=None, interpolate=True):
+ # type: (Union[Text, _PathLike, _StringIO], bool, Union[None, Text], bool) -> None
+ self.dotenv_path = dotenv_path # type: Union[Text,_PathLike, _StringIO]
+ self._dict = None # type: Optional[Dict[Text, Optional[Text]]]
+ self.verbose = verbose # type: bool
+ self.encoding = encoding # type: Union[None, Text]
+ self.interpolate = interpolate # type: bool
+
+ @contextmanager
+ def _get_stream(self):
+ # type: () -> Iterator[IO[Text]]
+ if isinstance(self.dotenv_path, StringIO):
+ yield self.dotenv_path
+ elif os.path.isfile(self.dotenv_path):
+ with io.open(self.dotenv_path, encoding=self.encoding) as stream:
+ yield stream
+ else:
+ if self.verbose:
+ logger.info("Python-dotenv could not find configuration file %s.", self.dotenv_path or '.env')
+ yield StringIO('')
+
+ def dict(self):
+ # type: () -> Dict[Text, Optional[Text]]
+ """Return dotenv as dict"""
+ if self._dict:
+ return self._dict
+
+ values = OrderedDict(self.parse())
+ self._dict = resolve_nested_variables(values) if self.interpolate else values
+ return self._dict
+
+ def parse(self):
+ # type: () -> Iterator[Tuple[Text, Optional[Text]]]
+ with self._get_stream() as stream:
+ for mapping in with_warn_for_invalid_lines(parse_stream(stream)):
+ if mapping.key is not None:
+ yield mapping.key, mapping.value
+
+ def set_as_environment_variables(self, override=False):
+ # type: (bool) -> bool
+ """
+ Load the current dotenv as system environment variables.
+ """
+ for k, v in self.dict().items():
+ if k in os.environ and not override:
+ continue
+ if v is not None:
+ os.environ[to_env(k)] = to_env(v)
+
+ return True
+
+ def get(self, key):
+ # type: (Text) -> Optional[Text]
+ """
+ """
+ data = self.dict()
+
+ if key in data:
+ return data[key]
+
+ if self.verbose:
+ logger.warning("Key %s not found in %s.", key, self.dotenv_path)
+
+ return None
+
+
+def get_key(dotenv_path, key_to_get):
+ # type: (Union[Text, _PathLike], Text) -> Optional[Text]
+ """
+ Gets the value of a given key from the given .env
+
+ If the .env path given doesn't exist, fails
+ """
+ return DotEnv(dotenv_path, verbose=True).get(key_to_get)
+
+
+@contextmanager
+def rewrite(path):
+ # type: (_PathLike) -> Iterator[Tuple[IO[Text], IO[Text]]]
+ try:
+ with tempfile.NamedTemporaryFile(mode="w+", delete=False) as dest:
+ with io.open(path) as source:
+ yield (source, dest) # type: ignore
+ except BaseException:
+ if os.path.isfile(dest.name):
+ os.unlink(dest.name)
+ raise
+ else:
+ shutil.move(dest.name, path)
+
+
+def set_key(dotenv_path, key_to_set, value_to_set, quote_mode="always"):
+ # type: (_PathLike, Text, Text, Text) -> Tuple[Optional[bool], Text, Text]
+ """
+ Adds or updates a key/value pair in the given .env
+
+ If the .env path given doesn't exist, fails instead of risking creating
+ an orphan .env somewhere in the filesystem
+ """
+ value_to_set = value_to_set.strip("'").strip('"')
+ if not os.path.exists(dotenv_path):
+ logger.warning("Can't write to %s - it doesn't exist.", dotenv_path)
+ return None, key_to_set, value_to_set
+
+ if " " in value_to_set:
+ quote_mode = "always"
+
+ if quote_mode == "always":
+ value_out = '"{}"'.format(value_to_set.replace('"', '\\"'))
+ else:
+ value_out = value_to_set
+ line_out = "{}={}\n".format(key_to_set, value_out)
+
+ with rewrite(dotenv_path) as (source, dest):
+ replaced = False
+ for mapping in with_warn_for_invalid_lines(parse_stream(source)):
+ if mapping.key == key_to_set:
+ dest.write(line_out)
+ replaced = True
+ else:
+ dest.write(mapping.original.string)
+ if not replaced:
+ dest.write(line_out)
+
+ return True, key_to_set, value_to_set
+
+
+def unset_key(dotenv_path, key_to_unset, quote_mode="always"):
+ # type: (_PathLike, Text, Text) -> Tuple[Optional[bool], Text]
+ """
+ Removes a given key from the given .env
+
+ If the .env path given doesn't exist, fails
+ If the given key doesn't exist in the .env, fails
+ """
+ if not os.path.exists(dotenv_path):
+ logger.warning("Can't delete from %s - it doesn't exist.", dotenv_path)
+ return None, key_to_unset
+
+ removed = False
+ with rewrite(dotenv_path) as (source, dest):
+ for mapping in with_warn_for_invalid_lines(parse_stream(source)):
+ if mapping.key == key_to_unset:
+ removed = True
+ else:
+ dest.write(mapping.original.string)
+
+ if not removed:
+ logger.warning("Key %s not removed from %s - key doesn't exist.", key_to_unset, dotenv_path)
+ return None, key_to_unset
+
+ return removed, key_to_unset
+
+
+def resolve_nested_variables(values):
+ # type: (Dict[Text, Optional[Text]]) -> Dict[Text, Optional[Text]]
+ def _replacement(name, default):
+ # type: (Text, Optional[Text]) -> Text
+ """
+ get appropriate value for a variable name.
+ first search in environ, if not found,
+ then look into the dotenv variables
+ """
+ default = default if default is not None else ""
+ ret = os.getenv(name, new_values.get(name, default))
+ return ret # type: ignore
+
+ def _re_sub_callback(match):
+ # type: (Match[Text]) -> Text
+ """
+ From a match object gets the variable name and returns
+ the correct replacement
+ """
+ matches = match.groupdict()
+ return _replacement(name=matches["name"], default=matches["default"]) # type: ignore
+
+ new_values = {}
+
+ for k, v in values.items():
+ new_values[k] = __posix_variable.sub(_re_sub_callback, v) if v is not None else None
+
+ return new_values
+
+
+def _walk_to_root(path):
+ # type: (Text) -> Iterator[Text]
+ """
+ Yield directories starting from the given directory up to the root
+ """
+ if not os.path.exists(path):
+ raise IOError('Starting path not found')
+
+ if os.path.isfile(path):
+ path = os.path.dirname(path)
+
+ last_dir = None
+ current_dir = os.path.abspath(path)
+ while last_dir != current_dir:
+ yield current_dir
+ parent_dir = os.path.abspath(os.path.join(current_dir, os.path.pardir))
+ last_dir, current_dir = current_dir, parent_dir
+
+
+def find_dotenv(filename='.env', raise_error_if_not_found=False, usecwd=False):
+ # type: (Text, bool, bool) -> Text
+ """
+ Search in increasingly higher folders for the given file
+
+ Returns path to the file if found, or an empty string otherwise
+ """
+
+ def _is_interactive():
+ """ Decide whether this is running in a REPL or IPython notebook """
+ main = __import__('__main__', None, None, fromlist=['__file__'])
+ return not hasattr(main, '__file__')
+
+ if usecwd or _is_interactive() or getattr(sys, 'frozen', False):
+ # Should work without __file__, e.g. in REPL or IPython notebook.
+ path = os.getcwd()
+ else:
+ # will work for .py files
+ frame = sys._getframe()
+ # find first frame that is outside of this file
+ if PY2 and not __file__.endswith('.py'):
+ # in Python2 __file__ extension could be .pyc or .pyo (this doesn't account
+ # for edge case of Python compiled for non-standard extension)
+ current_file = __file__.rsplit('.', 1)[0] + '.py'
+ else:
+ current_file = __file__
+
+ while frame.f_code.co_filename == current_file:
+ assert frame.f_back is not None
+ frame = frame.f_back
+ frame_filename = frame.f_code.co_filename
+ path = os.path.dirname(os.path.abspath(frame_filename))
+
+ for dirname in _walk_to_root(path):
+ check_path = os.path.join(dirname, filename)
+ if os.path.isfile(check_path):
+ return check_path
+
+ if raise_error_if_not_found:
+ raise IOError('File not found')
+
+ return ''
+
+
+def load_dotenv(dotenv_path=None, stream=None, verbose=False, override=False, interpolate=True, **kwargs):
+ # type: (Union[Text, _PathLike, None], Optional[_StringIO], bool, bool, bool, Union[None, Text]) -> bool
+ """Parse a .env file and then load all the variables found as environment variables.
+
+ - *dotenv_path*: absolute or relative path to .env file.
+ - *stream*: `StringIO` object with .env content.
+ - *verbose*: whether to output the warnings related to missing .env file etc. Defaults to `False`.
+ - *override*: whether to override the system environment variables with the variables in `.env` file.
+ Defaults to `False`.
+ """
+ f = dotenv_path or stream or find_dotenv()
+ return DotEnv(f, verbose=verbose, interpolate=interpolate, **kwargs).set_as_environment_variables(override=override)
+
+
+def dotenv_values(dotenv_path=None, stream=None, verbose=False, interpolate=True, **kwargs):
+ # type: (Union[Text, _PathLike, None], Optional[_StringIO], bool, bool, Union[None, Text]) -> Dict[Text, Optional[Text]] # noqa: E501
+ f = dotenv_path or stream or find_dotenv()
+ return DotEnv(f, verbose=verbose, interpolate=interpolate, **kwargs).dict()
diff --git a/libs/dynaconf/vendor/dotenv/parser.py b/libs/dynaconf/vendor/dotenv/parser.py
new file mode 100644
index 000000000..2c93cbd01
--- /dev/null
+++ b/libs/dynaconf/vendor/dotenv/parser.py
@@ -0,0 +1,237 @@
+import codecs
+import re
+
+from .compat import IS_TYPE_CHECKING, to_text
+
+if IS_TYPE_CHECKING:
+ from typing import ( # noqa:F401
+ IO, Iterator, Match, NamedTuple, Optional, Pattern, Sequence, Text,
+ Tuple
+ )
+
+
+def make_regex(string, extra_flags=0):
+ # type: (str, int) -> Pattern[Text]
+ return re.compile(to_text(string), re.UNICODE | extra_flags)
+
+
+_newline = make_regex(r"(\r\n|\n|\r)")
+_multiline_whitespace = make_regex(r"\s*", extra_flags=re.MULTILINE)
+_whitespace = make_regex(r"[^\S\r\n]*")
+_export = make_regex(r"(?:export[^\S\r\n]+)?")
+_single_quoted_key = make_regex(r"'([^']+)'")
+_unquoted_key = make_regex(r"([^=\#\s]+)")
+_equal_sign = make_regex(r"(=[^\S\r\n]*)")
+_single_quoted_value = make_regex(r"'((?:\\'|[^'])*)'")
+_double_quoted_value = make_regex(r'"((?:\\"|[^"])*)"')
+_unquoted_value_part = make_regex(r"([^ \r\n]*)")
+_comment = make_regex(r"(?:[^\S\r\n]*#[^\r\n]*)?")
+_end_of_line = make_regex(r"[^\S\r\n]*(?:\r\n|\n|\r|$)")
+_rest_of_line = make_regex(r"[^\r\n]*(?:\r|\n|\r\n)?")
+_double_quote_escapes = make_regex(r"\\[\\'\"abfnrtv]")
+_single_quote_escapes = make_regex(r"\\[\\']")
+
+
+try:
+ # this is necessary because we only import these from typing
+ # when we are type checking, and the linter is upset if we
+ # re-import
+ import typing
+
+ Original = typing.NamedTuple(
+ "Original",
+ [
+ ("string", typing.Text),
+ ("line", int),
+ ],
+ )
+
+ Binding = typing.NamedTuple(
+ "Binding",
+ [
+ ("key", typing.Optional[typing.Text]),
+ ("value", typing.Optional[typing.Text]),
+ ("original", Original),
+ ("error", bool),
+ ],
+ )
+except ImportError:
+ from collections import namedtuple
+ Original = namedtuple( # type: ignore
+ "Original",
+ [
+ "string",
+ "line",
+ ],
+ )
+ Binding = namedtuple( # type: ignore
+ "Binding",
+ [
+ "key",
+ "value",
+ "original",
+ "error",
+ ],
+ )
+
+
+class Position:
+ def __init__(self, chars, line):
+ # type: (int, int) -> None
+ self.chars = chars
+ self.line = line
+
+ @classmethod
+ def start(cls):
+ # type: () -> Position
+ return cls(chars=0, line=1)
+
+ def set(self, other):
+ # type: (Position) -> None
+ self.chars = other.chars
+ self.line = other.line
+
+ def advance(self, string):
+ # type: (Text) -> None
+ self.chars += len(string)
+ self.line += len(re.findall(_newline, string))
+
+
+class Error(Exception):
+ pass
+
+
+class Reader:
+ def __init__(self, stream):
+ # type: (IO[Text]) -> None
+ self.string = stream.read()
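+ # The whole stream is read eagerly; Position objects index into this string.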
+ self.position = Position.start()
+ self.mark = Position.start()
+
+ def has_next(self):
+ # type: () -> bool
+ return self.position.chars < len(self.string)
+
+ def set_mark(self):
+ # type: () -> None
+ self.mark.set(self.position)
+
+ def get_marked(self):
+ # type: () -> Original
+ return Original(
+ string=self.string[self.mark.chars:self.position.chars],
+ line=self.mark.line,
+ )
+
+ def peek(self, count):
+ # type: (int) -> Text
+ return self.string[self.position.chars:self.position.chars + count]
+
+ def read(self, count):
+ # type: (int) -> Text
+ result = self.string[self.position.chars:self.position.chars + count]
+ if len(result) < count:
+ raise Error("read: End of string")
+ self.position.advance(result)
+ return result
+
+ def read_regex(self, regex):
+ # type: (Pattern[Text]) -> Sequence[Text]
+ match = regex.match(self.string, self.position.chars)
+ if match is None:
+ raise Error("read_regex: Pattern not found")
+ self.position.advance(self.string[match.start():match.end()])
+ return match.groups()
+
+
+def decode_escapes(regex, string):
+ # type: (Pattern[Text], Text) -> Text
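+ # Replace backslash escape sequences matched by the given regex (e.g. \n, \")
+ # with the characters they denote.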
+ def decode_match(match):
+ # type: (Match[Text]) -> Text
+ return codecs.decode(match.group(0), 'unicode-escape') # type: ignore
+
+ return regex.sub(decode_match, string)
+
+
+def parse_key(reader):
+ # type: (Reader) -> Optional[Text]
+ char = reader.peek(1)
+ if char == "#":
+ return None
+ elif char == "'":
+ (key,) = reader.read_regex(_single_quoted_key)
+ else:
+ (key,) = reader.read_regex(_unquoted_key)
+ return key
+
+
+def parse_unquoted_value(reader):
+ # type: (Reader) -> Text
+ value = u""
+ while True:
+ (part,) = reader.read_regex(_unquoted_value_part)
+ value += part
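+ # _unquoted_value_part stops at any space: peek two characters to decide
+ # whether the space ends the value (EOF, newline, or a comment/space comes
+ # next) or is interior whitespace that still belongs to the value.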
+ after = reader.peek(2)
+ if len(after) < 2 or after[0] in u"\r\n" or after[1] in u" #\r\n":
+ return value
+ value += reader.read(2)
+
+
+def parse_value(reader):
+ # type: (Reader) -> Text
+ char = reader.peek(1)
+ if char == u"'":
+ (value,) = reader.read_regex(_single_quoted_value)
+ return decode_escapes(_single_quote_escapes, value)
+ elif char == u'"':
+ (value,) = reader.read_regex(_double_quoted_value)
+ return decode_escapes(_double_quote_escapes, value)
+ elif char in (u"", u"\n", u"\r"):
+ return u""
+ else:
+ return parse_unquoted_value(reader)
+
+
+def parse_binding(reader):
+ # type: (Reader) -> Binding
+ reader.set_mark()
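+ # Remember where this binding starts so get_marked() can attach the
+ # original text to the Binding (used for error reporting).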
+ try:
+ reader.read_regex(_multiline_whitespace)
+ if not reader.has_next():
+ return Binding(
+ key=None,
+ value=None,
+ original=reader.get_marked(),
+ error=False,
+ )
+ reader.read_regex(_export)
+ key = parse_key(reader)
+ reader.read_regex(_whitespace)
+ if reader.peek(1) == "=":
+ reader.read_regex(_equal_sign)
+ value = parse_value(reader) # type: Optional[Text]
+ else:
+ value = None
+ reader.read_regex(_comment)
+ reader.read_regex(_end_of_line)
+ return Binding(
+ key=key,
+ value=value,
+ original=reader.get_marked(),
+ error=False,
+ )
+ except Error:
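+ # Malformed line: skip to the end of the line and return an error
+ # Binding that still carries the original text for diagnostics.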
+ reader.read_regex(_rest_of_line)
+ return Binding(
+ key=None,
+ value=None,
+ original=reader.get_marked(),
+ error=True,
+ )
+
+
+def parse_stream(stream):
+ # type: (IO[Text]) -> Iterator[Binding]
+ reader = Reader(stream)
+ while reader.has_next():
+ yield parse_binding(reader)
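+
+
+# Example of the parser's contract (a sketch, not part of the vendored API):
+# parse_stream yields one Binding per logical line; comment-only and blank
+# lines come back with key=None, malformed lines with error=True.
+#
+#     import io
+#     for b in parse_stream(io.StringIO(u"export A=1 # note\nB='x y'\n")):
+#         print(b.key, b.value, b.error)  # -> A 1 False, then B x y False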
diff --git a/libs/dynaconf/vendor/dotenv/py.typed b/libs/dynaconf/vendor/dotenv/py.typed
new file mode 100644
index 000000000..7632ecf77
--- /dev/null
+++ b/libs/dynaconf/vendor/dotenv/py.typed
@@ -0,0 +1 @@
+# Marker file for PEP 561
diff --git a/libs/dynaconf/vendor/dotenv/version.py b/libs/dynaconf/vendor/dotenv/version.py
new file mode 100644
index 000000000..f23a6b39d
--- /dev/null
+++ b/libs/dynaconf/vendor/dotenv/version.py
@@ -0,0 +1 @@
+__version__ = "0.13.0"
diff --git a/libs/dynaconf/vendor/ruamel/__init__.py b/libs/dynaconf/vendor/ruamel/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/libs/dynaconf/vendor/ruamel/__init__.py
diff --git a/libs/dynaconf/vendor/ruamel/yaml/CHANGES b/libs/dynaconf/vendor/ruamel/yaml/CHANGES
new file mode 100644
index 000000000..a70a8eff9
--- /dev/null
+++ b/libs/dynaconf/vendor/ruamel/yaml/CHANGES
@@ -0,0 +1,957 @@
+[0, 16, 10]: 2020-02-12
+ - (auto) updated image references in README to sourceforge
+
+[0, 16, 9]: 2020-02-11
+ - update CHANGES
+
+[0, 16, 8]: 2020-02-11
+ - update requirements so that ruamel.yaml.clib is installed for 3.8,
+ as it has become available (via manylinux builds)
+
+[0, 16, 7]: 2020-01-30
+ - fix typechecking issue on TaggedScalar (reported by Jens Nielsen)
+ - fix error in dumping literal scalar in sequence with comments before element
+ (reported by `EJ Etherington <https://sourceforge.net/u/ejether/>`__)
+
+[0, 16, 6]: 2020-01-20
+ - fix empty string mapping key roundtripping with preservation of quotes as `? ''`
+ (reported via email by Tomer Aharoni).
+ - fix incorrect state setting in class constructor (reported by `Douglas Raillard
+ <https://bitbucket.org/%7Bcf052d92-a278-4339-9aa8-de41923bb556%7D/>`__)
+ - adjust deprecation warning test for Hashable, as that no longer warns (reported
+ by `Jason Montleon <https://bitbucket.org/%7B8f377d12-8d5b-4069-a662-00a2674fee4e%7D/>`__)
+
+[0, 16, 5]: 2019-08-18
+ - allow for ``YAML(typ=['unsafe', 'pytypes'])``
+
+[0, 16, 4]: 2019-08-16
+ - fix output of TAG directives with # (reported by `Thomas Smith
+ <https://bitbucket.org/%7Bd4c57a72-f041-4843-8217-b4d48b6ece2f%7D/>`__)
+
+
+[0, 16, 3]: 2019-08-15
+ - move setting of version based on YAML directive to scanner, allowing the
+ file version to be checked during TAG directive scanning
+
+[0, 16, 2]: 2019-08-15
+ - preserve YAML and TAG directives on roundtrip, correctly output #
+ in URL for YAML 1.2 (both reported by `Thomas Smith
+ <https://bitbucket.org/%7Bd4c57a72-f041-4843-8217-b4d48b6ece2f%7D/>`__)
+
+[0, 16, 1]: 2019-08-08
+ - Force the use of new version of ruamel.yaml.clib (reported by `Alex Joz
+ <https://bitbucket.org/%7B9af55900-2534-4212-976c-61339b6ffe14%7D/>`__)
+ - Allow '#' in tag URI as these are allowed in YAML 1.2 (reported by
+ `Thomas Smith
+ <https://bitbucket.org/%7Bd4c57a72-f041-4843-8217-b4d48b6ece2f%7D/>`__)
+
+[0, 16, 0]: 2019-07-25
+ - split of C source that generates .so file to ruamel.yaml.clib
+ - duplicate keys are now an error when working with the old API as well
+
+[0, 15, 100]: 2019-07-17
+ - fixing issue with dumping deep-copied data from commented YAML, by
+ providing both the memo parameter to __deepcopy__, and by allowing
+ startmarks to be compared on their content (reported by `Theofilos
+ Petsios
+ <https://bitbucket.org/%7Be550bc5d-403d-4fda-820b-bebbe71796d3%7D/>`__)
+
+[0, 15, 99]: 2019-07-12
+ - add `py.typed` to distribution, based on a PR submitted by
+ `Michael Crusoe
+ <https://bitbucket.org/%7Bc9fbde69-e746-48f5-900d-34992b7860c8%7D/>`__
+ - merge PR 40 (also by Michael Crusoe) to more accurately specify
+ repository in the README (also reported in a misunderstood issue
+ some time ago)
+
+[0, 15, 98]: 2019-07-09
+ - regenerate ext/_ruamel_yaml.c with Cython version 0.29.12, needed
+ for Python 3.8.0b2 (reported by `John Vandenberg
+ <https://bitbucket.org/%7B6d4e8487-3c97-4dab-a060-088ec50c682c%7D/>`__)
+
+[0, 15, 97]: 2019-06-06
+ - regenerate ext/_ruamel_yaml.c with Cython version 0.29.10, needed for
+ Python 3.8.0b1
+ - regenerate ext/_ruamel_yaml.c with Cython version 0.29.9, needed for
+ Python 3.8.0a4 (reported by `Anthony Sottile
+ <https://bitbucket.org/%7B569cc8ea-0d9e-41cb-94a4-19ea517324df%7D/>`__)
+
+[0, 15, 96]: 2019-05-16
+ - fix failure to indent comments on round-trip anchored block style
+ scalars in block sequence (reported by `William Kimball
+ <https://bitbucket.org/%7Bba35ed20-4bb0-46f8-bb5d-c29871e86a22%7D/>`__)
+
+[0, 15, 95]: 2019-05-16
+ - fix failure to round-trip anchored scalars in block sequence
+ (reported by `William Kimball
+ <https://bitbucket.org/%7Bba35ed20-4bb0-46f8-bb5d-c29871e86a22%7D/>`__)
+ - wheel files for Python 3.4 no longer provided (`Python 3.4 EOL 2019-03-18
+ <https://www.python.org/dev/peps/pep-0429/>`__)
+
+[0, 15, 94]: 2019-04-23
+ - fix missing line-break after end-of-file comments not ending in
+ line-break (reported by `Philip Thompson
+ <https://bitbucket.org/%7Be42ba205-0876-4151-bcbe-ccaea5bd13ce%7D/>`__)
+
+[0, 15, 93]: 2019-04-21
+ - fix failure to parse empty implicit flow mapping key
+ - in YAML 1.1 plain scalars `y`, `n`, `Y`, and `N` are now
+ correctly recognised as booleans and such strings dumped quoted
+ (reported by `Marcel Bollmann
+ <https://bitbucket.org/%7Bd8850921-9145-4ad0-ac30-64c3bd9b036d%7D/>`__)
+
+[0, 15, 92]: 2019-04-16
+ - fix failure to parse empty implicit block mapping key (reported by
+ `Nolan W <https://bitbucket.org/i2labs/>`__)
+
+[0, 15, 91]: 2019-04-05
+ - allowing duplicate keys would not work for merge keys (reported by mamacdon on
+ `StackOverflow <https://stackoverflow.com/questions/55540686/>`__)
+
+[0, 15, 90]: 2019-04-04
+ - fix issue with updating `CommentedMap` from list of tuples (reported by
+ `Peter Henry <https://bitbucket.org/mosbasik/>`__)
+
+[0, 15, 89]: 2019-02-27
+ - fix for items with flow-mapping in block sequence output on single line
+ (reported by `Zahari Dim <https://bitbucket.org/zahari_dim/>`__)
+ - fix for safe dumping erroring in creation of RepresenterError when dumping namedtuple
+ (reported and solution by `Jaakko Kantojärvi <https://bitbucket.org/raphendyr/>`__)
+
+[0, 15, 88]: 2019-02-12
+ - fix inclusion of python code from the subpackage data (containing extra tests,
+ reported by `Florian Apolloner <https://bitbucket.org/apollo13/>`__)
+
+[0, 15, 87]: 2019-01-22
+ - fix problem with empty lists and the code to reinsert merge keys (reported via email
+ by Zaloo)
+
+[0, 15, 86]: 2019-01-16
+ - reinsert merge key in its old position (reported by grumbler on
+ `StackOverflow <https://stackoverflow.com/a/54206512/1307905>`__)
+ - fix for issue with non-ASCII anchor names (reported and fix
+ provided by Dandaleon Flux via email)
+ - fix for issue when parsing flow mapping value starting with colon (in pure Python only)
+ (reported by `FichteFoll <https://bitbucket.org/FichteFoll/>`__)
+
+[0, 15, 85]: 2019-01-08
+ - the types used by `SafeConstructor` for mappings and sequences can
+ now by set by assigning to `XXXConstructor.yaml_base_dict_type`
+ (and `..._list_type`), preventing the need to copy two methods
+ with 50+ lines that had `var = {}` hardcoded. (Implemented to
+ help solve a feature request by `Anthony Sottile
+ <https://bitbucket.org/asottile/>`__ in an easier way)
+
+[0, 15, 84]: 2019-01-07
+ - fix for `CommentedMap.copy()` not returning `CommentedMap`, let alone copying comments etc.
+ (reported by `Anthony Sottile <https://bitbucket.org/asottile/>`__)
+
+[0, 15, 83]: 2019-01-02
+ - fix for bug in roundtripping aliases used as key (reported via email by Zaloo)
+
+[0, 15, 82]: 2018-12-28
+ - anchors and aliases on scalar int, float, string and bool are now preserved. Anchors
+ do not need a referring alias for these (reported by
+ `Alex Harvey <https://bitbucket.org/alexharv074/>`__)
+ - anchors no longer lost on tagged objects when roundtripping (reported by `Zaloo
+ <https://bitbucket.org/zaloo/>`__)
+
+[0, 15, 81]: 2018-12-06
+ - fix issue saving methods of metaclass derived classes (reported and fix provided
+ by `Douglas Raillard <https://bitbucket.org/DouglasRaillard/>`__)
+
+[0, 15, 80]: 2018-11-26
+ - fix issue emitting BEL character when round-tripping invalid folded input
+ (reported by Isaac on `StackOverflow <https://stackoverflow.com/a/53471217/1307905>`__)
+
+[0, 15, 79]: 2018-11-21
+ - fix issue with anchors nested deeper than alias (reported by gaFF on
+ `StackOverflow <https://stackoverflow.com/a/53397781/1307905>`__)
+
+[0, 15, 78]: 2018-11-15
+ - fix setup issue for 3.8 (reported by `Sidney Kuyateh
+ <https://bitbucket.org/autinerd/>`__)
+
+[0, 15, 77]: 2018-11-09
+ - setting `yaml.sort_base_mapping_type_on_output = False` will prevent
+ explicit sorting by keys in the base representer of mappings. Roundtrip
+ already did not do this. Usage only makes real sense for Python 3.6+
+ (feature request by `Sebastian Gerber <https://bitbucket.org/spacemanspiff2007/>`__).
+ - implement Python version check in YAML metadata in ``_test/test_z_data.py``
+
+[0, 15, 76]: 2018-11-01
+ - fix issue with empty mapping and sequence loaded as flow-style
+ (mapping reported by `Min RK <https://bitbucket.org/minrk/>`__, sequence
+ by `Maged Ahmed <https://bitbucket.org/maged2/>`__)
+
+[0, 15, 75]: 2018-10-27
+ - fix issue with single '?' scalar (reported by `Terrance
+ <https://bitbucket.org/OllieTerrance/>`__)
+ - fix issue with duplicate merge keys (prompted by `answering
+ <https://stackoverflow.com/a/52852106/1307905>`__ a
+ `StackOverflow question <https://stackoverflow.com/q/52851168/1307905>`__
+ by `math <https://stackoverflow.com/users/1355634/math>`__)
+
+[0, 15, 74]: 2018-10-17
+ - fix dropping of comment on round-trip before a sequence item that is itself a sequence item
+ (reported by `Thorsten Kampe <https://bitbucket.org/thorstenkampe/>`__)
+
+[0, 15, 73]: 2018-10-16
+ - fix irregular output on pre-comment in sequence within sequence (reported
+ by `Thorsten Kampe <https://bitbucket.org/thorstenkampe/>`__)
+ - allow non-compact (i.e. next line) dumping sequence/mapping within sequence.
+
+[0, 15, 72]: 2018-10-06
+ - fix regression on explicit 1.1 loading with the C based scanner/parser
+ (reported by `Tomas Vavra <https://bitbucket.org/xtomik/>`__)
+
+[0, 15, 71]: 2018-09-26
+ - fix regression where handcrafted CommentedMaps could not be initiated (reported by
+ `Dan Helfman <https://bitbucket.org/dhelfman/>`__)
+ - fix regression with non-root literal scalars that needed indent indicator
+ (reported by `Clark Breyman <https://bitbucket.org/clarkbreyman/>`__)
+ - tag:yaml.org,2002:python/object/apply now also uses __qualname__ on PY3
+ (reported by `Douglas RAILLARD <https://bitbucket.org/DouglasRaillard/>`__)
+
+[0, 15, 70]: 2018-09-21
+ - reverted CommentedMap and CommentedSeq to subclass ordereddict resp. list,
+ reimplemented merge maps so that both ``dict(**commented_map_instance)`` and JSON
+ dumping works. This also allows checking with ``isinstance()`` on ``dict`` resp. ``list``.
+ (Proposed by `Stuart Berg <https://bitbucket.org/stuarteberg/>`__, with feedback
+ from `blhsing <https://stackoverflow.com/users/6890912/blhsing>`__ on
+ `StackOverflow <https://stackoverflow.com/q/52314186/1307905>`__)
+
+[0, 15, 69]: 2018-09-20
+ - fix issue with dump_all gobbling end-of-document comments on parsing
+ (reported by `Pierre B. <https://bitbucket.org/octplane/>`__)
+
+[0, 15, 68]: 2018-09-20
+ - fix issue with parsable, but incorrect, output with nested flow-style sequences
+ (reported by `Dougal Seeley <https://bitbucket.org/dseeley/>`__)
+ - fix issue with loading Python objects that have __setstate__ and recursion in parameters
+ (reported by `Douglas RAILLARD <https://bitbucket.org/DouglasRaillard/>`__)
+
+[0, 15, 67]: 2018-09-19
+ - fix issue with extra space inserted with non-root literal strings
+ (Issue reported and PR with fix provided by
+ `Naomi Seyfer <https://bitbucket.org/sixolet/>`__.)
+
+[0, 15, 66]: 2018-09-07
+ - fix issue with fold indicating characters inserted in safe_load-ed folded strings
+ (reported by `Maximilian Hils <https://bitbucket.org/mhils/>`__).
+
+[0, 15, 65]: 2018-09-07
+ - fix issue #232 revert to throw ParserError for unexpected ``]``
+ and ``}`` instead of IndexError. (Issue reported and PR with fix
+ provided by `Naomi Seyfer <https://bitbucket.org/sixolet/>`__.)
+ - added ``key`` and ``reverse`` parameter (suggested by Jannik Klemm via email)
+ - indent root level literal scalars that have directive or document end markers
+ at the beginning of a line
+
+[0, 15, 64]: 2018-08-30
+ - support round-trip of tagged sequences: ``!Arg [a, {b: 1}]``
+ - single entry mappings in flow sequences now written by default without braces;
+ set ``yaml.brace_single_entry_mapping_in_flow_sequence=True`` to force
+ getting ``[a, {b: 1}, {c: {d: 2}}]`` instead of the default ``[a, b: 1, c: {d: 2}]``
+ - fix issue when roundtripping floats starting with a dot such as ``.5``
+ (reported by `Harrison Gregg <https://bitbucket.org/HarrisonGregg/>`__)
+
+[0, 15, 63]: 2018-08-29
+ - small fix only necessary for Windows users that don't use wheels.
+
+[0, 15, 62]: 2018-08-29
+ - C based reader/scanner & emitter now allow setting of 1.2 as YAML version.
+ **The loading/dumping is still YAML 1.1 code**, so use the common subset of
+ YAML 1.2 and 1.1 (reported by `Ge Yang <https://bitbucket.org/yangge/>`__)
+
+[0, 15, 61]: 2018-08-23
+ - support for round-tripping folded style scalars (initially requested
+ by `Johnathan Viduchinsky <https://bitbucket.org/johnathanvidu/>`__)
+ - update of C code
+ - speed up of scanning (~30% depending on the input)
+
+[0, 15, 60]: 2018-08-18
+ - cleanup for mypy
+ - spurious print in library (reported by
+ `Lele Gaifax <https://bitbucket.org/lele/>`__), now automatically checked
+
+[0, 15, 59]: 2018-08-17
+ - issue with C based loader and leading zeros (reported by
+ `Tom Hamilton Stubber <https://bitbucket.org/TomHamiltonStubber/>`__)
+
+[0, 15, 58]: 2018-08-17
+ - simple mappings can now be used as keys when round-tripping::
+
+ {a: 1, b: 2}: hello world
+
+ although using the obvious operations (del, popitem) on the key will
+ fail, you can mutate it by going through its attributes. If you load the
+ above YAML in `d`, then changing the value is cumbersome:
+
+ d = {CommentedKeyMap([('a', 1), ('b', 2)]): "goodbye"}
+
+ and changing the key even more so:
+
+ d[CommentedKeyMap([('b', 1), ('a', 2)])] = d.pop(
+ CommentedKeyMap([('a', 1), ('b', 2)]))
+
+ (you can use a `dict` instead of a list of tuples (or ordereddict), but that might result
+ in a different order of the keys of the key in the output)
+ - check integers to dump with 1.2 patterns instead of 1.1 (reported by
+ `Lele Gaifax <https://bitbucket.org/lele/>`__)
+
+
+[0, 15, 57]: 2018-08-15
+ - Fix that CommentedSeq could no longer be used in addition or be copied
+ (reported by `Christopher Wright <https://bitbucket.org/CJ-Wright4242/>`__)
+
+[0, 15, 56]: 2018-08-15
+ - fix issue with ``python -O`` optimizing away code (reported, and detailed cause
+ pinpointed, by `Alex Grönholm <https://bitbucket.org/agronholm/>`__
+
+[0, 15, 55]: 2018-08-14
+
+ - unmade ``CommentedSeq`` a subclass of ``list``. It is now
+ indirectly a subclass of the standard
+ ``collections.abc.MutableSequence`` (without .abc if you are
+ still on Python2.7). If you do ``isinstance(yaml.load('[1, 2]'),
+ list)`` anywhere in your code, replace ``list`` with
+ ``MutableSequence``. Directly, ``CommentedSeq`` is a subclass of
+ the abstract baseclass ``ruamel.yaml.compat.MutableSliceableSequence``,
+ with the result that *(extended) slicing is supported on
+ ``CommentedSeq``*.
+ (reported by `Stuart Berg <https://bitbucket.org/stuarteberg/>`__)
+ - duplicate keys (or their values) with non-ascii now correctly
+ report in Python2, instead of raising a Unicode error.
+ (Reported by `Jonathan Pyle <https://bitbucket.org/jonathan_pyle/>`__)
+
+[0, 15, 54]: 2018-08-13
+
+ - fix issue where a comment could pop-up twice in the output (reported by
+ `Mike Kazantsev <https://bitbucket.org/mk_fg/>`__ and by
+ `Nate Peterson <https://bitbucket.org/ndpete21/>`__)
+ - fix issue where JSON object (mapping) without spaces was not parsed
+ properly (reported by `Marc Schmidt <https://bitbucket.org/marcj/>`__)
+ - fix issue where comments after empty flow-style mappings were not emitted
+ (reported by `Qinfench Chen <https://bitbucket.org/flyin5ish/>`__)
+
+[0, 15, 53]: 2018-08-12
+ - fix issue with flow style mapping with comments gobbled newline (reported
+ by `Christopher Lambert <https://bitbucket.org/XN137/>`__)
+ - fix issue where single '+' under YAML 1.2 was interpreted as
+ integer, erroring out (reported by `Jethro Yu
+ <https://bitbucket.org/jcppkkk/>`__)
+
+[0, 15, 52]: 2018-08-09
+ - added `.copy()` mapping representation for round-tripping
+ (``CommentedMap``) to fix incomplete copies of merged mappings
+ (reported by `Will Richards
+ <https://bitbucket.org/will_richards/>`__)
+ - Also unmade that class a subclass of ordereddict to solve incorrect behaviour
+ for ``{**merged-mapping}`` and ``dict(**merged-mapping)`` (reported by
+ `Filip Matzner <https://bitbucket.org/FloopCZ/>`__)
+
+[0, 15, 51]: 2018-08-08
+ - Fix method name dumps (were not dotted) and loads (reported by `Douglas Raillard
+ <https://bitbucket.org/DouglasRaillard/>`__)
+ - Fix spurious trailing white-space caused when the comment start
+ column was no longer reached and there was no actual EOL comment
+ (e.g. following empty line) and doing substitutions, or when
+ quotes around scalars got dropped. (reported by `Thomas Guillet
+ <https://bitbucket.org/guillett/>`__)
+
+[0, 15, 50]: 2018-08-05
+ - Allow ``YAML()`` as a context manager for output, thereby making it much easier
+ to generate multi-documents in a stream.
+ - Fix issue with incorrect type information for `load()` and `dump()` (reported
+ by `Jimbo Jim <https://bitbucket.org/jimbo1qaz/>`__)
+
+[0, 15, 49]: 2018-08-05
+ - fix preservation of leading newlines in root level literal style scalar,
+ and preserve comment after literal style indicator (``| # some comment``)
+ Both needed for round-tripping multi-doc streams in
+ `ryd <https://pypi.org/project/ryd/>`__.
+
+[0, 15, 48]: 2018-08-03
+ - housekeeping: ``oitnb`` for formatting, mypy 0.620 upgrade and conformity
+
+[0, 15, 47]: 2018-07-31
+ - fix broken 3.6 manylinux1 (result of an unclean ``build``; reported by
+ `Roman Sichnyi <https://bitbucket.org/rsichnyi-gl/>`__)
+
+
+[0, 15, 46]: 2018-07-29
+ - fixed DeprecationWarning for importing from ``collections`` on 3.7
+ (issue 210, reported by `Reinoud Elhorst
+ <https://bitbucket.org/reinhrst/>`__). It was `difficult to find
+ why tox/pytest did not report
+ <https://stackoverflow.com/q/51573204/1307905>`__ and just as time
+ consuming to actually `fix
+ <https://stackoverflow.com/a/51573205/1307905>`__ the tests.
+
+[0, 15, 45]: 2018-07-26
+ - After adding failing test for ``YAML.load_all(Path())``, remove StopIteration
+ (PR provided by `Zachary Buhman <https://bitbucket.org/buhman/>`__,
+ also reported by `Steven Hiscocks <https://bitbucket.org/sdhiscocks/>`__).
+
+[0, 15, 44]: 2018-07-14
+ - Correct loading plain scalars consisting of numerals only and
+ starting with `0`, when not explicitly specifying YAML version
+ 1.1. This also fixes the issue about dumping string `'019'` as
+ plain scalars as reported by `Min RK
+ <https://bitbucket.org/minrk/>`__, that prompted this change.
+
+[0, 15, 43]: 2018-07-12
+ - merge PR33: Python2.7 on Windows is narrow, but has no
+ ``sysconfig.get_config_var('Py_UNICODE_SIZE')``. (merge provided by
+ `Marcel Bargull <https://bitbucket.org/mbargull/>`__)
+ - ``register_class()`` now returns class (proposed by
+ `Mike Nerone <https://bitbucket.org/Manganeez/>`__)
+
+[0, 15, 42]: 2018-07-01
+ - fix regression showing only on narrow Python 2.7 (py27mu) builds
+ (with help from
+ `Marcel Bargull <https://bitbucket.org/mbargull/>`__ and
+ `Colm O'Connor <>`__).
+ - run pre-commit ``tox`` on Python 2.7 wide and narrow, as well as
+ 3.4/3.5/3.6/3.7/pypy
+
+[0, 15, 41]: 2018-06-27
+ - add detection of C-compile failure (investigation prompted by
+ `StackOverflow <https://stackoverflow.com/a/51057399/1307905>`__ by
+ `Emmanuel Blot <https://stackoverflow.com/users/8233409/emmanuel-blot>`__),
+ which had been removed when the dependency on ``libyaml`` was dropped;
+ compiling the C extensions still requires a compiler though.
+
+[0, 15, 40]: 2018-06-18
+ - added links to landing places as suggested in issue 190 by
+ `KostisA <https://bitbucket.org/ankostis/>`__
+ - fixes issue #201: decoding unicode escaped tags on Python2, reported
+ by `Dan Abolafia <https://bitbucket.org/danabo/>`__
+
+[0, 15, 39]: 2018-06-16
+ - merge PR27 improving package startup time (and loading when regexp not
+ actually used), provided by
+ `Marcel Bargull <https://bitbucket.org/mbargull/>`__
+
+[0, 15, 38]: 2018-06-13
+ - fix for losing precision when roundtripping floats by
+ `Rolf Wojtech <https://bitbucket.org/asomov/>`__
+ - fix for hardcoded dir separator not working for Windows by
+ `Nuno André <https://bitbucket.org/nu_no/>`__
+ - typo fix by `Andrey Somov <https://bitbucket.org/asomov/>`__
+
+[0, 15, 37]: 2018-03-21
+ - again trying to create installable files for 187
+
+[0, 15, 36]: 2018-02-07
+ - fix issue 187, incompatibility of C extension with 3.7 (reported by
+ Daniel Blanchard)
+
+[0, 15, 35]: 2017-12-03
+ - allow ``None`` as stream when specifying ``transform`` parameters to
+ ``YAML.dump()``.
+ This is useful if the transforming function doesn't return a meaningful value
+ (inspired by `StackOverflow <https://stackoverflow.com/q/47614862/1307905>`__ by
+ `rsaw <https://stackoverflow.com/users/406281/rsaw>`__).
+
+[0, 15, 34]: 2017-09-17
+ - fix for issue 157: CDumper not dumping floats (reported by Jan Smitka)
+
+[0, 15, 33]: 2017-08-31
+ - support for "undefined" round-tripping tagged scalar objects (in addition to
+ tagged mapping objects). Inspired by a use case presented by Matthew Patton
+ on `StackOverflow <https://stackoverflow.com/a/45967047/1307905>`__.
+ - fix issue 148: replace cryptic error message when using !!timestamp with an
+ incorrectly formatted or non-scalar value. Reported by FichteFoll.
+
+[0, 15, 32]: 2017-08-21
+ - allow setting ``yaml.default_flow_style = None`` (default: ``False``) for
+ ``typ='rt'``.
+ - fix for issue 149: multiplications on ``ScalarFloat`` now return ``float``
+
+[0, 15, 31]: 2017-08-15
+ - fix Comment dumping
+
+[0, 15, 30]: 2017-08-14
+ - fix for issue with "compact JSON" not parsing: ``{"in":{},"out":{}}``
+ (reported on `StackOverflow <https://stackoverflow.com/q/45681626/1307905>`_ by
+ `mjalkio <https://stackoverflow.com/users/5130525/mjalkio>`_)
+
+[0, 15, 29]: 2017-08-14
+ - fix issue #51: different indents for mappings and sequences (reported by
+ Alex Harvey)
+ - fix for flow sequence/mapping as element/value of block sequence with
+ sequence-indent minus dash-offset not equal two.
+
+[0, 15, 28]: 2017-08-13
+ - fix issue #61: merge of merge cannot be __repr__-ed (reported by Tal Liron)
+
+[0, 15, 27]: 2017-08-13
+ - fix issue 62, YAML 1.2 allows ``?`` and ``:`` in plain scalars if non-ambiguous
+ (reported by nowox)
+ - fix lists within lists which would make comments disappear
+
+[0, 15, 26]: 2017-08-10
+ - fix for disappearing comment after empty flow sequence (reported by
+ oit-tzhimmash)
+
+[0, 15, 25]: 2017-08-09
+ - fix for problem with dumping (unloaded) floats (reported by eyenseo)
+
+[0, 15, 24]: 2017-08-09
+ - added ScalarFloat which supports roundtripping of 23.1, 23.100,
+ 42.00E+56, 0.0, -0.0 etc. while keeping the format. Underscores in mantissas
+ are not preserved/supported (yet, is anybody using that?).
+ - (finally) fixed longstanding issue 23 (reported by `Anthony Sottile
+ <https://bitbucket.org/asottile/>`_), now handling comment between block
+ mapping key and value correctly
+ - warn on YAML 1.1 float input that is incorrect (triggered by invalid YAML
+ provided by Cecil Curry)
+ - allow setting of boolean representation (`false`, `true`) by using:
+ ``yaml.boolean_representation = [u'False', u'True']``
+
+[0, 15, 23]: 2017-08-01
+ - fix for round_tripping integers on 2.7.X > sys.maxint (reported by ccatterina)
+
+[0, 15, 22]: 2017-07-28
+ - fix for round_tripping single excl. mark tags doubling (reported and fix by Jan Brezina)
+
+[0, 15, 21]: 2017-07-25
+ - fix for writing unicode in new API, https://stackoverflow.com/a/45281922/1307905
+
+[0, 15, 20]: 2017-07-23
+ - wheels for windows including C extensions
+
+[0, 15, 19]: 2017-07-13
+ - added object constructor for rt, decorator ``yaml_object`` to replace YAMLObject.
+ - fix for problem using load_all with Path() instance
+ - fix for load_all in combination with zero indent block style literal
+ (``pure=True`` only!)
+
+[0, 15, 18]: 2017-07-04
+ - missing ``pure`` attribute on ``YAML`` useful for implementing `!include` tag
+ constructor for `including YAML files in a YAML file
+ <https://stackoverflow.com/a/44913652/1307905>`_
+ - some documentation improvements
+ - trigger of doc build on new revision
+
+[0, 15, 17]: 2017-07-03
+ - support for Unicode supplementary Plane **output** with allow_unicode
+ (input was already supported, triggered by
+ `this <https://stackoverflow.com/a/44875714/1307905>`_ Stack Overflow Q&A)
+
+[0, 15, 16]: 2017-07-01
+ - minor typing issues (reported and fix provided by
+ `Manvendra Singh <https://bitbucket.org/manu-chroma/>`_)
+ - small doc improvements
+
+[0, 15, 15]: 2017-06-27
+ - fix for issue 135, typ='safe' not dumping in Python 2.7
+ (reported by `Andrzej Ostrowski <https://bitbucket.org/aostr123/>`_)
+
+[0, 15, 14]: 2017-06-25
+ - setup.py: change ModuleNotFoundError to ImportError (reported and fix by Asley Drake)
+
+[0, 15, 13]: 2017-06-24
+ - suppress duplicate key warning on mappings with merge keys (reported by
+ Cameron Sweeney)
+
+[0, 15, 12]: 2017-06-24
+ - remove fatal dependency of setup.py on wheel package (reported by
+ Cameron Sweeney)
+
+[0, 15, 11]: 2017-06-24
+ - fix for issue 130, regression in nested merge keys (reported by
+ `David Fee <https://bitbucket.org/dfee/>`_)
+
+[0, 15, 10]: 2017-06-23
+ - top level PreservedScalarString not indented if not explicitly asked to
+ - remove Makefile (not very useful anyway)
+ - some mypy additions
+
+[0, 15, 9]: 2017-06-16
+ - fix for issue 127: tagged scalars were always quoted and separated
+ by a newline when in a block sequence (reported and largely fixed by
+ `Tommy Wang <https://bitbucket.org/twang817/>`_)
+
+[0, 15, 8]: 2017-06-15
+ - allow plug-in install via ``install ruamel.yaml[jinja2]``
+
+[0, 15, 7]: 2017-06-14
+ - add plug-in mechanism for load/dump pre resp. post-processing
+
+[0, 15, 6]: 2017-06-10
+ - a set() with duplicate elements now throws error in rt loading
+ - support for toplevel column zero literal/folded scalar in explicit documents
+
+[0, 15, 5]: 2017-06-08
+ - fix: repeated `load()` on a single `YAML()` instance would fail.
+
+(0, 15, 4) 2017-06-08: |
+ - `transform` parameter on dump that expects a function taking a
+ string and returning a string. This allows transformation of the output
+ before it is written to stream.
+ - some updates to the docs
+
+(0, 15, 3) 2017-06-07:
+ - No longer try to compile C extensions on Windows. Compilation can be forced by setting
+ the environment variable `RUAMEL_FORCE_EXT_BUILD` to some value
+ before starting the `pip install`.
+
+(0, 15, 2) 2017-06-07:
+ - update to conform to mypy 0.511:mypy --strict
+
+(0, 15, 1) 2017-06-07:
+ - Any `duplicate keys <http://yaml.readthedocs.io/en/latest/api.html#duplicate-keys>`_
+ in mappings generate an error (in the old API this change generates a warning until 0.16)
+ - dependency on ruamel.ordereddict for 2.7 now via extras_require
+
+(0, 15, 0) 2017-06-04:
+ - it is now allowed to pass in a ``pathlib.Path`` as "stream" parameter to all
+ load/dump functions
+ - passing in a non-supported object (e.g. a string) as "stream" will result in a
+ much more meaningful YAMLStreamError.
+ - assigning a normal string value to an existing CommentedMap key or CommentedSeq
+ element will result in a value cast to the previous value's type if possible.
+
+(0, 14, 12) 2017-05-14:
+ - fix for issue 119, deepcopy not returning subclasses (reported and PR by
+ Constantine Evans <[email protected]>)
+
+(0, 14, 11) 2017-05-01:
+ - fix for issue 103 allowing implicit documents after document end marker line (``...``)
+ in YAML 1.2
+
+(0, 14, 10) 2017-04-26:
+ - fix problem with emitting using cyaml
+
+(0, 14, 9) 2017-04-22:
+ - remove dependency on ``typing`` while still supporting ``mypy``
+ (http://stackoverflow.com/a/43516781/1307905)
+ - fix unclarity in doc that stated 2.6 is supported (reported by feetdust)
+
+(0, 14, 8) 2017-04-19:
+ - fix Text not available on 3.5.0 and 3.5.1, now proactively setting version guards
+ on all files (reported by `João Paulo Magalhães <https://bitbucket.org/jpmag/>`_)
+
+(0, 14, 7) 2017-04-18:
+ - round trip of integers (decimal, octal, hex, binary) now preserve
+ leading zero(s) padding and underscores. Underscores are presumed
+ to be at regular distances (i.e. ``0o12_345_67`` dumps back as
+ ``0o1_23_45_67`` as the space from the last digit to the
+ underscore before that is the determining factor).
+
+(0, 14, 6) 2017-04-14:
+ - binary, octal and hex integers are now preserved by default. This
+ was a known deficiency. Working on this was prompted by the issue report (112)
+ from devnoname120, as well as the additional experience with `.replace()`
+ on `scalarstring` classes.
+ - fix issue 114: cannot install on Buildozer (reported by mixmastamyk).
+ Setting env. var ``RUAMEL_NO_PIP_INSTALL_CHECK`` will suppress ``pip``-check.
+
+(0, 14, 5) 2017-04-04:
+ - fix issue 109 None not dumping correctly at top level (reported by Andrea Censi)
+ - fix issue 110 .replace on Preserved/DoubleQuoted/SingleQuoted ScalarString
+ would give back "normal" string (reported by sandres23)
+
+(0, 14, 4) 2017-03-31:
+ - fix readme
+
+(0, 14, 3) 2017-03-31:
+ - fix for 0o52 not being a string in YAML 1.1 (reported on
+ `StackOverflow Q&A 43138503 <http://stackoverflow.com/a/43138503/1307905>`_ by
+ `Frank D <http://stackoverflow.com/users/7796630/frank-d>`_)
+
+(0, 14, 2) 2017-03-23:
+ - fix for old default pip on Ubuntu 14.04 (reported by Sébastien Maccagnoni-Munch)
+
+(0.14.1) 2017-03-22:
+ - fix Text not available on 3.5.0 and 3.5.1 (reported by Charles Bouchard-Légaré)
+
+(0.14.0) 2017-03-21:
+ - updates for mypy --strict
+ - preparation for moving away from inheritance in Loader and Dumper, calls from e.g.
+ the Representer to the Serializer.serialize() are now done via the attribute
+ .serializer.serialize(). Usage of .serialize() outside of Serializer will be
+ deprecated soon
+ - some extra tests on main.py functions
+
+(0.13.14) 2017-02-12:
+ - fix for issue 97, clipped block scalar followed by empty lines and comment
+ would result in two CommentTokens of which the first was dropped.
+ (reported by Colm O'Connor)
+
+(0.13.13) 2017-01-28:
+ - fix for issue 96, prevent insertion of extra empty line if indented mapping entries
+ are separated by an empty line (reported by Derrick Sawyer)
+
+(0.13.11) 2017-01-23:
+ - allow ':' in flow style scalars if not followed by space. Also don't
+ quote such scalars as this is no longer necessary.
+ - add python 3.6 manylinux wheel to PyPI
+
+(0.13.10) 2017-01-22:
+ - fix for issue 93, spurious blank line inserted before single line comment
+ between indented sequence elements (reported by Alex)
+
+(0.13.9) 2017-01-18:
+ - fix for issue 92, wrong import name reported by the-corinthian
+
+(0.13.8) 2017-01-18:
+ - fix for issue 91, when a compiler is unavailable (reported by Maximilian Hils)
+ - fix for deepcopy issue with TimeStamps not preserving 'T', reported on
+ `StackOverflow Q&A <http://stackoverflow.com/a/41577841/1307905>`_ by
+ `Quuxplusone <http://stackoverflow.com/users/1424877/quuxplusone>`_
+
+(0.13.7) 2016-12-27:
+ - fix for issue 85, constructor.py importing unicode_literals caused mypy to fail
+ on 2.7 (reported by Peter Amstutz)
+
+(0.13.6) 2016-12-27:
+ - fix for issue 83, collections.OrderedDict not representable by SafeRepresenter
+ (reported by Frazer McLean)
+
+(0.13.5) 2016-12-25:
+ - fix for issue 84, deepcopy not properly working (reported by Peter Amstutz)
+
+(0.13.4) 2016-12-05:
+ - another fix for issue 82, change to non-global resolver data broke implicit type
+ specification
+
+(0.13.3) 2016-12-05:
+ - fix for issue 82, deepcopy not working (reported by code monk)
+
+(0.13.2) 2016-11-28:
+ - fix for comments after empty (null) values (reported by dsw2127 and cokelaer)
+
+(0.13.1) 2016-11-22:
+ - optimisations on memory usage when loading YAML from large files (py3 -50%, py2 -85%)
+
+(0.13.0) 2016-11-20:
+ - if ``load()`` or ``load_all()`` is called with only a single argument
+ (stream or string),
+ an UnsafeLoaderWarning will be issued once. If appropriate you can suppress this
+ warning by filtering it. Explicitly supplying the ``Loader=ruamel.yaml.Loader``
+ argument will also prevent it from being issued. You should however consider
+ using ``safe_load()``, ``safe_load_all()`` if your YAML input does not use tags.
+ - allow adding comments before and after keys (based on
+ `StackOverflow Q&A <http://stackoverflow.com/a/40705671/1307905>`_ by
+ `msinn <http://stackoverflow.com/users/7185467/msinn>`_)
+
+(0.12.18) 2016-11-16:
+ - another fix for numpy (re-reported independently by PaulG & Nathanial Burdic)
+
+(0.12.17) 2016-11-15:
+ - only the RoundTripLoader included the Resolver that supports YAML 1.2;
+ now all loaders do (reported by mixmastamyk)
+
+(0.12.16) 2016-11-13:
+ - allow dot char (and many others) in anchor names.
+ Fixes issue 72 (reported by Shalon Wood)
+ - |
+ Slightly smarter behaviour dumping strings when no style is
+ specified. Single string scalars that start with single quotes
+ or have newlines now are dumped double quoted "'abc\nklm'" instead of
+
+ '''abc
+
+ klm'''
+
+(0.12.14) 2016-09-21:
+ - preserve round-trip sequences that are mapping keys
+ (prompted by stackoverflow question 39595807 from Nowox)
+
+(0.12.13) 2016-09-15:
+ - Fix for issue #60 representation of CommentedMap with merge
+ keys incorrect (reported by Tal Liron)
+
+(0.12.11) 2016-09-06:
+ - Fix issue 58 endless loop in scanning tokens (reported by
+ Christopher Lambert)
+
+(0.12.10) 2016-09-05:
+ - Make previous fix depend on unicode char width (32 bit unicode support
+ is a problem on MacOS reported by David Tagatac)
+
+(0.12.8) 2016-09-05:
+ - To be ignored Unicode characters were not properly regex matched
+ (no specific tests, PR by Haraguroicha Hsu)
+
+(0.12.7) 2016-09-03:
+ - fixing issue 54 empty lines with spaces (reported by Alex Harvey)
+
+(0.12.6) 2016-09-03:
+ - fixing issue 46: empty lines between top-level keys were gobbled (but
+ not between sequence elements, nor between keys in nested mappings)
+ (reported by Alex Harvey)
+
+(0.12.5) 2016-08-20:
+ - fixing issue 45 preserving datetime formatting (submitted by altuin)
+ Several formatting parameters are preserved with some normalisation:
+ - preserve 'T', 't' is replaced by 'T', multiple spaces between date
+ and time reduced to one.
+ - optional space before timezone is removed
+ - still using microseconds, but now rounded (.1234567 -> .123457)
+ - Z/-5/+01:00 preserved
+
+(0.12.4) 2016-08-19:
+ - Fix for issue 44: missing preserve_quotes keyword argument (reported
+ by M. Crusoe)
+
+(0.12.3) 2016-08-17:
+ - correct 'in' operation for merged CommentedMaps in round-trip mode
+ (implementation inspired by J.Ngo, but original not working for merges)
+ - iteration over round-trip loaded mappings, that contain merges. Also
+ keys(), items(), values() (Py3/Py2) and iterkeys(), iteritems(),
+ itervalues(), viewkeys(), viewitems(), viewvalues() (Py2)
+ - reuse of anchor name now generates warning, not an error. Round-tripping such
+ anchors works correctly. This inherited PyYAML issue was brought to attention
+ by G. Coddut (and was long standing, see https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=515634).
+ To suppress the warning::
+
+ import warnings
+ from ruamel.yaml.error import ReusedAnchorWarning
+ warnings.simplefilter("ignore", ReusedAnchorWarning)
+
+(0.12.2) 2016-08-16:
+ - minor improvements based on feedback from M. Crusoe
+ https://bitbucket.org/ruamel/yaml/issues/42/
+
+(0.12.0) 2016-08-16:
+ - drop support for Python 2.6
+ - include initial Type information (inspired by M. Crusoe)
+
+(0.11.15) 2016-08-07:
+ - Change to prevent FutureWarning in NumPy, as reported by tgehring
+ ("comparison to None will result in an elementwise object comparison in the future")
+
+(0.11.14) 2016-07-06:
+ - fix preserve_quotes missing on original Loaders (as reported
+ by Leynos, bitbucket issue 38)
+
+(0.11.13) 2016-07-06:
+ - documentation only, automated linux wheels
+
+(0.11.12) 2016-07-06:
+ - added support for roundtrip of single/double quoted scalars using:
+ ruamel.yaml.round_trip_load(stream, preserve_quotes=True)
+
+(0.11.10) 2016-05-02:
+
+- added .insert(pos, key, value, comment=None) to CommentedMap
+
+(0.11.10) 2016-04-19:
+
+- indent=2, block_seq_indent=2 works as expected
+
+(0.11.0) 2016-02-18:
+ - RoundTripLoader loads 1.2 by default (no sexagesimals, 012 octals nor
+ yes/no/on/off booleans)
+
+(0.10.11) 2015-09-17:
+- Fix issue 13: dependency on libyaml to be installed for yaml.h
+
+(0.10.10) 2015-09-15:
+- Python 3.5 tested with tox
+- pypy full test (old PyYAML tests failed on too many open file handles)
+
+(0.10.6-0.10.9) 2015-09-14:
+- Fix for issue 9
+- Fix for issue 11: double dump losing comments
+- Include libyaml code
+- move code from 'py' subdir for proper namespace packaging.
+
+(0.10.5) 2015-08-25:
+- preservation of newlines after block scalars. Contributed by Sam Thursfield.
+
+(0.10) 2015-06-22:
+- preservation of hand crafted anchor names (not of the form "idNNN")
+- preservation of map merges ( << )
+
+(0.9) 2015-04-18:
+- collections read in by the RoundTripLoader now have a ``lc`` property
+ that can be queried for line and column (``lc.line`` resp. ``lc.col``)
+
+(0.8) 2015-04-15:
+- bug fix for non-roundtrip save of ordereddict
+- adding/replacing end of line comments on block style mappings/sequences
+
+(0.7.2) 2015-03-29:
+- support for end-of-line comments on flow style sequences and mappings
+
+(0.7.1) 2015-03-27:
+- RoundTrip capability of flow style sequences ( 'a: b, c, d' )
+
+(0.7) 2015-03-26:
+- tests (currently failing) for inline sequence and non-standard spacing between
+ block sequence dash and scalar (Anthony Sottile)
+- initial possibility (on list, i.e. CommentedSeq) to set the flow format
+ explicitly
+- RoundTrip capability of flow style sequences ( 'a: b, c, d' )
+
+(0.6.1) 2015-03-15:
+- setup.py changed so ruamel.ordereddict no longer is a dependency
+ if not on CPython 2.x (used to test only for 2.x, which breaks pypy 2.5.0
+ reported by Anthony Sottile)
+
+(0.6) 2015-03-11:
+- basic support for scalars with preserved newlines
+- html option for yaml command
+- check if yaml C library is available before trying to compile C extension
+- include unreleased change in PyYAML dd 20141128
+
+(0.5) 2015-01-14:
+- move configobj -> YAML generator to own module
+- added dependency on ruamel.base (based on feedback from Sess)
+
+(0.4) 20141125:
+- move comment classes in own module comments
+- fix omap pre comment
+- make !!omap and !!set take parameters. There are still some restrictions:
+ - no comments before the !!tag
+- extra tests
+
+(0.3) 20141124:
+- fix value comment occurring as on previous line (looking like eol comment)
+- INI conversion in yaml + tests
+- (hidden) test in yaml for debugging with auto command
+- fix for missing comment in middle of simple map + test
+
+(0.2) 20141123:
+- add ext/_yaml.c etc to the source tree
+- tests for yaml to work on 2.6/3.3/3.4
+- change install so that you can include ruamel.yaml instead of ruamel.yaml.py
+- add "yaml" utility with initial subcommands (test rt, from json)
+
+(0.1) 20141122:
+- merge py2 and py3 code bases
+- remove support for 2.5/3.0/3.1/3.2 (this merge relies on u"" as
+ available in 3.3 and . imports not available in 2.5)
+- tox.ini for 2.7/3.4/2.6/3.3
+- remove lib3/ and tests/lib3 directories and content
+- commit
+- correct --verbose for test application
+- DATA=changed to be relative to __file__ of code
+- DATA using os.sep
+- remove os.path from imports as os is already imported
+- have test_yaml.py exit with value 0 on success, 1 on failures, 2 on
+ error
+- added support for octal integers starting with '0o'
+ keep support for 01234 as well as 0o1234
+- commit
+- added test_roundtrip_data:
+ requires a .data file and .roundtrip (empty), yaml_load .data
+ and compare dump against original.
+- fix grammar as per David Pursehouse:
+ https://bitbucket.org/xi/pyyaml/pull-request/5/fix-grammar-in-error-messages/diff
+- http://www.json.org/ extra escaped char \/
+ add .skip-ext as libyaml is not updated
+- David Fraser: Extract a method to represent keys in mappings, so that
+ a subclass can choose not to quote them, used in represent_mapping
+ https://bitbucket.org/davidfraser/pyyaml/
+- add CommentToken and percolate through parser and composer and constructor
+- add Comments to wrapped mapping and sequence constructs (not to scalars)
+- generate YAML with comments
+- initial README
diff --git a/libs/dynaconf/vendor/ruamel/yaml/LICENSE b/libs/dynaconf/vendor/ruamel/yaml/LICENSE
new file mode 100644
index 000000000..5b863d3de
--- /dev/null
+++ b/libs/dynaconf/vendor/ruamel/yaml/LICENSE
@@ -0,0 +1,21 @@
+ The MIT License (MIT)
+
+ Copyright (c) 2014-2020 Anthon van der Neut, Ruamel bvba
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
diff --git a/libs/dynaconf/vendor/ruamel/yaml/MANIFEST.in b/libs/dynaconf/vendor/ruamel/yaml/MANIFEST.in
new file mode 100644
index 000000000..1aa779878
--- /dev/null
+++ b/libs/dynaconf/vendor/ruamel/yaml/MANIFEST.in
@@ -0,0 +1,3 @@
+include README.rst LICENSE CHANGES setup.py
+prune ext*
+prune clib*
diff --git a/libs/dynaconf/vendor/ruamel/yaml/PKG-INFO b/libs/dynaconf/vendor/ruamel/yaml/PKG-INFO
new file mode 100644
index 000000000..b0ce985c1
--- /dev/null
+++ b/libs/dynaconf/vendor/ruamel/yaml/PKG-INFO
@@ -0,0 +1,782 @@
+Metadata-Version: 2.1
+Name: ruamel.yaml
+Version: 0.16.10
+Summary: ruamel.yaml is a YAML parser/emitter that supports roundtrip preservation of comments, seq/map flow style, and map key order
+Home-page: https://sourceforge.net/p/ruamel-yaml/code/ci/default/tree
+Author: Anthon van der Neut
+Author-email: [email protected]
+License: MIT license
+Description:
+ ruamel.yaml
+ ===========
+
+ ``ruamel.yaml`` is a YAML 1.2 loader/dumper package for Python.
+
+ :version: 0.16.10
+ :updated: 2020-02-12
+ :documentation: http://yaml.readthedocs.io
+ :repository: https://bitbucket.org/ruamel/yaml
+ :pypi: https://pypi.org/project/ruamel.yaml/
+
+
+ Starting with version 0.15.0 the way YAML files are loaded and dumped
+ is changing. See the API doc for details. Currently existing
+ functionality will throw a warning before being changed/removed.
+ **For production systems you should pin the version being used with
+ ``ruamel.yaml<=0.15``**. There might be bug fixes in the 0.14 series,
+ but new functionality is likely only to be available via the new API.
+
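+ A minimal sketch of the new-style API described in the documentation
+ linked below (for orientation only; see the API doc for details)::
+
+     from ruamel.yaml import YAML
+
+     yaml = YAML()        # round-trip mode by default
+     data = yaml.load("a: 1\nb: [2, 3]\n")
+     data['a'] = 10
+     with open('out.yaml', 'w') as fp:
+         yaml.dump(data, fp)
+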
+ If your package uses ``ruamel.yaml`` and is not listed on PyPI, drop
+ me an email, preferably with some information on how you use the
+ package (or a link to bitbucket/github) and I'll keep you informed
+ when the status of the API is stable enough to make the transition.
+
+ * `Overview <http://yaml.readthedocs.org/en/latest/overview.html>`_
+ * `Installing <http://yaml.readthedocs.org/en/latest/install.html>`_
+ * `Basic Usage <http://yaml.readthedocs.org/en/latest/basicuse.html>`_
+ * `Details <http://yaml.readthedocs.org/en/latest/detail.html>`_
+ * `Examples <http://yaml.readthedocs.org/en/latest/example.html>`_
+ * `API <http://yaml.readthedocs.org/en/latest/api.html>`_
+ * `Differences with PyYAML <http://yaml.readthedocs.org/en/latest/pyyaml.html>`_
+
+ .. image:: https://readthedocs.org/projects/yaml/badge/?version=stable
+ :target: https://yaml.readthedocs.org/en/stable
+
+ .. image:: https://bestpractices.coreinfrastructure.org/projects/1128/badge
+ :target: https://bestpractices.coreinfrastructure.org/projects/1128
+
+ .. image:: https://sourceforge.net/p/ruamel-yaml/code/ci/default/tree/_doc/_static/license.svg?format=raw
+ :target: https://opensource.org/licenses/MIT
+
+ .. image:: https://sourceforge.net/p/ruamel-yaml/code/ci/default/tree/_doc/_static/pypi.svg?format=raw
+ :target: https://pypi.org/project/ruamel.yaml/
+
+ .. image:: https://sourceforge.net/p/oitnb/code/ci/default/tree/_doc/_static/oitnb.svg?format=raw
+ :target: https://pypi.org/project/oitnb/
+
+ .. image:: http://www.mypy-lang.org/static/mypy_badge.svg
+ :target: http://mypy-lang.org/
+
+ ChangeLog
+ =========
+
+ .. should insert NEXT: at the beginning of line for next key (with empty line)
+
+ 0.16.10 (2020-02-12):
+ - (auto) updated image references in README to sourceforge
+
+ 0.16.9 (2020-02-11):
+ - update CHANGES
+
+ 0.16.8 (2020-02-11):
+ - update requirements so that ruamel.yaml.clib is installed for 3.8,
+ as it has become available (via manylinux builds)
+
+ 0.16.7 (2020-01-30):
+ - fix typechecking issue on TaggedScalar (reported by Jens Nielsen)
+ - fix error in dumping literal scalar in sequence with comments before element
+ (reported by `EJ Etherington <https://sourceforge.net/u/ejether/>`__)
+
+ 0.16.6 (2020-01-20):
+ - fix empty string mapping key roundtripping with preservation of quotes as `? ''`
+ (reported via email by Tomer Aharoni).
+ - fix incorrect state setting in class constructor (reported by `Douglas Raillard
+ <https://bitbucket.org/%7Bcf052d92-a278-4339-9aa8-de41923bb556%7D/>`__)
+ - adjust deprecation warning test for Hashable, as that no longer warns (reported
+ by `Jason Montleon <https://bitbucket.org/%7B8f377d12-8d5b-4069-a662-00a2674fee4e%7D/>`__)
+
+ 0.16.5 (2019-08-18):
+ - allow for ``YAML(typ=['unsafe', 'pytypes'])``
+
+ 0.16.4 (2019-08-16):
+ - fix output of TAG directives with # (reported by `Thomas Smith
+ <https://bitbucket.org/%7Bd4c57a72-f041-4843-8217-b4d48b6ece2f%7D/>`__)
+
+
+ 0.16.3 (2019-08-15):
+ - split construct_object
+ - change stuff back to keep mypy happy
+ - move setting of version based on YAML directive to scanner, allowing the
+ file version to be checked during TAG directive scanning
+
+ 0.16.2 (2019-08-15):
+ - preserve YAML and TAG directives on roundtrip, correctly output #
+ in URL for YAML 1.2 (both reported by `Thomas Smith
+ <https://bitbucket.org/%7Bd4c57a72-f041-4843-8217-b4d48b6ece2f%7D/>`__)
+
+ 0.16.1 (2019-08-08):
+ - Force the use of new version of ruamel.yaml.clib (reported by `Alex Joz
+ <https://bitbucket.org/%7B9af55900-2534-4212-976c-61339b6ffe14%7D/>`__)
+ - Allow '#' in tag URI as these are allowed in YAML 1.2 (reported by
+ `Thomas Smith
+ <https://bitbucket.org/%7Bd4c57a72-f041-4843-8217-b4d48b6ece2f%7D/>`__)
+
+ 0.16.0 (2019-07-25):
+ - split of C source that generates .so file to ruamel.yaml.clib
+ - duplicate keys are now an error when working with the old API as well
+
+ 0.15.100 (2019-07-17):
+ - fixing issue with dumping deep-copied data from commented YAML, by
+ providing both the memo parameter to __deepcopy__, and by allowing
+ startmarks to be compared on their content (reported by `Theofilos
+ Petsios
+ <https://bitbucket.org/%7Be550bc5d-403d-4fda-820b-bebbe71796d3%7D/>`__)
+
+ 0.15.99 (2019-07-12):
+ - add `py.typed` to distribution, based on a PR submitted by
+ `Michael Crusoe
+ <https://bitbucket.org/%7Bc9fbde69-e746-48f5-900d-34992b7860c8%7D/>`__
+ - merge PR 40 (also by Michael Crusoe) to more accurately specify
+ repository in the README (also reported in a misunderstood issue
+ some time ago)
+
+ 0.15.98 (2019-07-09):
+ - regenerate ext/_ruamel_yaml.c with Cython version 0.29.12, needed
+ for Python 3.8.0b2 (reported by `John Vandenberg
+ <https://bitbucket.org/%7B6d4e8487-3c97-4dab-a060-088ec50c682c%7D/>`__)
+
+ 0.15.97 (2019-06-06):
+ - regenerate ext/_ruamel_yaml.c with Cython version 0.29.10, needed for
+ Python 3.8.0b1
+ - regenerate ext/_ruamel_yaml.c with Cython version 0.29.9, needed for
+ Python 3.8.0a4 (reported by `Anthony Sottile
+ <https://bitbucket.org/%7B569cc8ea-0d9e-41cb-94a4-19ea517324df%7D/>`__)
+
+ 0.15.96 (2019-05-16):
+ - fix failure to indent comments on round-trip anchored block style
+ scalars in block sequence (reported by `William Kimball
+ <https://bitbucket.org/%7Bba35ed20-4bb0-46f8-bb5d-c29871e86a22%7D/>`__)
+
+ 0.15.95 (2019-05-16):
+ - fix failure to round-trip anchored scalars in block sequence
+ (reported by `William Kimball
+ <https://bitbucket.org/%7Bba35ed20-4bb0-46f8-bb5d-c29871e86a22%7D/>`__)
+ - wheel files for Python 3.4 no longer provided (`Python 3.4 EOL 2019-03-18
+ <https://www.python.org/dev/peps/pep-0429/>`__)
+
+ 0.15.94 (2019-04-23):
+ - fix missing line-break after end-of-file comments not ending in
+ line-break (reported by `Philip Thompson
+ <https://bitbucket.org/%7Be42ba205-0876-4151-bcbe-ccaea5bd13ce%7D/>`__)
+
+ 0.15.93 (2019-04-21):
+ - fix failure to parse empty implicit flow mapping key
+ - in YAML 1.1 plain scalars `y`, `n`, `Y`, and `N` are now
+ correctly recognised as booleans and such strings dumped quoted
+ (reported by `Marcel Bollmann
+ <https://bitbucket.org/%7Bd8850921-9145-4ad0-ac30-64c3bd9b036d%7D/>`__)
+
+ 0.15.92 (2019-04-16):
+ - fix failure to parse empty implicit block mapping key (reported by
+ `Nolan W <https://bitbucket.org/i2labs/>`__)
+
+ 0.15.91 (2019-04-05):
+ - allowing duplicate keys would not work for merge keys (reported by mamacdon on
+ `StackOverflow <https://stackoverflow.com/questions/55540686/>`__)
+
+ 0.15.90 (2019-04-04):
+ - fix issue with updating `CommentedMap` from list of tuples (reported by
+ `Peter Henry <https://bitbucket.org/mosbasik/>`__)
+
+ 0.15.89 (2019-02-27):
+ - fix for items with flow-mapping in block sequence output on single line
+ (reported by `Zahari Dim <https://bitbucket.org/zahari_dim/>`__)
+ - fix for safe dumping erroring in creation of RepresenterError when dumping namedtuple
+ (reported and solution by `Jaakko Kantojärvi <https://bitbucket.org/raphendyr/>`__)
+
+ 0.15.88 (2019-02-12):
+ - fix inclusion of python code from the subpackage data (containing extra tests,
+ reported by `Florian Apolloner <https://bitbucket.org/apollo13/>`__)
+
+ 0.15.87 (2019-01-22):
+ - fix problem with empty lists and the code to reinsert merge keys (reported via email
+ by Zaloo)
+
+ 0.15.86 (2019-01-16):
+ - reinsert merge key in its old position (reported by grumbler on
+ `StackOverflow <https://stackoverflow.com/a/54206512/1307905>`__)
+ - fix for issue with non-ASCII anchor names (reported and fix
+ provided by Dandaleon Flux via email)
+ - fix for issue when parsing flow mapping value starting with colon (in pure Python only)
+ (reported by `FichteFoll <https://bitbucket.org/FichteFoll/>`__)
+
+ 0.15.85 (2019-01-08):
+ - the types used by ``SafeConstructor`` for mappings and sequences can
+ now by set by assigning to ``XXXConstructor.yaml_base_dict_type``
+ (and ``..._list_type``), preventing the need to copy two methods
+ with 50+ lines that had ``var = {}`` hardcoded. (Implemented to
+ help solve a feature request by `Anthony Sottile
+ <https://bitbucket.org/asottile/>`__ in an easier way)
+
+ 0.15.84 (2019-01-07):
+ - fix for ``CommentedMap.copy()`` not returning ``CommentedMap``, let alone copying comments etc.
+ (reported by `Anthony Sottile <https://bitbucket.org/asottile/>`__)
+
+ 0.15.83 (2019-01-02):
+ - fix for bug in roundtripping aliases used as key (reported via email by Zaloo)
+
+ 0.15.82 (2018-12-28):
+ - anchors and aliases on scalar int, float, string and bool are now preserved. Anchors
+ do not need a referring alias for these (reported by
+ `Alex Harvey <https://bitbucket.org/alexharv074/>`__)
+ - anchors no longer lost on tagged objects when roundtripping (reported by `Zaloo
+ <https://bitbucket.org/zaloo/>`__)
+
+ 0.15.81 (2018-12-06):
+ - fix issue dumping methods of metaclass derived classes (reported and fix provided
+ by `Douglas Raillard <https://bitbucket.org/DouglasRaillard/>`__)
+
+ 0.15.80 (2018-11-26):
+ - fix issue emitting BEL character when round-tripping invalid folded input
+ (reported by Isaac on `StackOverflow <https://stackoverflow.com/a/53471217/1307905>`__)
+
+ 0.15.79 (2018-11-21):
+ - fix issue with anchors nested deeper than alias (reported by gaFF on
+ `StackOverflow <https://stackoverflow.com/a/53397781/1307905>`__)
+
+ 0.15.78 (2018-11-15):
+ - fix setup issue for 3.8 (reported by `Sidney Kuyateh
+ <https://bitbucket.org/autinerd/>`__)
+
+ 0.15.77 (2018-11-09):
+ - setting `yaml.sort_base_mapping_type_on_output = False` will prevent
+ explicit sorting by keys in the base representer of mappings. Roundtrip
+ already did not do this. Usage only makes real sense for Python 3.6+
+ (feature request by `Sebastian Gerber <https://bitbucket.org/spacemanspiff2007/>`__).
+ - implement Python version check in YAML metadata in ``_test/test_z_data.py``
+
+ 0.15.76 (2018-11-01):
+ - fix issue with empty mapping and sequence loaded as flow-style
+ (mapping reported by `Min RK <https://bitbucket.org/minrk/>`__, sequence
+ by `Maged Ahmed <https://bitbucket.org/maged2/>`__)
+
+ 0.15.75 (2018-10-27):
+ - fix issue with single '?' scalar (reported by `Terrance
+ <https://bitbucket.org/OllieTerrance/>`__)
+ - fix issue with duplicate merge keys (prompted by `answering
+ <https://stackoverflow.com/a/52852106/1307905>`__ a
+ `StackOverflow question <https://stackoverflow.com/q/52851168/1307905>`__
+ by `math <https://stackoverflow.com/users/1355634/math>`__)
+
+ 0.15.74 (2018-10-17):
+ - fix dropping of comment on round-trip before a sequence item that is itself a sequence
+ (reported by `Thorsten Kampe <https://bitbucket.org/thorstenkampe/>`__)
+
+ 0.15.73 (2018-10-16):
+ - fix irregular output on pre-comment in sequence within sequence (reported
+ by `Thorsten Kampe <https://bitbucket.org/thorstenkampe/>`__)
+ - allow non-compact (i.e. next line) dumping sequence/mapping within sequence.
+
+ 0.15.72 (2018-10-06):
+ - fix regression on explicit 1.1 loading with the C based scanner/parser
+ (reported by `Tomas Vavra <https://bitbucket.org/xtomik/>`__)
+
+ 0.15.71 (2018-09-26):
+ - some of the tests now live in YAML files in the
+ `yaml.data <https://bitbucket.org/ruamel/yaml.data>`__ repository.
+ ``_test/test_z_data.py`` processes these.
+ - fix regression where handcrafted CommentedMaps could not be initiated (reported by
+ `Dan Helfman <https://bitbucket.org/dhelfman/>`__)
+ - fix regression with non-root literal scalars that needed indent indicator
+ (reported by `Clark Breyman <https://bitbucket.org/clarkbreyman/>`__)
+ - tag:yaml.org,2002:python/object/apply now also uses __qualname__ on PY3
+ (reported by `Douglas RAILLARD <https://bitbucket.org/DouglasRaillard/>`__)
+ - issue with self-referring object creation
+ (reported and fix by `Douglas RAILLARD <https://bitbucket.org/DouglasRaillard/>`__)
+
+ 0.15.70 (2018-09-21):
+ - reverted CommentedMap and CommentedSeq to subclass ordereddict resp. list,
+ reimplemented merge maps so that both ``dict(**commented_map_instance)`` and JSON
+ dumping works. This also allows checking with ``isinstance()`` on ``dict`` resp. ``list``.
+ (Proposed by `Stuart Berg <https://bitbucket.org/stuarteberg/>`__, with feedback
+ from `blhsing <https://stackoverflow.com/users/6890912/blhsing>`__ on
+ `StackOverflow <https://stackoverflow.com/q/52314186/1307905>`__)
+
+ 0.15.69 (2018-09-20):
+ - fix issue with dump_all gobbling end-of-document comments on parsing
+ (reported by `Pierre B. <https://bitbucket.org/octplane/>`__)
+
+ 0.15.68 (2018-09-20):
+ - fix issue with parsable, but incorrect output with nested flow-style sequences
+ (reported by `Dougal Seeley <https://bitbucket.org/dseeley/>`__)
+ - fix issue with loading Python objects that have __setstate__ and recursion in parameters
+ (reported by `Douglas RAILLARD <https://bitbucket.org/DouglasRaillard/>`__)
+
+ 0.15.67 (2018-09-19):
+ - fix issue with extra space inserted with non-root literal strings
+ (Issue reported and PR with fix provided by
+ `Naomi Seyfer <https://bitbucket.org/sixolet/>`__.)
+
+ 0.15.66 (2018-09-07):
+ - fix issue with fold indicating characters inserted in safe_load-ed folded strings
+ (reported by `Maximilian Hils <https://bitbucket.org/mhils/>`__).
+
+ 0.15.65 (2018-09-07):
+ - fix issue #232 revert to throw ParserError for unexpected ``]``
+ and ``}`` instead of IndexError. (Issue reported and PR with fix
+ provided by `Naomi Seyfer <https://bitbucket.org/sixolet/>`__.)
+ - added ``key`` and ``reverse`` parameter (suggested by Jannik Klemm via email)
+ - indent root level literal scalars that have directive or document end markers
+ at the beginning of a line
+
+ 0.15.64 (2018-08-30):
+ - support round-trip of tagged sequences: ``!Arg [a, {b: 1}]``
+ - single entry mappings in flow sequences now written by default without braces,
+ set ``yaml.brace_single_entry_mapping_in_flow_sequence=True`` to force
+ getting ``[a, {b: 1}, {c: {d: 2}}]`` instead of the default ``[a, b: 1, c: {d: 2}]``
+ - fix issue when roundtripping floats starting with a dot such as ``.5``
+ (reported by `Harrison Gregg <https://bitbucket.org/HarrisonGregg/>`__)
+
+ 0.15.63 (2018-08-29):
+ - small fix only necessary for Windows users that don't use wheels.
+
+ 0.15.62 (2018-08-29):
+ - C based reader/scanner & emitter now allow setting of 1.2 as YAML version.
+ **The loading/dumping is still YAML 1.1 code**, so use the common subset of
+ YAML 1.2 and 1.1 (reported by `Ge Yang <https://bitbucket.org/yangge/>`__)
+
+ 0.15.61 (2018-08-23):
+ - support for round-tripping folded style scalars (initially requested
+ by `Johnathan Viduchinsky <https://bitbucket.org/johnathanvidu/>`__)
+ - update of C code
+ - speed up of scanning (~30% depending on the input)
+
+ 0.15.60 (2018-08-18):
+ - again allow single entry map in flow sequence context (reported by
+ `Lee Goolsbee <https://bitbucket.org/lgoolsbee/>`__)
+ - cleanup for mypy
+ - spurious print in library (reported by
+ `Lele Gaifax <https://bitbucket.org/lele/>`__), now automatically checked
+
+ 0.15.59 (2018-08-17):
+ - issue with C based loader and leading zeros (reported by
+ `Tom Hamilton Stubber <https://bitbucket.org/TomHamiltonStubber/>`__)
+
+ 0.15.58 (2018-08-17):
+ - simple mappings can now be used as keys when round-tripping::
+
+ {a: 1, b: 2}: hello world
+
+ although using the obvious operations (del, popitem) on the key will
+ fail, you can mutate it by going through its attributes. If you load the
+ above YAML into `d`, then changing the value is cumbersome::
+
+     d = {CommentedKeyMap([('a', 1), ('b', 2)]): "goodbye"}
+
+ and changing the key even more so::
+
+     d[CommentedKeyMap([('b', 1), ('a', 2)])] = d.pop(
+         CommentedKeyMap([('a', 1), ('b', 2)]))
+
+ (you can use a `dict` instead of a list of tuples (or ordereddict), but that might result
+ in a different order of the keys of the key in the output)
+ - check integers to dump with 1.2 patterns instead of 1.1 (reported by
+ `Lele Gaifax <https://bitbucket.org/lele/>`__)
+
+
+ 0.15.57 (2018-08-15):
+ - Fix that CommentedSeq could no longer be used in addition or sorting
+ (reported by `Christopher Wright <https://bitbucket.org/CJ-Wright4242/>`__)
+
+ 0.15.56 (2018-08-15):
+ - fix issue with ``python -O`` optimizing away code (reported, and detailed cause
+ pinpointed, by `Alex Grönholm <https://bitbucket.org/agronholm/>`__)
+
+ 0.15.55 (2018-08-14):
+ - unmade ``CommentedSeq`` a subclass of ``list``. It is now
+ indirectly a subclass of the standard
+ ``collections.abc.MutableSequence`` (without .abc if you are
+ still on Python2.7). If you do ``isinstance(yaml.load('[1, 2]'),
+ list)`` anywhere in your code, replace ``list`` with
+ ``MutableSequence``. Directly, ``CommentedSeq`` is a subclass of
+ the abstract baseclass ``ruamel.yaml.compat.MutableSliceableSequence``,
+ with the result that *(extended) slicing is supported on
+ ``CommentedSeq``*.
+ (reported by `Stuart Berg <https://bitbucket.org/stuarteberg/>`__)
+ - duplicate keys (or their values) with non-ascii now correctly
+ report in Python2, instead of raising a Unicode error.
+ (Reported by `Jonathan Pyle <https://bitbucket.org/jonathan_pyle/>`__)
+
+ 0.15.54 (2018-08-13):
+ - fix issue where a comment could pop-up twice in the output (reported by
+ `Mike Kazantsev <https://bitbucket.org/mk_fg/>`__ and by
+ `Nate Peterson <https://bitbucket.org/ndpete21/>`__)
+ - fix issue where JSON object (mapping) without spaces was not parsed
+ properly (reported by `Marc Schmidt <https://bitbucket.org/marcj/>`__)
+ - fix issue where comments after empty flow-style mappings were not emitted
+ (reported by `Qinfench Chen <https://bitbucket.org/flyin5ish/>`__)
+
+ 0.15.53 (2018-08-12):
+ - fix issue with flow style mapping with comments gobbled newline (reported
+ by `Christopher Lambert <https://bitbucket.org/XN137/>`__)
+ - fix issue where single '+' under YAML 1.2 was interpreted as
+ integer, erroring out (reported by `Jethro Yu
+ <https://bitbucket.org/jcppkkk/>`__)
+
+ 0.15.52 (2018-08-09):
+ - added `.copy()` mapping representation for round-tripping
+ (``CommentedMap``) to fix incomplete copies of merged mappings
+ (reported by `Will Richards
+ <https://bitbucket.org/will_richards/>`__)
+ - Also unmade that class a subclass of ordereddict to solve incorrect behaviour
+ for ``{**merged-mapping}`` and ``dict(**merged-mapping)`` (reported independently by
+ `Tim Olsson <https://bitbucket.org/tgolsson/>`__ and
+ `Filip Matzner <https://bitbucket.org/FloopCZ/>`__)
+
+ 0.15.51 (2018-08-08):
+ - Fix method name dumps (were not dotted) and loads (reported by `Douglas Raillard
+ <https://bitbucket.org/DouglasRaillard/>`__)
+ - Fix spurious trailing white-space caused when the comment start
+ column was no longer reached and there was no actual EOL comment
+ (e.g. following empty line) and doing substitutions, or when
+ quotes around scalars got dropped. (reported by `Thomas Guillet
+ <https://bitbucket.org/guillett/>`__)
+
+ 0.15.50 (2018-08-05):
+ - Allow ``YAML()`` as a context manager for output, thereby making it much easier
+ to generate multi-documents in a stream.
+ - Fix issue with incorrect type information for `load()` and `dump()` (reported
+ by `Jimbo Jim <https://bitbucket.org/jimbo1qaz/>`__)
+
+ 0.15.49 (2018-08-05):
+ - fix preservation of leading newlines in root level literal style scalar,
+ and preserve comment after literal style indicator (``| # some comment``).
+ Both are needed for round-tripping multi-doc streams in
+ `ryd <https://pypi.org/project/ryd/>`__.
+
+ 0.15.48 (2018-08-03):
+ - housekeeping: ``oitnb`` for formatting, mypy 0.620 upgrade and conformity
+
+ 0.15.47 (2018-07-31):
+ - fix broken 3.6 manylinux1, the result of an unclean ``build`` (reported by
+ `Roman Sichnyi <https://bitbucket.org/rsichnyi-gl/>`__)
+
+
+ 0.15.46 (2018-07-29):
+ - fixed DeprecationWarning for importing from ``collections`` on 3.7
+ (issue 210, reported by `Reinoud Elhorst
+ <https://bitbucket.org/reinhrst/>`__). It was `difficult to find
+ why tox/pytest did not report
+ <https://stackoverflow.com/q/51573204/1307905>`__ and just as time
+ consuming to actually `fix
+ <https://stackoverflow.com/a/51573205/1307905>`__ the tests.
+
+ 0.15.45 (2018-07-26):
+ - After adding failing test for ``YAML.load_all(Path())``, remove StopIteration
+ (PR provided by `Zachary Buhman <https://bitbucket.org/buhman/>`__,
+ also reported by `Steven Hiscocks <https://bitbucket.org/sdhiscocks/>`__).
+
+ 0.15.44 (2018-07-14):
+ - Correct loading plain scalars consisting of numerals only and
+ starting with `0`, when not explicitly specifying YAML version
+ 1.1. This also fixes the issue about dumping string `'019'` as
+ plain scalars as reported by `Min RK
+ <https://bitbucket.org/minrk/>`__, that prompted this change.
+
+ 0.15.43 (2018-07-12):
+ - merge PR33: Python2.7 on Windows is narrow, but has no
+ ``sysconfig.get_config_var('Py_UNICODE_SIZE')``. (merge provided by
+ `Marcel Bargull <https://bitbucket.org/mbargull/>`__)
+ - ``register_class()`` now returns class (proposed by
+ `Mike Nerone <https://bitbucket.org/Manganeez/>`__)
+
+ 0.15.42 (2018-07-01):
+ - fix regression showing only on narrow Python 2.7 (py27mu) builds
+ (with help from
+ `Marcel Bargull <https://bitbucket.org/mbargull/>`__ and
+ `Colm O'Connor <https://bitbucket.org/colmoconnorgithub/>`__).
+ - run pre-commit ``tox`` on Python 2.7 wide and narrow, as well as
+ 3.4/3.5/3.6/3.7/pypy
+
+ 0.15.41 (2018-06-27):
+ - add detection of C-compile failure (investigation prompted by
+ `StackOverflow <https://stackoverflow.com/a/51057399/1307905>`__ by
+ `Emmanuel Blot <https://stackoverflow.com/users/8233409/emmanuel-blot>`__),
+ which was removed when the package no longer depended on ``libyaml``;
+ compiling the C extensions still needs a compiler though.
+
+ 0.15.40 (2018-06-18):
+ - added links to landing places as suggested in issue 190 by
+ `KostisA <https://bitbucket.org/ankostis/>`__
+ - fixes issue #201: decoding unicode escaped tags on Python2, reported
+ by `Dan Abolafia <https://bitbucket.org/danabo/>`__
+
+ 0.15.39 (2018-06-17):
+ - merge PR27 improving package startup time (and loading when regexp not
+ actually used), provided by
+ `Marcel Bargull <https://bitbucket.org/mbargull/>`__
+
+ 0.15.38 (2018-06-13):
+ - fix for losing precision when roundtripping floats by
+ `Rolf Wojtech <https://bitbucket.org/asomov/>`__
+ - fix for hardcoded dir separator not working for Windows by
+ `Nuno André <https://bitbucket.org/nu_no/>`__
+ - typo fix by `Andrey Somov <https://bitbucket.org/asomov/>`__
+
+ 0.15.37 (2018-03-21):
+ - again trying to create installable files for issue 187
+
+ 0.15.36 (2018-02-07):
+ - fix issue 187, incompatibility of C extension with 3.7 (reported by
+ Daniel Blanchard)
+
+ 0.15.35 (2017-12-03):
+ - allow ``None`` as stream when specifying ``transform`` parameters to
+ ``YAML.dump()``.
+ This is useful if the transforming function doesn't return a meaningful value
+ (inspired by `StackOverflow <https://stackoverflow.com/q/47614862/1307905>`__ by
+ `rsaw <https://stackoverflow.com/users/406281/rsaw>`__).
+
+ 0.15.34 (2017-09-17):
+ - fix for issue 157: CDumper not dumping floats (reported by Jan Smitka)
+
+ 0.15.33 (2017-08-31):
+ - support for "undefined" round-tripping tagged scalar objects (in addition to
+ tagged mapping object). Inspired by a use case presented by Matthew Patton
+ on `StackOverflow <https://stackoverflow.com/a/45967047/1307905>`__.
+ - fix issue 148: replace cryptic error message when using !!timestamp with an
+ incorrectly formatted or non-scalar value. Reported by FichteFoll.
+
+ 0.15.32 (2017-08-21):
+ - allow setting ``yaml.default_flow_style = None`` (default: ``False``)
+ for ``typ='rt'``.
+ - fix for issue 149: multiplications on ``ScalarFloat`` now return ``float``
+ (reported by [email protected])
+
+ 0.15.31 (2017-08-15):
+ - fix Comment dumping
+
+ 0.15.30 (2017-08-14):
+ - fix for issue with "compact JSON" not parsing: ``{"in":{},"out":{}}``
+ (reported on `StackOverflow <https://stackoverflow.com/q/45681626/1307905>`__ by
+ `mjalkio <https://stackoverflow.com/users/5130525/mjalkio>`__)
+
+ 0.15.29 (2017-08-14):
+ - fix issue #51: different indents for mappings and sequences (reported by
+ Alex Harvey)
+ - fix for flow sequence/mapping as element/value of block sequence with
+ sequence-indent minus dash-offset not equal two.
+
+ 0.15.28 (2017-08-13):
+ - fix issue #61: merge of merge cannot be __repr__-ed (reported by Tal Liron)
+
+ 0.15.27 (2017-08-13):
+ - fix issue 62, YAML 1.2 allows ``?`` and ``:`` in plain scalars if non-ambiguous
+ (reported by nowox)
+ - fix lists within lists which would make comments disappear
+
+ 0.15.26 (2017-08-10):
+ - fix for disappearing comment after empty flow sequence (reported by
+ oit-tzhimmash)
+
+ 0.15.25 (2017-08-09):
+ - fix for problem with dumping (unloaded) floats (reported by eyenseo)
+
+ 0.15.24 (2017-08-09):
+ - added ScalarFloat which supports roundtripping of 23.1, 23.100,
+ 42.00E+56, 0.0, -0.0 etc. while keeping the format. Underscores in mantissas
+ are not preserved/supported (yet, is anybody using that?).
+ - (finally) fixed longstanding issue 23 (reported by `Anthony Sottile
+ <https://bitbucket.org/asottile/>`__), now handling comment between block
+ mapping key and value correctly
+ - warn on YAML 1.1 float input that is incorrect (triggered by invalid YAML
+ provided by Cecil Curry)
+ - allow setting of boolean representation (`false`, `true`) by using:
+ ``yaml.boolean_representation = [u'False', u'True']``
+
+ 0.15.23 (2017-08-01):
+ - fix for round_tripping integers on 2.7.X > sys.maxint (reported by ccatterina)
+
+ 0.15.22 (2017-07-28):
+ - fix for round_tripping single exclamation mark tags doubling (reported and fix by Jan Brezina)
+
+ 0.15.21 (2017-07-25):
+ - fix for writing unicode in new API (reported on
+ `StackOverflow <https://stackoverflow.com/a/45281922/1307905>`__)
+
+ 0.15.20 (2017-07-23):
+ - wheels for windows including C extensions
+
+ 0.15.19 (2017-07-13):
+ - added object constructor for rt, decorator ``yaml_object`` to replace YAMLObject.
+ - fix for problem using load_all with Path() instance
+ - fix for load_all in combination with zero indent block style literal
+ (``pure=True`` only!)
+
+ 0.15.18 (2017-07-04):
+ - missing ``pure`` attribute on ``YAML`` useful for implementing `!include` tag
+ constructor for `including YAML files in a YAML file
+ <https://stackoverflow.com/a/44913652/1307905>`__
+ - some documentation improvements
+ - trigger of doc build on new revision
+
+ 0.15.17 (2017-07-03):
+ - support for Unicode supplementary Plane **output**
+ (input was already supported, triggered by
+ `this <https://stackoverflow.com/a/44875714/1307905>`__ Stack Overflow Q&A)
+
+ 0.15.16 (2017-07-01):
+ - minor typing issues (reported and fix provided by
+ `Manvendra Singh <https://bitbucket.org/manu-chroma/>`__)
+ - small doc improvements
+
+ 0.15.15 (2017-06-27):
+ - fix for issue 135, typ='safe' not dumping in Python 2.7
+ (reported by `Andrzej Ostrowski <https://bitbucket.org/aostr123/>`__)
+
+ 0.15.14 (2017-06-25):
+ - fix for issue 133, in setup.py: change ModuleNotFoundError to
+ ImportError (reported and fix by
+ `Asley Drake <https://github.com/aldraco>`__)
+
+ 0.15.13 (2017-06-24):
+ - suppress duplicate key warning on mappings with merge keys (reported by
+ Cameron Sweeney)
+
+ 0.15.12 (2017-06-24):
+ - remove fatal dependency of setup.py on wheel package (reported by
+ Cameron Sweeney)
+
+ 0.15.11 (2017-06-24):
+ - fix for issue 130, regression in nested merge keys (reported by
+ `David Fee <https://bitbucket.org/dfee/>`__)
+
+ 0.15.10 (2017-06-23):
+ - top level PreservedScalarString not indented if not explicitly asked to
+ - remove Makefile (not very useful anyway)
+ - some mypy additions
+
+ 0.15.9 (2017-06-16):
+ - fix for issue 127: tagged scalars were always quoted and separated
+ by a newline when in a block sequence (reported and largely fixed by
+ `Tommy Wang <https://bitbucket.org/twang817/>`__)
+
+ 0.15.8 (2017-06-15):
+ - allow plug-in install via ``install ruamel.yaml[jinja2]``
+
+ 0.15.7 (2017-06-14):
+ - add plug-in mechanism for load/dump pre resp. post-processing
+
+ 0.15.6 (2017-06-10):
+ - a set() with duplicate elements now throws an error in rt loading
+ - support for toplevel column zero literal/folded scalar in explicit documents
+
+ 0.15.5 (2017-06-08):
+ - fix for repeated `load()` on a single `YAML()` instance failing.
+
+ 0.15.4 (2017-06-08):
+ - `transform` parameter on dump that expects a function taking a
+ string and returning a string. This allows transformation of the output
+ before it is written to stream. This forces creation of the complete output in memory!
+ - some updates to the docs
+
+ 0.15.3 (2017-06-07):
+ - No longer try to compile C extensions on Windows. Compilation can be forced by setting
+ the environment variable `RUAMEL_FORCE_EXT_BUILD` to some value
+ before starting the `pip install`.
+
+ 0.15.2 (2017-06-07):
+ - update to conform to mypy 0.511: mypy --strict
+
+ 0.15.1 (2017-06-07):
+ - `duplicate keys <http://yaml.readthedocs.io/en/latest/api.html#duplicate-keys>`__
+ in mappings generate an error (in the old API this change generates a warning until 0.16)
+ - dependency on ruamel.ordereddict for 2.7 now via extras_require
+
+ 0.15.0 (2017-06-04):
+ - it is now allowed to pass in a ``pathlib.Path`` as "stream" parameter to all
+ load/dump functions
+ - passing in a non-supported object (e.g. a string) as "stream" will result in a
+ much more meaningful YAMLStreamError.
+ - assigning a normal string value to an existing CommentedMap key or CommentedSeq
+ element will result in a value cast to the previous value's type if possible.
+ - added ``YAML`` class for new API
+
+ 0.14.12 (2017-05-14):
+ - fix for issue 119, deepcopy not returning subclasses (reported and PR by
+ Constantine Evans <[email protected]>)
+
+ 0.14.11 (2017-05-01):
+ - fix for issue 103 allowing implicit documents after document end marker line (``...``)
+ in YAML 1.2
+
+ 0.14.10 (2017-04-26):
+ - fix problem with emitting using cyaml
+
+ 0.14.9 (2017-04-22):
+ - remove dependency on ``typing`` while still supporting ``mypy``
+ (http://stackoverflow.com/a/43516781/1307905)
+ - fix unclear statement in doc that said 2.6 is supported (reported by feetdust)
+
+ 0.14.8 (2017-04-19):
+ - fix Text not available on 3.5.0 and 3.5.1, now proactively setting version guards
+ on all files (reported by `João Paulo Magalhães <https://bitbucket.org/jpmag/>`__)
+
+ 0.14.7 (2017-04-18):
+ - round trip of integers (decimal, octal, hex, binary) now preserve
+ leading zero(s) padding and underscores. Underscores are presumed
+ to be at regular distances (i.e. ``0o12_345_67`` dumps back as
+ ``0o1_23_45_67`` as the space from the last digit to the
+ underscore before that is the determining factor).
+
+ 0.14.6 (2017-04-14):
+ - binary, octal and hex integers are now preserved by default. This
+ was a known deficiency. Working on this was prompted by the issue report (112)
+ from devnoname120, as well as the additional experience with `.replace()`
+ on `scalarstring` classes.
+ - fix issue 114: cannot install on Buildozer (reported by mixmastamyk).
+ Setting env. var ``RUAMEL_NO_PIP_INSTALL_CHECK`` will suppress ``pip``-check.
+
+ 0.14.5 (2017-04-04):
+ - fix issue 109: None not dumping correctly at top level (reported by Andrea Censi)
+ - fix issue 110: .replace on Preserved/DoubleQuoted/SingleQuoted ScalarString
+ would give back "normal" string (reported by sandres23)
+
+ 0.14.4 (2017-03-31):
+ - fix readme
+
+ 0.14.3 (2017-03-31):
+ - fix for 0o52 not being a string in YAML 1.1 (reported on
+ `StackOverflow Q&A 43138503 <http://stackoverflow.com/a/43138503/1307905>`__ by
+ `Frank D <http://stackoverflow.com/users/7796630/frank-d>`__)
+
+ 0.14.2 (2017-03-23):
+ - fix for old default pip on Ubuntu 14.04 (reported by Sébastien Maccagnoni-Munch)
+
+ 0.14.1 (2017-03-22):
+ - fix Text not available on 3.5.0 and 3.5.1 (reported by Charles Bouchard-Légaré)
+
+ 0.14.0 (2017-03-21):
+ - updates for mypy --strict
+ - preparation for moving away from inheritance in Loader and Dumper, calls from e.g.
+ the Representer to the Serializer.serialize() are now done via the attribute
+ .serializer.serialize(). Usage of .serialize() outside of Serializer will be
+ deprecated soon
+ - some extra tests on main.py functions
+
+ ----
+
+ For older changes see the file
+ `CHANGES <https://bitbucket.org/ruamel/yaml/src/default/CHANGES>`_
+
+Keywords: yaml 1.2 parser round-trip preserve quotes order config
+Platform: UNKNOWN
+Classifier: Development Status :: 4 - Beta
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: Jython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: Text Processing :: Markup
+Classifier: Typing :: Typed
+Description-Content-Type: text/x-rst
+Provides-Extra: docs
+Provides-Extra: jinja2
diff --git a/libs/dynaconf/vendor/ruamel/yaml/README.rst b/libs/dynaconf/vendor/ruamel/yaml/README.rst
new file mode 100644
index 000000000..2a99cb9d4
--- /dev/null
+++ b/libs/dynaconf/vendor/ruamel/yaml/README.rst
@@ -0,0 +1,752 @@
+
+ruamel.yaml
+===========
+
+``ruamel.yaml`` is a YAML 1.2 loader/dumper package for Python.
+
+:version: 0.16.10
+:updated: 2020-02-12
+:documentation: http://yaml.readthedocs.io
+:repository: https://bitbucket.org/ruamel/yaml
+:pypi: https://pypi.org/project/ruamel.yaml/
+
+
+Starting with version 0.15.0 the way YAML files are loaded and dumped
+is changing. See the API doc for details. Currently existing
+functionality will throw a warning before being changed/removed.
+**For production systems you should pin the version being used with
+``ruamel.yaml<=0.15``**. There might be bug fixes in the 0.14 series,
+but new functionality is likely only to be available via the new API.
+
+If your package uses ``ruamel.yaml`` and is not listed on PyPI, drop
+me an email, preferably with some information on how you use the
+package (or a link to bitbucket/github) and I'll keep you informed
+when the status of the API is stable enough to make the transition.
+
+* `Overview <http://yaml.readthedocs.org/en/latest/overview.html>`_
+* `Installing <http://yaml.readthedocs.org/en/latest/install.html>`_
+* `Basic Usage <http://yaml.readthedocs.org/en/latest/basicuse.html>`_
+* `Details <http://yaml.readthedocs.org/en/latest/detail.html>`_
+* `Examples <http://yaml.readthedocs.org/en/latest/example.html>`_
+* `API <http://yaml.readthedocs.org/en/latest/api.html>`_
+* `Differences with PyYAML <http://yaml.readthedocs.org/en/latest/pyyaml.html>`_
+
+.. image:: https://readthedocs.org/projects/yaml/badge/?version=stable
+ :target: https://yaml.readthedocs.org/en/stable
+
+.. image:: https://bestpractices.coreinfrastructure.org/projects/1128/badge
+ :target: https://bestpractices.coreinfrastructure.org/projects/1128
+
+.. image:: https://sourceforge.net/p/ruamel-yaml/code/ci/default/tree/_doc/_static/license.svg?format=raw
+ :target: https://opensource.org/licenses/MIT
+
+.. image:: https://sourceforge.net/p/ruamel-yaml/code/ci/default/tree/_doc/_static/pypi.svg?format=raw
+ :target: https://pypi.org/project/ruamel.yaml/
+
+.. image:: https://sourceforge.net/p/oitnb/code/ci/default/tree/_doc/_static/oitnb.svg?format=raw
+ :target: https://pypi.org/project/oitnb/
+
+.. image:: http://www.mypy-lang.org/static/mypy_badge.svg
+ :target: http://mypy-lang.org/
+
+ChangeLog
+=========
+
+.. should insert NEXT: at the beginning of line for next key (with empty line)
+
+0.16.10 (2020-02-12):
+ - (auto) updated image references in README to sourceforge
+
+0.16.9 (2020-02-11):
+ - update CHANGES
+
+0.16.8 (2020-02-11):
+ - update requirements so that ruamel.yaml.clib is installed for 3.8,
+ as it has become available (via manylinux builds)
+
+0.16.7 (2020-01-30):
+ - fix typechecking issue on TaggedScalar (reported by Jens Nielsen)
+ - fix error in dumping literal scalar in sequence with comments before element
+ (reported by `EJ Etherington <https://sourceforge.net/u/ejether/>`__)
+
+0.16.6 (2020-01-20):
+ - fix empty string mapping key roundtripping with preservation of quotes as `? ''`
+ (reported via email by Tomer Aharoni).
+ - fix incorrect state setting in class constructor (reported by `Douglas Raillard
+ <https://bitbucket.org/%7Bcf052d92-a278-4339-9aa8-de41923bb556%7D/>`__)
+ - adjust deprecation warning test for Hashable, as that no longer warns (reported
+ by `Jason Montleon <https://bitbucket.org/%7B8f377d12-8d5b-4069-a662-00a2674fee4e%7D/>`__)
+
+0.16.5 (2019-08-18):
+ - allow for ``YAML(typ=['unsafe', 'pytypes'])``
+
+0.16.4 (2019-08-16):
+ - fix output of TAG directives with # (reported by `Thomas Smith
+ <https://bitbucket.org/%7Bd4c57a72-f041-4843-8217-b4d48b6ece2f%7D/>`__)
+
+
+0.16.3 (2019-08-15):
+ - split construct_object
+ - change stuff back to keep mypy happy
+ - move setting of version based on YAML directive to scanner, allowing to
+ check for file version during TAG directive scanning
+
+0.16.2 (2019-08-15):
+ - preserve YAML and TAG directives on roundtrip, correctly output #
+ in URL for YAML 1.2 (both reported by `Thomas Smith
+ <https://bitbucket.org/%7Bd4c57a72-f041-4843-8217-b4d48b6ece2f%7D/>`__)
+
+0.16.1 (2019-08-08):
+ - Force the use of new version of ruamel.yaml.clib (reported by `Alex Joz
+ <https://bitbucket.org/%7B9af55900-2534-4212-976c-61339b6ffe14%7D/>`__)
+ - Allow '#' in tag URI as these are allowed in YAML 1.2 (reported by
+ `Thomas Smith
+ <https://bitbucket.org/%7Bd4c57a72-f041-4843-8217-b4d48b6ece2f%7D/>`__)
+
+0.16.0 (2019-07-25):
+ - split of C source that generates .so file to ruamel.yaml.clib
+ - duplicate keys are now an error when working with the old API as well
+
+0.15.100 (2019-07-17):
+ - fixing issue with dumping deep-copied data from commented YAML, by
+ providing both the memo parameter to __deepcopy__, and by allowing
+ startmarks to be compared on their content (reported by `Theofilos
+ Petsios
+ <https://bitbucket.org/%7Be550bc5d-403d-4fda-820b-bebbe71796d3%7D/>`__)
+
+0.15.99 (2019-07-12):
+ - add `py.typed` to distribution, based on a PR submitted by
+ `Michael Crusoe
+ <https://bitbucket.org/%7Bc9fbde69-e746-48f5-900d-34992b7860c8%7D/>`__
+ - merge PR 40 (also by Michael Crusoe) to more accurately specify
+ repository in the README (also reported in a misunderstood issue
+ some time ago)
+
+0.15.98 (2019-07-09):
+ - regenerate ext/_ruamel_yaml.c with Cython version 0.29.12, needed
+ for Python 3.8.0b2 (reported by `John Vandenberg
+ <https://bitbucket.org/%7B6d4e8487-3c97-4dab-a060-088ec50c682c%7D/>`__)
+
+0.15.97 (2019-06-06):
+ - regenerate ext/_ruamel_yaml.c with Cython version 0.29.10, needed for
+ Python 3.8.0b1
+ - regenerate ext/_ruamel_yaml.c with Cython version 0.29.9, needed for
+ Python 3.8.0a4 (reported by `Anthony Sottile
+ <https://bitbucket.org/%7B569cc8ea-0d9e-41cb-94a4-19ea517324df%7D/>`__)
+
+0.15.96 (2019-05-16):
+ - fix failure to indent comments on round-trip anchored block style
+ scalars in block sequence (reported by `William Kimball
+ <https://bitbucket.org/%7Bba35ed20-4bb0-46f8-bb5d-c29871e86a22%7D/>`__)
+
+0.15.95 (2019-05-16):
+ - fix failure to round-trip anchored scalars in block sequence
+ (reported by `William Kimball
+ <https://bitbucket.org/%7Bba35ed20-4bb0-46f8-bb5d-c29871e86a22%7D/>`__)
+ - wheel files for Python 3.4 no longer provided (`Python 3.4 EOL 2019-03-18
+ <https://www.python.org/dev/peps/pep-0429/>`__)
+
+0.15.94 (2019-04-23):
+ - fix missing line-break after end-of-file comments not ending in
+ line-break (reported by `Philip Thompson
+ <https://bitbucket.org/%7Be42ba205-0876-4151-bcbe-ccaea5bd13ce%7D/>`__)
+
+0.15.93 (2019-04-21):
+ - fix failure to parse empty implicit flow mapping key
+ - in YAML 1.1 plain scalars ``y``, ``n``, ``Y``, and ``N`` are now
+ correctly recognised as booleans and such strings are dumped quoted
+ (reported by `Marcel Bollmann
+ <https://bitbucket.org/%7Bd8850921-9145-4ad0-ac30-64c3bd9b036d%7D/>`__)
+
+0.15.92 (2019-04-16):
+ - fix failure to parse empty implicit block mapping key (reported by
+ `Nolan W <https://bitbucket.org/i2labs/>`__)
+
+0.15.91 (2019-04-05):
+ - allowing duplicate keys would not work for merge keys (reported by mamacdon on
+ `StackOverflow <https://stackoverflow.com/questions/55540686/>`__)
+
+0.15.90 (2019-04-04):
+ - fix issue with updating `CommentedMap` from list of tuples (reported by
+ `Peter Henry <https://bitbucket.org/mosbasik/>`__)
+
+0.15.89 (2019-02-27):
+ - fix for items with flow-mapping in block sequence output on single line
+ (reported by `Zahari Dim <https://bitbucket.org/zahari_dim/>`__)
+ - fix for safe dumping erroring in creation of ``RepresenterError`` when dumping namedtuple
+ (reported and solution by `Jaakko Kantojärvi <https://bitbucket.org/raphendyr/>`__)
+
+0.15.88 (2019-02-12):
+ - fix inclusion of Python code from the subpackage data (containing extra tests,
+ reported by `Florian Apolloner <https://bitbucket.org/apollo13/>`__)
+
+0.15.87 (2019-01-22):
+ - fix problem with empty lists and the code to reinsert merge keys (reported via email
+ by Zaloo)
+
+0.15.86 (2019-01-16):
+ - reinsert merge key in its old position (reported by grumbler on
+ `StackOverflow <https://stackoverflow.com/a/54206512/1307905>`__)
+ - fix for issue with non-ASCII anchor names (reported and fix
+ provided by Dandaleon Flux via email)
+ - fix for issue when parsing flow mapping value starting with colon (in pure Python only)
+ (reported by `FichteFoll <https://bitbucket.org/FichteFoll/>`__)
+
+0.15.85 (2019-01-08):
+ - the types used by ``SafeConstructor`` for mappings and sequences can
+ now be set by assigning to ``XXXConstructor.yaml_base_dict_type``
+ (and ``..._list_type``), preventing the need to copy two methods
+ with 50+ lines that had ``var = {}`` hardcoded. (Implemented to
+ help solve a feature request by `Anthony Sottile
+ <https://bitbucket.org/asottile/>`__ in an easier way)
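+
+ A minimal sketch of that usage (illustrative, not part of the original
+ entry; assumes ``ruamel.yaml`` is importable)::
+
+     from collections import OrderedDict
+     from ruamel.yaml import YAML
+     from ruamel.yaml.constructor import SafeConstructor
+
+     # have the safe constructor build OrderedDict instead of plain dict
+     SafeConstructor.yaml_base_dict_type = OrderedDict
+
+     yaml = YAML(typ='safe')
+     print(type(yaml.load('a: 1\nb: 2\n')))  # expect OrderedDict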
+
+0.15.84 (2019-01-07):
+ - fix for ``CommentedMap.copy()`` not returning ``CommentedMap``, let alone copying comments etc.
+ (reported by `Anthony Sottile <https://bitbucket.org/asottile/>`__)
+
+0.15.83 (2019-01-02):
+ - fix for bug in roundtripping aliases used as key (reported via email by Zaloo)
+
+0.15.82 (2018-12-28):
+ - anchors and aliases on scalar int, float, string and bool are now preserved. Anchors
+ do not need a referring alias for these (reported by
+ `Alex Harvey <https://bitbucket.org/alexharv074/>`__)
+ - anchors no longer lost on tagged objects when roundtripping (reported by `Zaloo
+ <https://bitbucket.org/zaloo/>`__)
+
+0.15.81 (2018-12-06):
+ - fix issue dumping methods of metaclass derived classes (reported and fix provided
+ by `Douglas Raillard <https://bitbucket.org/DouglasRaillard/>`__)
+
+0.15.80 (2018-11-26):
+ - fix issue emitting BEL character when round-tripping invalid folded input
+ (reported by Isaac on `StackOverflow <https://stackoverflow.com/a/53471217/1307905>`__)
+
+0.15.79 (2018-11-21):
+ - fix issue with anchors nested deeper than alias (reported by gaFF on
+ `StackOverflow <https://stackoverflow.com/a/53397781/1307905>`__)
+
+0.15.78 (2018-11-15):
+ - fix setup issue for 3.8 (reported by `Sidney Kuyateh
+ <https://bitbucket.org/autinerd/>`__)
+
+0.15.77 (2018-11-09):
+ - setting `yaml.sort_base_mapping_type_on_output = False` will prevent
+ explicit sorting by keys in the base representer of mappings. Roundtrip
+ already did not do this. Usage only makes real sense for Python 3.6+
+ (feature request by `Sebastian Gerber <https://bitbucket.org/spacemanspiff2007/>`__).
+ - implement Python version check in YAML metadata in ``_test/test_z_data.py``
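+
+ A minimal sketch of the sorting switch above (illustrative only)::
+
+     import sys
+     from ruamel.yaml import YAML
+
+     yaml = YAML(typ='safe')
+     yaml.sort_base_mapping_type_on_output = False  # keep insertion order
+     yaml.dump({'b': 2, 'a': 1}, sys.stdout)  # 'b' should stay before 'a'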
+
+0.15.76 (2018-11-01):
+ - fix issue with empty mapping and sequence loaded as flow-style
+ (mapping reported by `Min RK <https://bitbucket.org/minrk/>`__, sequence
+ by `Maged Ahmed <https://bitbucket.org/maged2/>`__)
+
+0.15.75 (2018-10-27):
+ - fix issue with single '?' scalar (reported by `Terrance
+ <https://bitbucket.org/OllieTerrance/>`__)
+ - fix issue with duplicate merge keys (prompted by `answering
+ <https://stackoverflow.com/a/52852106/1307905>`__ a
+ `StackOverflow question <https://stackoverflow.com/q/52851168/1307905>`__
+ by `math <https://stackoverflow.com/users/1355634/math>`__)
+
+0.15.74 (2018-10-17):
+ - fix dropping of comment on round-trip before a sequence item that is itself a sequence
+ (reported by `Thorsten Kampe <https://bitbucket.org/thorstenkampe/>`__)
+
+0.15.73 (2018-10-16):
+ - fix irregular output on pre-comment in sequence within sequence (reported
+ by `Thorsten Kampe <https://bitbucket.org/thorstenkampe/>`__)
+ - allow non-compact (i.e. next line) dumping sequence/mapping within sequence.
+
+0.15.72 (2018-10-06):
+ - fix regression on explicit 1.1 loading with the C based scanner/parser
+ (reported by `Tomas Vavra <https://bitbucket.org/xtomik/>`__)
+
+0.15.71 (2018-09-26):
+ - some of the tests now live in YAML files in the
+ `yaml.data <https://bitbucket.org/ruamel/yaml.data>`__ repository.
+ ``_test/test_z_data.py`` processes these.
+ - fix regression where handcrafted CommentedMaps could not be initiated (reported by
+ `Dan Helfman <https://bitbucket.org/dhelfman/>`__)
+ - fix regression with non-root literal scalars that needed indent indicator
+ (reported by `Clark Breyman <https://bitbucket.org/clarkbreyman/>`__)
+ - tag:yaml.org,2002:python/object/apply now also uses __qualname__ on PY3
+ (reported by `Douglas RAILLARD <https://bitbucket.org/DouglasRaillard/>`__)
+ - issue with self-referring object creation
+ (reported and fix by `Douglas RAILLARD <https://bitbucket.org/DouglasRaillard/>`__)
+
+0.15.70 (2018-09-21):
+ - reverted CommentedMap and CommentedSeq to subclass ordereddict resp. list,
+ reimplemented merge maps so that both ``dict(**commented_map_instance)`` and JSON
+ dumping works. This also allows checking with ``isinstance()`` on ``dict`` resp. ``list``.
+ (Proposed by `Stuart Berg <https://bitbucket.org/stuarteberg/>`__, with feedback
+ from `blhsing <https://stackoverflow.com/users/6890912/blhsing>`__ on
+ `StackOverflow <https://stackoverflow.com/q/52314186/1307905>`__)
+
+0.15.69 (2018-09-20):
+ - fix issue with dump_all gobbling end-of-document comments on parsing
+ (reported by `Pierre B. <https://bitbucket.org/octplane/>`__)
+
+0.15.68 (2018-09-20):
+ - fix issue with parsable, but incorrect output with nested flow-style sequences
+ (reported by `Dougal Seeley <https://bitbucket.org/dseeley/>`__)
+ - fix issue with loading Python objects that have __setstate__ and recursion in parameters
+ (reported by `Douglas RAILLARD <https://bitbucket.org/DouglasRaillard/>`__)
+
+0.15.67 (2018-09-19):
+ - fix issue with extra space inserted with non-root literal strings
+ (Issue reported and PR with fix provided by
+ `Naomi Seyfer <https://bitbucket.org/sixolet/>`__.)
+
+0.15.66 (2018-09-07):
+ - fix issue with fold indicating characters inserted in safe_load-ed folded strings
+ (reported by `Maximilian Hils <https://bitbucket.org/mhils/>`__).
+
+0.15.65 (2018-09-07):
+ - fix issue #232 revert to throw ParserError for unexpected ``]``
+ and ``}`` instead of IndexError. (Issue reported and PR with fix
+ provided by `Naomi Seyfer <https://bitbucket.org/sixolet/>`__.)
+ - added ``key`` and ``reverse`` parameter (suggested by Jannik Klemm via email)
+ - indent root level literal scalars that have directive or document end markers
+ at the beginning of a line
+
+0.15.64 (2018-08-30):
+ - support round-trip of tagged sequences: ``!Arg [a, {b: 1}]``
+ - single entry mappings in flow sequences now written by default without braces,
+ set ``yaml.brace_single_entry_mapping_in_flow_sequence=True`` to force
+ getting ``[a, {b: 1}, {c: {d: 2}}]`` instead of the default ``[a, b: 1, c: {d: 2}]``
+ - fix issue when roundtripping floats starting with a dot such as ``.5``
+ (reported by `Harrison Gregg <https://bitbucket.org/HarrisonGregg/>`__)
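+
+ A sketch of the brace setting above (illustrative; the output shown is
+ the expected form, not verified against this vendored copy)::
+
+     import sys
+     from ruamel.yaml import YAML
+
+     yaml = YAML()  # round-trip mode
+     data = yaml.load('[a: 1, {b: 2}]')
+     yaml.brace_single_entry_mapping_in_flow_sequence = True
+     yaml.dump(data, sys.stdout)  # expected: [{a: 1}, {b: 2}]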
+
+0.15.63 (2018-08-29):
+ - small fix only necessary for Windows users that don't use wheels.
+
+0.15.62 (2018-08-29):
+ - C based reader/scanner & emitter now allow setting of 1.2 as YAML version.
+ **The loading/dumping is still YAML 1.1 code**, so use the common subset of
+ YAML 1.2 and 1.1 (reported by `Ge Yang <https://bitbucket.org/yangge/>`__)
+
+0.15.61 (2018-08-23):
+ - support for round-tripping folded style scalars (initially requested
+ by `Johnathan Viduchinsky <https://bitbucket.org/johnathanvidu/>`__)
+ - update of C code
+ - speed up of scanning (~30% depending on the input)
+
+0.15.60 (2018-08-18):
+ - again allow single entry map in flow sequence context (reported by
+ `Lee Goolsbee <https://bitbucket.org/lgoolsbee/>`__)
+ - cleanup for mypy
+ - spurious print in library (reported by
+ `Lele Gaifax <https://bitbucket.org/lele/>`__), now automatically checked
+
+0.15.59 (2018-08-17):
+ - issue with C based loader and leading zeros (reported by
+ `Tom Hamilton Stubber <https://bitbucket.org/TomHamiltonStubber/>`__)
+
+0.15.58 (2018-08-17):
+ - simple mappings can now be used as keys when round-tripping::
+
+ {a: 1, b: 2}: hello world
+
+ although using the obvious operations (del, popitem) on the key will
+ fail, you can mutate it by going through its attributes. If you load the
+ above YAML into `d`, then changing the value is cumbersome::
+
+     d = {CommentedKeyMap([('a', 1), ('b', 2)]): "goodbye"}
+
+ and changing the key even more so::
+
+     d[CommentedKeyMap([('b', 1), ('a', 2)])] = d.pop(
+         CommentedKeyMap([('a', 1), ('b', 2)]))
+
+ (you can use a `dict` instead of a list of tuples (or ordereddict), but that might result
+ in a different order of the keys of the key in the output)
+ - check integers to dump with 1.2 patterns instead of 1.1 (reported by
+ `Lele Gaifax <https://bitbucket.org/lele/>`__)
+
+
+0.15.57 (2018-08-15):
+ - Fix that CommentedSeq could no longer be used in addition or sorting
+ (reported by `Christopher Wright <https://bitbucket.org/CJ-Wright4242/>`__)
+
+0.15.56 (2018-08-15):
+ - fix issue with ``python -O`` optimizing away code (reported, and detailed cause
+ pinpointed, by `Alex Grönholm <https://bitbucket.org/agronholm/>`__)
+
+0.15.55 (2018-08-14):
+ - unmade ``CommentedSeq`` a subclass of ``list``. It is now
+ indirectly a subclass of the standard
+ ``collections.abc.MutableSequence`` (without .abc if you are
+ still on Python2.7). If you do ``isinstance(yaml.load('[1, 2]'),
+ list)`` anywhere in your code, replace ``list`` with
+ ``MutableSequence``. Directly, ``CommentedSeq`` is a subclass of
+ the abstract baseclass ``ruamel.yaml.compat.MutableSliceableSequence``,
+ with the result that *(extended) slicing is supported on
+ ``CommentedSeq``*.
+ (reported by `Stuart Berg <https://bitbucket.org/stuarteberg/>`__)
+ - duplicate keys (or their values) with non-ascii now correctly
+ report in Python2, instead of raising a Unicode error.
+ (Reported by `Jonathan Pyle <https://bitbucket.org/jonathan_pyle/>`__)
+
+0.15.54 (2018-08-13):
+ - fix issue where a comment could pop-up twice in the output (reported by
+ `Mike Kazantsev <https://bitbucket.org/mk_fg/>`__ and by
+ `Nate Peterson <https://bitbucket.org/ndpete21/>`__)
+ - fix issue where JSON object (mapping) without spaces was not parsed
+ properly (reported by `Marc Schmidt <https://bitbucket.org/marcj/>`__)
+ - fix issue where comments after empty flow-style mappings were not emitted
+ (reported by `Qinfench Chen <https://bitbucket.org/flyin5ish/>`__)
+
+0.15.53 (2018-08-12):
+ - fix issue with flow style mapping with comments gobbled newline (reported
+ by `Christopher Lambert <https://bitbucket.org/XN137/>`__)
+ - fix issue where single '+' under YAML 1.2 was interpreted as
+ integer, erroring out (reported by `Jethro Yu
+ <https://bitbucket.org/jcppkkk/>`__)
+
+0.15.52 (2018-08-09):
+ - added `.copy()` mapping representation for round-tripping
+ (``CommentedMap``) to fix incomplete copies of merged mappings
+ (reported by `Will Richards
+ <https://bitbucket.org/will_richards/>`__)
+ - Also unmade that class a subclass of ordereddict to solve incorrect behaviour
+ for ``{**merged-mapping}`` and ``dict(**merged-mapping)`` (reported independently by
+ `Tim Olsson <https://bitbucket.org/tgolsson/>`__ and
+ `Filip Matzner <https://bitbucket.org/FloopCZ/>`__)
+
+0.15.51 (2018-08-08):
+ - Fix method name dumps (were not dotted) and loads (reported by `Douglas Raillard
+ <https://bitbucket.org/DouglasRaillard/>`__)
+ - Fix spurious trailing white-space caused when the comment start
+ column was no longer reached and there was no actual EOL comment
+ (e.g. following empty line) and doing substitutions, or when
+ quotes around scalars got dropped. (reported by `Thomas Guillet
+ <https://bitbucket.org/guillett/>`__)
+
+0.15.50 (2018-08-05):
+ - Allow ``YAML()`` as a context manager for output, thereby making it much easier
+ to generate multi-documents in a stream.
+ - Fix issue with incorrect type information for `load()` and `dump()` (reported
+ by `Jimbo Jim <https://bitbucket.org/jimbo1qaz/>`__)
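+
+ A minimal sketch of the context manager usage above (illustrative)::
+
+     import sys
+     from ruamel.yaml import YAML
+
+     with YAML(output=sys.stdout) as yaml:
+         yaml.explicit_start = True  # separate documents with '---'
+         yaml.dump({'doc': 1})
+         yaml.dump({'doc': 2})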
+
+0.15.49 (2018-08-05):
+ - fix preservation of leading newlines in root level literal style scalar,
+ and preserve comment after literal style indicator (``| # some comment``).
+ Both are needed for round-tripping multi-doc streams in
+ `ryd <https://pypi.org/project/ryd/>`__.
+
+0.15.48 (2018-08-03):
+ - housekeeping: ``oitnb`` for formatting, mypy 0.620 upgrade and conformity
+
+0.15.47 (2018-07-31):
+ - fix broken 3.6 manylinux1, the result of an unclean ``build`` (reported by
+ `Roman Sichnyi <https://bitbucket.org/rsichnyi-gl/>`__)
+
+
+0.15.46 (2018-07-29):
+ - fixed DeprecationWarning for importing from ``collections`` on 3.7
+ (issue 210, reported by `Reinoud Elhorst
+ <https://bitbucket.org/reinhrst/>`__). It was `difficult to find
+ why tox/pytest did not report
+ <https://stackoverflow.com/q/51573204/1307905>`__ and just as time
+ consuming to actually `fix
+ <https://stackoverflow.com/a/51573205/1307905>`__ the tests.
+
+0.15.45 (2018-07-26):
+ - After adding failing test for ``YAML.load_all(Path())``, remove StopIteration
+ (PR provided by `Zachary Buhman <https://bitbucket.org/buhman/>`__,
+ also reported by `Steven Hiscocks <https://bitbucket.org/sdhiscocks/>`__).
+
+0.15.44 (2018-07-14):
+ - Correct loading plain scalars consisting of numerals only and
+ starting with `0`, when not explicitly specifying YAML version
+ 1.1. This also fixes the issue about dumping string `'019'` as
+ plain scalars as reported by `Min RK
+ <https://bitbucket.org/minrk/>`__, that prompted this change.
+
+0.15.43 (2018-07-12):
+ - merge PR33: Python2.7 on Windows is narrow, but has no
+ ``sysconfig.get_config_var('Py_UNICODE_SIZE')``. (merge provided by
+ `Marcel Bargull <https://bitbucket.org/mbargull/>`__)
+ - ``register_class()`` now returns class (proposed by
+ `Mike Nerone <https://bitbucket.org/Manganeez/>`__)
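+
+ Because ``register_class()`` returns the class, it can be used as a
+ decorator (illustrative sketch)::
+
+     from ruamel.yaml import YAML
+
+     yaml = YAML()
+
+     @yaml.register_class
+     class User:
+         yaml_tag = '!User'
+
+         def __init__(self, name):
+             self.name = name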
+
+0.15.42 (2018-07-01):
+ - fix regression showing only on narrow Python 2.7 (py27mu) builds
+ (with help from
+ `Marcel Bargull <https://bitbucket.org/mbargull/>`__ and
+ `Colm O'Connor <https://bitbucket.org/colmoconnorgithub/>`__).
+ - run pre-commit ``tox`` on Python 2.7 wide and narrow, as well as
+ 3.4/3.5/3.6/3.7/pypy
+
+0.15.41 (2018-06-27):
+ - add detection of C-compile failure (investigation prompted by
+ `StackOverflow <https://stackoverflow.com/a/51057399/1307905>`__ by
+ `Emmanuel Blot <https://stackoverflow.com/users/8233409/emmanuel-blot>`__),
+ which was removed when the package no longer depended on ``libyaml``;
+ compiling the C extensions still needs a compiler though.
+
+0.15.40 (2018-06-18):
+ - added links to landing places as suggested in issue 190 by
+ `KostisA <https://bitbucket.org/ankostis/>`__
+ - fixes issue #201: decoding unicode escaped tags on Python2, reported
+ by `Dan Abolafia <https://bitbucket.org/danabo/>`__
+
+0.15.39 (2018-06-17):
+ - merge PR27 improving package startup time (and loading when regexp not
+ actually used), provided by
+ `Marcel Bargull <https://bitbucket.org/mbargull/>`__
+
+0.15.38 (2018-06-13):
+ - fix for losing precision when roundtripping floats by
+ `Rolf Wojtech <https://bitbucket.org/asomov/>`__
+ - fix for hardcoded dir separator not working for Windows by
+ `Nuno André <https://bitbucket.org/nu_no/>`__
+ - typo fix by `Andrey Somov <https://bitbucket.org/asomov/>`__
+
+0.15.37 (2018-03-21):
+ - again trying to create installable files for issue 187
+
+0.15.36 (2018-02-07):
+ - fix issue 187, incompatibility of C extension with 3.7 (reported by
+ Daniel Blanchard)
+
+0.15.35 (2017-12-03):
+ - allow ``None`` as stream when specifying ``transform`` parameters to
+ ``YAML.dump()``.
+ This is useful if the transforming function doesn't return a meaningful value
+ (inspired by `StackOverflow <https://stackoverflow.com/q/47614862/1307905>`__ by
+ `rsaw <https://stackoverflow.com/users/406281/rsaw>`__).
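+
+ A minimal sketch (illustrative; the transform's return value is ignored
+ when the stream is ``None``)::
+
+     from ruamel.yaml import YAML
+
+     def show(s):
+         print(s, end='')  # side effect only
+
+     YAML().dump({'a': 1}, None, transform=show)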
+
+0.15.34 (2017-09-17):
+ - fix for issue 157: CDumper not dumping floats (reported by Jan Smitka)
+
+0.15.33 (2017-08-31):
+ - support for "undefined" round-tripping tagged scalar objects (in addition to
+ tagged mapping object). Inspired by a use case presented by Matthew Patton
+ on `StackOverflow <https://stackoverflow.com/a/45967047/1307905>`__.
+ - fix issue 148: replace cryptic error message when using !!timestamp with an
+ incorrectly formatted or non-scalar value. Reported by FichteFoll.
+
+0.15.32 (2017-08-21):
+ - allow setting ``yaml.default_flow_style = None`` (default: ``False``)
+ for ``typ='rt'``.
+ - fix for issue 149: multiplications on ``ScalarFloat`` now return ``float``
+ (reported by [email protected])
+
+0.15.31 (2017-08-15):
+ - fix Comment dumping
+
+0.15.30 (2017-08-14):
+ - fix for issue with "compact JSON" not parsing: ``{"in":{},"out":{}}``
+ (reported on `StackOverflow <https://stackoverflow.com/q/45681626/1307905>`__ by
+ `mjalkio <https://stackoverflow.com/users/5130525/mjalkio>`__)
+
+0.15.29 (2017-08-14):
+ - fix issue #51: different indents for mappings and sequences (reported by
+ Alex Harvey)
+ - fix for flow sequence/mapping as element/value of block sequence with
+ sequence-indent minus dash-offset not equal two.
+
+0.15.28 (2017-08-13):
+ - fix issue #61: merge of merge cannot be __repr__-ed (reported by Tal Liron)
+
+0.15.27 (2017-08-13):
+ - fix issue 62, YAML 1.2 allows ``?`` and ``:`` in plain scalars if non-ambiguous
+ (reported by nowox)
+ - fix lists within lists which would make comments disappear
+
+0.15.26 (2017-08-10):
+ - fix for disappearing comment after empty flow sequence (reported by
+ oit-tzhimmash)
+
+0.15.25 (2017-08-09):
+ - fix for problem with dumping (unloaded) floats (reported by eyenseo)
+
+0.15.24 (2017-08-09):
+ - added ScalarFloat which supports roundtripping of 23.1, 23.100,
+ 42.00E+56, 0.0, -0.0 etc. while keeping the format. Underscores in mantissas
+ are not preserved/supported (yet, is anybody using that?).
+ - (finally) fixed longstanding issue 23 (reported by `Anthony Sottile
+ <https://bitbucket.org/asottile/>`__), now handling comment between block
+ mapping key and value correctly
+ - warn on YAML 1.1 float input that is incorrect (triggered by invalid YAML
+ provided by Cecil Curry)
+ - allow setting of boolean representation (`false`, `true`) by using:
+ ``yaml.boolean_representation = [u'False', u'True']``
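+
+ A minimal sketch of the boolean representation setting (illustrative)::
+
+     import sys
+     from ruamel.yaml import YAML
+
+     yaml = YAML()
+     yaml.boolean_representation = ['False', 'True']
+     yaml.dump({'flag': True}, sys.stdout)  # flag: True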
+
+0.15.23 (2017-08-01):
+ - fix for round_tripping integers on 2.7.X > sys.maxint (reported by ccatterina)
+
+0.15.22 (2017-07-28):
+ - fix for round_tripping single exclamation mark tags doubling (reported and fix by Jan Brezina)
+
+0.15.21 (2017-07-25):
+ - fix for writing unicode in new API (reported on
+ `StackOverflow <https://stackoverflow.com/a/45281922/1307905>`__)
+
+0.15.20 (2017-07-23):
+ - wheels for windows including C extensions
+
+0.15.19 (2017-07-13):
+ - added object constructor for rt, decorator ``yaml_object`` to replace YAMLObject.
+ - fix for problem using load_all with Path() instance
+ - fix for load_all in combination with zero indent block style literal
+ (``pure=True`` only!)
+
+0.15.18 (2017-07-04):
+ - missing ``pure`` attribute on ``YAML`` useful for implementing `!include` tag
+ constructor for `including YAML files in a YAML file
+ <https://stackoverflow.com/a/44913652/1307905>`__
+ - some documentation improvements
+ - trigger of doc build on new revision
+
+0.15.17 (2017-07-03):
+ - support for Unicode supplementary Plane **output**
+ (input was already supported, triggered by
+ `this <https://stackoverflow.com/a/44875714/1307905>`__ Stack Overflow Q&A)
+
+0.15.16 (2017-07-01):
+ - minor typing issues (reported and fix provided by
+ `Manvendra Singh <https://bitbucket.org/manu-chroma/>`__)
+ - small doc improvements
+
+0.15.15 (2017-06-27):
+ - fix for issue 135, typ='safe' not dumping in Python 2.7
+ (reported by `Andrzej Ostrowski <https://bitbucket.org/aostr123/>`__)
+
+0.15.14 (2017-06-25):
+ - fix for issue 133, in setup.py: change ModuleNotFoundError to
+ ImportError (reported and fix by
+ `Asley Drake <https://github.com/aldraco>`__)
+
+0.15.13 (2017-06-24):
+ - suppress duplicate key warning on mappings with merge keys (reported by
+ Cameron Sweeney)
+
+0.15.12 (2017-06-24):
+ - remove fatal dependency of setup.py on wheel package (reported by
+ Cameron Sweeney)
+
+0.15.11 (2017-06-24):
+ - fix for issue 130, regression in nested merge keys (reported by
+ `David Fee <https://bitbucket.org/dfee/>`__)
+
+0.15.10 (2017-06-23):
+ - top level PreservedScalarString not indented if not explicitly asked to
+ - remove Makefile (not very useful anyway)
+ - some mypy additions
+
+0.15.9 (2017-06-16):
+ - fix for issue 127: tagged scalars were always quoted and separated
+ by a newline when in a block sequence (reported and largely fixed by
+ `Tommy Wang <https://bitbucket.org/twang817/>`__)
+
+0.15.8 (2017-06-15):
+ - allow plug-in install via ``install ruamel.yaml[jinja2]``
+
+0.15.7 (2017-06-14):
+ - add plug-in mechanism for load/dump pre resp. post-processing
+
+0.15.6 (2017-06-10):
+ - a set() with duplicate elements now throws an error in rt loading
+ - support for toplevel column zero literal/folded scalar in explicit documents
+
+0.15.5 (2017-06-08):
+ - fix for repeated `load()` on a single `YAML()` instance failing.
+
+0.15.4 (2017-06-08):
+ - `transform` parameter on dump that expects a function taking a
+ string and returning a string. This allows transformation of the output
+ before it is written to stream. This forces creation of the complete output in memory!
+ - some updates to the docs
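+
+ A minimal sketch of the ``transform`` parameter above (illustrative)::
+
+     import sys
+     from ruamel.yaml import YAML
+
+     # the complete output is built in memory, transformed, then written
+     YAML().dump({'a': 1}, sys.stdout, transform=str.upper)  # A: 1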
+
+0.15.3 (2017-06-07):
+ - No longer try to compile C extensions on Windows. Compilation can be forced by setting
+ the environment variable `RUAMEL_FORCE_EXT_BUILD` to some value
+ before starting the `pip install`.
+
+0.15.2 (2017-06-07):
+ - update to conform to mypy 0.511: mypy --strict
+
+0.15.1 (2017-06-07):
+ - `duplicate keys <http://yaml.readthedocs.io/en/latest/api.html#duplicate-keys>`__
+ in mappings generate an error (in the old API this change generates a warning until 0.16)
+ - dependency on ruamel.ordereddict for 2.7 now via extras_require
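+
+ A minimal sketch of the duplicate-key error above (illustrative;
+ ``DuplicateKeyError`` lives in ``ruamel.yaml.constructor``)::
+
+     from ruamel.yaml import YAML
+     from ruamel.yaml.constructor import DuplicateKeyError
+
+     try:
+         YAML().load('a: 1\na: 2\n')
+     except DuplicateKeyError:
+         print('duplicate keys rejected')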
+
+0.15.0 (2017-06-04):
+ - it is now allowed to pass in a ``pathlib.Path`` as "stream" parameter to all
+ load/dump functions
+ - passing in a non-supported object (e.g. a string) as "stream" will result in a
+ much more meaningful YAMLStreamError.
+ - assigning a normal string value to an existing CommentedMap key or CommentedSeq
+ element will result in a value cast to the previous value's type if possible.
+ - added ``YAML`` class for new API
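+
+ A minimal sketch of ``pathlib.Path`` as stream (illustrative)::
+
+     from pathlib import Path
+     from ruamel.yaml import YAML
+
+     yaml = YAML()
+     yaml.dump({'a': 1}, Path('out.yaml'))
+     data = yaml.load(Path('out.yaml'))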
+
+0.14.12 (2017-05-14):
+ - fix for issue 119, deepcopy not returning subclasses (reported and PR by
+ Constantine Evans <[email protected]>)
+
+0.14.11 (2017-05-01):
+ - fix for issue 103 allowing implicit documents after document end marker line (``...``)
+ in YAML 1.2
+
+0.14.10 (2017-04-26):
+ - fix problem with emitting using cyaml
+
+0.14.9 (2017-04-22):
+ - remove dependency on ``typing`` while still supporting ``mypy``
+ (http://stackoverflow.com/a/43516781/1307905)
+ - fix unclear statement in doc that said 2.6 is supported (reported by feetdust)
+
+0.14.8 (2017-04-19):
+ - fix Text not available on 3.5.0 and 3.5.1, now proactively setting version guards
+ on all files (reported by `João Paulo Magalhães <https://bitbucket.org/jpmag/>`__)
+
+0.14.7 (2017-04-18):
+ - round trip of integers (decimal, octal, hex, binary) now preserve
+ leading zero(s) padding and underscores. Underscores are presumed
+ to be at regular distances (i.e. ``0o12_345_67`` dumps back as
+ ``0o1_23_45_67`` as the space from the last digit to the
+ underscore before that is the determining factor).
+
+0.14.6 (2017-04-14):
+ - binary, octal and hex integers are now preserved by default. This
+ was a known deficiency. Working on this was prompted by the issue report (112)
+ from devnoname120, as well as the additional experience with `.replace()`
+ on `scalarstring` classes.
+ - fix issue 114: cannot install on Buildozer (reported by mixmastamyk).
+ Setting env. var ``RUAMEL_NO_PIP_INSTALL_CHECK`` will suppress ``pip``-check.
+
+0.14.5 (2017-04-04):
+ - fix issue 109: None not dumping correctly at top level (reported by Andrea Censi)
+ - fix issue 110: .replace on Preserved/DoubleQuoted/SingleQuoted ScalarString
+ would give back "normal" string (reported by sandres23)
+
+0.14.4 (2017-03-31):
+ - fix readme
+
+0.14.3 (2017-03-31):
+ - fix for 0o52 not being a string in YAML 1.1 (reported on
+ `StackOverflow Q&A 43138503 <http://stackoverflow.com/a/43138503/1307905>`__ by
+ `Frank D <http://stackoverflow.com/users/7796630/frank-d>`__)
+
+0.14.2 (2017-03-23):
+ - fix for old default pip on Ubuntu 14.04 (reported by Sébastien Maccagnoni-Munch)
+
+0.14.1 (2017-03-22):
+ - fix Text not available on 3.5.0 and 3.5.1 (reported by Charles Bouchard-Légaré)
+
+0.14.0 (2017-03-21):
+ - updates for mypy --strict
+ - preparation for moving away from inheritance in Loader and Dumper: calls from e.g.
+ the Representer to Serializer.serialize() are now done via the attribute
+ .serializer.serialize(). Usage of .serialize() outside of Serializer will be
+ deprecated soon
+ - some extra tests on main.py functions
+
+----
+
+For older changes see the file
+`CHANGES <https://bitbucket.org/ruamel/yaml/src/default/CHANGES>`_
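The 0.15.x entries above revolve around the then-new `YAML()` API: round-trip load/dump, `pathlib.Path` accepted as a stream, and duplicate mapping keys treated as an error. A minimal sketch of that usage against this vendored copy (the file name is hypothetical):

    from pathlib import Path
    from dynaconf.vendor.ruamel.yaml import YAML

    yaml = YAML()                          # round-trip mode by default
    data = yaml.load(Path('config.yaml'))  # pathlib.Path accepted as "stream"
    data['key'] = 'new value'              # comments and quoting survive the edit
    yaml.dump(data, Path('config.yaml'))   # repeated load()/dump() on one instance is fine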
diff --git a/libs/dynaconf/vendor/ruamel/yaml/__init__.py b/libs/dynaconf/vendor/ruamel/yaml/__init__.py
new file mode 100644
index 000000000..8663a56da
--- /dev/null
+++ b/libs/dynaconf/vendor/ruamel/yaml/__init__.py
@@ -0,0 +1,60 @@
+# coding: utf-8
+
+from __future__ import print_function, absolute_import, division, unicode_literals
+
+if False: # MYPY
+ from typing import Dict, Any # NOQA
+
+_package_data = dict(
+ full_package_name='ruamel.yaml',
+ version_info=(0, 16, 10),
+ __version__='0.16.10',
+ author='Anthon van der Neut',
+ author_email='[email protected]',
+ description='ruamel.yaml is a YAML parser/emitter that supports roundtrip preservation of comments, seq/map flow style, and map key order', # NOQA
+ entry_points=None,
+ since=2014,
+ extras_require={':platform_python_implementation=="CPython" and python_version<="2.7"': [
+ 'ruamel.ordereddict',
+ ], ':platform_python_implementation=="CPython" and python_version<"3.9"': [
+ 'ruamel.yaml.clib>=0.1.2',
+ ], 'jinja2': ['ruamel.yaml.jinja2>=0.2'], 'docs': ['ryd']},
+ # NOQA
+ # test='#include "ext/yaml.h"\n\nint main(int argc, char* argv[])\n{\nyaml_parser_t parser;\nparser = parser; /* prevent warning */\nreturn 0;\n}\n', # NOQA
+ classifiers=[
+ 'Programming Language :: Python :: 2.7',
+ 'Programming Language :: Python :: 3.5',
+ 'Programming Language :: Python :: 3.6',
+ 'Programming Language :: Python :: 3.7',
+ 'Programming Language :: Python :: 3.8',
+ 'Programming Language :: Python :: Implementation :: CPython',
+ 'Programming Language :: Python :: Implementation :: PyPy',
+ 'Programming Language :: Python :: Implementation :: Jython',
+ 'Topic :: Software Development :: Libraries :: Python Modules',
+ 'Topic :: Text Processing :: Markup',
+ 'Typing :: Typed',
+ ],
+ keywords='yaml 1.2 parser round-trip preserve quotes order config',
+ read_the_docs='yaml',
+ supported=[(2, 7), (3, 5)], # minimum
+ tox=dict(
+ env='*', # remove 'pn', no longer test narrow Python 2.7 for unicode patterns and PyPy
+ deps='ruamel.std.pathlib',
+ fl8excl='_test/lib',
+ ),
+ universal=True,
+ rtfd='yaml',
+) # type: Dict[Any, Any]
+
+
+version_info = _package_data['version_info']
+__version__ = _package_data['__version__']
+
+try:
+ from .cyaml import * # NOQA
+
+ __with_libyaml__ = True
+except (ImportError, ValueError): # for Jython
+ __with_libyaml__ = False
+
+from dynaconf.vendor.ruamel.yaml.main import * # NOQA
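The vendored `__init__.py` keeps all metadata in `_package_data` and records whether the optional C extension imported. A small sketch of what consumers can rely on (values per `_package_data` above):

    from dynaconf.vendor.ruamel.yaml import __version__, version_info, __with_libyaml__

    assert version_info == (0, 16, 10)
    print(__version__)       # '0.16.10'
    print(__with_libyaml__)  # True only when the cyaml/C extension loaded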
diff --git a/libs/dynaconf/vendor/ruamel/yaml/anchor.py b/libs/dynaconf/vendor/ruamel/yaml/anchor.py
new file mode 100644
index 000000000..aa649f552
--- /dev/null
+++ b/libs/dynaconf/vendor/ruamel/yaml/anchor.py
@@ -0,0 +1,20 @@
+
+if False: # MYPY
+ from typing import Any, Dict, Optional, List, Union, Iterator # NOQA
+
+anchor_attrib = '_yaml_anchor'
+
+
+class Anchor(object):
+ __slots__ = 'value', 'always_dump'
+ attrib = anchor_attrib
+
+ def __init__(self):
+ # type: () -> None
+ self.value = None
+ self.always_dump = False
+
+ def __repr__(self):
+ # type: () -> Any
+ ad = ', (always dump)' if self.always_dump else ""
+ return 'Anchor({!r}{})'.format(self.value, ad)
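`Anchor` objects are attached lazily to round-trip containers (via `CommentedBase.anchor` in comments.py below), and the emitter reuses the anchor name when the same object appears more than once. A hedged sketch, assuming the vendored import path:

    import sys
    from dynaconf.vendor.ruamel.yaml import YAML
    from dynaconf.vendor.ruamel.yaml.comments import CommentedMap

    m = CommentedMap(x=1)
    m.yaml_set_anchor('defaults')
    outer = CommentedMap()
    outer['base'] = m
    outer['alias'] = m              # second reference should dump as *defaults
    YAML().dump(outer, sys.stdout)
    # base: &defaults
    #   x: 1
    # alias: *defaults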
diff --git a/libs/dynaconf/vendor/ruamel/yaml/comments.py b/libs/dynaconf/vendor/ruamel/yaml/comments.py
new file mode 100644
index 000000000..1ca210af4
--- /dev/null
+++ b/libs/dynaconf/vendor/ruamel/yaml/comments.py
@@ -0,0 +1,1149 @@
+# coding: utf-8
+
+from __future__ import absolute_import, print_function
+
+"""
+stuff to deal with comments and formatting on dict/list/ordereddict/set
+these are not really related, formatting could be factored out as
+a separate base
+"""
+
+import sys
+import copy
+
+
+from .compat import ordereddict # type: ignore
+from .compat import PY2, string_types, MutableSliceableSequence
+from .scalarstring import ScalarString
+from .anchor import Anchor
+
+if PY2:
+ from collections import MutableSet, Sized, Set, Mapping
+else:
+ from collections.abc import MutableSet, Sized, Set, Mapping
+
+if False: # MYPY
+ from typing import Any, Dict, Optional, List, Union, Iterator # NOQA
+
+# fmt: off
+__all__ = ['CommentedSeq', 'CommentedKeySeq',
+ 'CommentedMap', 'CommentedOrderedMap',
+ 'CommentedSet', 'comment_attrib', 'merge_attrib']
+# fmt: on
+
+comment_attrib = '_yaml_comment'
+format_attrib = '_yaml_format'
+line_col_attrib = '_yaml_line_col'
+merge_attrib = '_yaml_merge'
+tag_attrib = '_yaml_tag'
+
+
+class Comment(object):
+ # sys.getsizeof tested the Comment objects: __slots__ makes them bigger,
+ # and adding self.end did not matter
+ __slots__ = 'comment', '_items', '_end', '_start'
+ attrib = comment_attrib
+
+ def __init__(self):
+ # type: () -> None
+ self.comment = None # [post, [pre]]
+ # map key (mapping/omap/dict) or index (sequence/list) to a list of
+ # dict: post_key, pre_key, post_value, pre_value
+ # list: pre item, post item
+ self._items = {} # type: Dict[Any, Any]
+ # self._start = [] # should not put these on first item
+ self._end = [] # type: List[Any] # end of document comments
+
+ def __str__(self):
+ # type: () -> str
+ if bool(self._end):
+ end = ',\n end=' + str(self._end)
+ else:
+ end = ""
+ return 'Comment(comment={0},\n items={1}{2})'.format(self.comment, self._items, end)
+
+ @property
+ def items(self):
+ # type: () -> Any
+ return self._items
+
+ @property
+ def end(self):
+ # type: () -> Any
+ return self._end
+
+ @end.setter
+ def end(self, value):
+ # type: (Any) -> None
+ self._end = value
+
+ @property
+ def start(self):
+ # type: () -> Any
+ return self._start
+
+ @start.setter
+ def start(self, value):
+ # type: (Any) -> None
+ self._start = value
+
+
+# to distinguish key from None
+def NoComment():
+ # type: () -> None
+ pass
+
+
+class Format(object):
+ __slots__ = ('_flow_style',)
+ attrib = format_attrib
+
+ def __init__(self):
+ # type: () -> None
+ self._flow_style = None # type: Any
+
+ def set_flow_style(self):
+ # type: () -> None
+ self._flow_style = True
+
+ def set_block_style(self):
+ # type: () -> None
+ self._flow_style = False
+
+ def flow_style(self, default=None):
+ # type: (Optional[Any]) -> Any
+ """if default (the flow_style) is None, the flow style tacked on to
+ the object explicitly will be taken. If that is None as well the
+ default flow style rules the format down the line, or the type
+ of the constituent values (simple -> flow, map/list -> block)"""
+ if self._flow_style is None:
+ return default
+ return self._flow_style
+
+
+class LineCol(object):
+ attrib = line_col_attrib
+
+ def __init__(self):
+ # type: () -> None
+ self.line = None
+ self.col = None
+ self.data = None # type: Optional[Dict[Any, Any]]
+
+ def add_kv_line_col(self, key, data):
+ # type: (Any, Any) -> None
+ if self.data is None:
+ self.data = {}
+ self.data[key] = data
+
+ def key(self, k):
+ # type: (Any) -> Any
+ return self._kv(k, 0, 1)
+
+ def value(self, k):
+ # type: (Any) -> Any
+ return self._kv(k, 2, 3)
+
+ def _kv(self, k, x0, x1):
+ # type: (Any, Any, Any) -> Any
+ if self.data is None:
+ return None
+ data = self.data[k]
+ return data[x0], data[x1]
+
+ def item(self, idx):
+ # type: (Any) -> Any
+ if self.data is None:
+ return None
+ return self.data[idx][0], self.data[idx][1]
+
+ def add_idx_line_col(self, key, data):
+ # type: (Any, Any) -> None
+ if self.data is None:
+ self.data = {}
+ self.data[key] = data
+
+
+class Tag(object):
+ """store tag information for roundtripping"""
+
+ __slots__ = ('value',)
+ attrib = tag_attrib
+
+ def __init__(self):
+ # type: () -> None
+ self.value = None
+
+ def __repr__(self):
+ # type: () -> Any
+ return '{0.__class__.__name__}({0.value!r})'.format(self)
+
+
+class CommentedBase(object):
+ @property
+ def ca(self):
+ # type: () -> Any
+ if not hasattr(self, Comment.attrib):
+ setattr(self, Comment.attrib, Comment())
+ return getattr(self, Comment.attrib)
+
+ def yaml_end_comment_extend(self, comment, clear=False):
+ # type: (Any, bool) -> None
+ if comment is None:
+ return
+ if clear or self.ca.end is None:
+ self.ca.end = []
+ self.ca.end.extend(comment)
+
+ def yaml_key_comment_extend(self, key, comment, clear=False):
+ # type: (Any, Any, bool) -> None
+ r = self.ca._items.setdefault(key, [None, None, None, None])
+ if clear or r[1] is None:
+ if comment[1] is not None:
+ assert isinstance(comment[1], list)
+ r[1] = comment[1]
+ else:
+ r[1].extend(comment[0])
+ r[0] = comment[0]
+
+ def yaml_value_comment_extend(self, key, comment, clear=False):
+ # type: (Any, Any, bool) -> None
+ r = self.ca._items.setdefault(key, [None, None, None, None])
+ if clear or r[3] is None:
+ if comment[1] is not None:
+ assert isinstance(comment[1], list)
+ r[3] = comment[1]
+ else:
+ r[3].extend(comment[0])
+ r[2] = comment[0]
+
+ def yaml_set_start_comment(self, comment, indent=0):
+ # type: (Any, Any) -> None
+ """overwrites any preceding comment lines on an object
+ expects comment to be without `#`; it may have multiple lines
+ """
+ from .error import CommentMark
+ from .tokens import CommentToken
+
+ pre_comments = self._yaml_get_pre_comment()
+ if comment[-1] == '\n':
+ comment = comment[:-1] # strip final newline if there
+ start_mark = CommentMark(indent)
+ for com in comment.split('\n'):
+ pre_comments.append(CommentToken('# ' + com + '\n', start_mark, None))
+
+ def yaml_set_comment_before_after_key(
+ self, key, before=None, indent=0, after=None, after_indent=None
+ ):
+ # type: (Any, Any, Any, Any, Any) -> None
+ """
+ expects comment (before/after) to be without `#`; it may have multiple lines
+ """
+ from dynaconf.vendor.ruamel.yaml.error import CommentMark
+ from dynaconf.vendor.ruamel.yaml.tokens import CommentToken
+
+ def comment_token(s, mark):
+ # type: (Any, Any) -> Any
+ # handle empty lines as having no comment
+ return CommentToken(('# ' if s else "") + s + '\n', mark, None)
+
+ if after_indent is None:
+ after_indent = indent + 2
+ if before and (len(before) > 1) and before[-1] == '\n':
+ before = before[:-1] # strip final newline if there
+ if after and after[-1] == '\n':
+ after = after[:-1] # strip final newline if there
+ start_mark = CommentMark(indent)
+ c = self.ca.items.setdefault(key, [None, [], None, None])
+ if before == '\n':
+ c[1].append(comment_token("", start_mark))
+ elif before:
+ for com in before.split('\n'):
+ c[1].append(comment_token(com, start_mark))
+ if after:
+ start_mark = CommentMark(after_indent)
+ if c[3] is None:
+ c[3] = []
+ for com in after.split('\n'):
+ c[3].append(comment_token(com, start_mark)) # type: ignore
+
+ @property
+ def fa(self):
+ # type: () -> Any
+ """format attribute
+
+ set_flow_style()/set_block_style()"""
+ if not hasattr(self, Format.attrib):
+ setattr(self, Format.attrib, Format())
+ return getattr(self, Format.attrib)
+
+ def yaml_add_eol_comment(self, comment, key=NoComment, column=None):
+ # type: (Any, Optional[Any], Optional[Any]) -> None
+ """
+ eol comments should start with ' #' (though at the beginning of the
+ line the space doesn't have to precede the #). The column index is
+ for the # mark.
+ """
+ from .tokens import CommentToken
+ from .error import CommentMark
+
+ if column is None:
+ try:
+ column = self._yaml_get_column(key)
+ except AttributeError:
+ column = 0
+ if comment[0] != '#':
+ comment = '# ' + comment
+ if column is None:
+ if comment[0] == '#':
+ comment = ' ' + comment
+ column = 0
+ start_mark = CommentMark(column)
+ ct = [CommentToken(comment, start_mark, None), None]
+ self._yaml_add_eol_comment(ct, key=key)
+
+ @property
+ def lc(self):
+ # type: () -> Any
+ if not hasattr(self, LineCol.attrib):
+ setattr(self, LineCol.attrib, LineCol())
+ return getattr(self, LineCol.attrib)
+
+ def _yaml_set_line_col(self, line, col):
+ # type: (Any, Any) -> None
+ self.lc.line = line
+ self.lc.col = col
+
+ def _yaml_set_kv_line_col(self, key, data):
+ # type: (Any, Any) -> None
+ self.lc.add_kv_line_col(key, data)
+
+ def _yaml_set_idx_line_col(self, key, data):
+ # type: (Any, Any) -> None
+ self.lc.add_idx_line_col(key, data)
+
+ @property
+ def anchor(self):
+ # type: () -> Any
+ if not hasattr(self, Anchor.attrib):
+ setattr(self, Anchor.attrib, Anchor())
+ return getattr(self, Anchor.attrib)
+
+ def yaml_anchor(self):
+ # type: () -> Any
+ if not hasattr(self, Anchor.attrib):
+ return None
+ return self.anchor
+
+ def yaml_set_anchor(self, value, always_dump=False):
+ # type: (Any, bool) -> None
+ self.anchor.value = value
+ self.anchor.always_dump = always_dump
+
+ @property
+ def tag(self):
+ # type: () -> Any
+ if not hasattr(self, Tag.attrib):
+ setattr(self, Tag.attrib, Tag())
+ return getattr(self, Tag.attrib)
+
+ def yaml_set_tag(self, value):
+ # type: (Any) -> None
+ self.tag.value = value
+
+ def copy_attributes(self, t, memo=None):
+ # type: (Any, Any) -> None
+ # fmt: off
+ for a in [Comment.attrib, Format.attrib, LineCol.attrib, Anchor.attrib,
+ Tag.attrib, merge_attrib]:
+ if hasattr(self, a):
+ if memo is not None:
+ setattr(t, a, copy.deepcopy(getattr(self, a), memo)) # memo goes to deepcopy, not getattr
+ else:
+ setattr(t, a, getattr(self, a))
+ # fmt: on
+
+ def _yaml_add_eol_comment(self, comment, key):
+ # type: (Any, Any) -> None
+ raise NotImplementedError
+
+ def _yaml_get_pre_comment(self):
+ # type: () -> Any
+ raise NotImplementedError
+
+ def _yaml_get_column(self, key):
+ # type: (Any) -> Any
+ raise NotImplementedError
+
+
+class CommentedSeq(MutableSliceableSequence, list, CommentedBase): # type: ignore
+ __slots__ = (Comment.attrib, '_lst')
+
+ def __init__(self, *args, **kw):
+ # type: (Any, Any) -> None
+ list.__init__(self, *args, **kw)
+
+ def __getsingleitem__(self, idx):
+ # type: (Any) -> Any
+ return list.__getitem__(self, idx)
+
+ def __setsingleitem__(self, idx, value):
+ # type: (Any, Any) -> None
+ # try to preserve the scalarstring type if setting an existing key to a new value
+ if idx < len(self):
+ if (
+ isinstance(value, string_types)
+ and not isinstance(value, ScalarString)
+ and isinstance(self[idx], ScalarString)
+ ):
+ value = type(self[idx])(value)
+ list.__setitem__(self, idx, value)
+
+ def __delsingleitem__(self, idx=None):
+ # type: (Any) -> Any
+ list.__delitem__(self, idx)
+ self.ca.items.pop(idx, None) # might not be there -> default value
+ for list_index in sorted(self.ca.items):
+ if list_index < idx:
+ continue
+ self.ca.items[list_index - 1] = self.ca.items.pop(list_index)
+
+ def __len__(self):
+ # type: () -> int
+ return list.__len__(self)
+
+ def insert(self, idx, val):
+ # type: (Any, Any) -> None
+ """the comments after the insertion have to move forward"""
+ list.insert(self, idx, val)
+ for list_index in sorted(self.ca.items, reverse=True):
+ if list_index < idx:
+ break
+ self.ca.items[list_index + 1] = self.ca.items.pop(list_index)
+
+ def extend(self, val):
+ # type: (Any) -> None
+ list.extend(self, val)
+
+ def __eq__(self, other):
+ # type: (Any) -> bool
+ return list.__eq__(self, other)
+
+ def _yaml_add_comment(self, comment, key=NoComment):
+ # type: (Any, Optional[Any]) -> None
+ if key is not NoComment:
+ self.yaml_key_comment_extend(key, comment)
+ else:
+ self.ca.comment = comment
+
+ def _yaml_add_eol_comment(self, comment, key):
+ # type: (Any, Any) -> None
+ self._yaml_add_comment(comment, key=key)
+
+ def _yaml_get_columnX(self, key):
+ # type: (Any) -> Any
+ return self.ca.items[key][0].start_mark.column
+
+ def _yaml_get_column(self, key):
+ # type: (Any) -> Any
+ column = None
+ sel_idx = None
+ pre, post = key - 1, key + 1
+ if pre in self.ca.items:
+ sel_idx = pre
+ elif post in self.ca.items:
+ sel_idx = post
+ else:
+ # self.ca.items is not ordered
+ for row_idx, _k1 in enumerate(self):
+ if row_idx >= key:
+ break
+ if row_idx not in self.ca.items:
+ continue
+ sel_idx = row_idx
+ if sel_idx is not None:
+ column = self._yaml_get_columnX(sel_idx)
+ return column
+
+ def _yaml_get_pre_comment(self):
+ # type: () -> Any
+ pre_comments = [] # type: List[Any]
+ if self.ca.comment is None:
+ self.ca.comment = [None, pre_comments]
+ else:
+ self.ca.comment[1] = pre_comments
+ return pre_comments
+
+ def __deepcopy__(self, memo):
+ # type: (Any) -> Any
+ res = self.__class__()
+ memo[id(self)] = res
+ for k in self:
+ res.append(copy.deepcopy(k, memo))
+ self.copy_attributes(res, memo=memo)
+ return res
+
+ def __add__(self, other):
+ # type: (Any) -> Any
+ return list.__add__(self, other)
+
+ def sort(self, key=None, reverse=False): # type: ignore
+ # type: (Any, bool) -> None
+ if key is None:
+ tmp_lst = sorted(zip(self, range(len(self))), reverse=reverse)
+ list.__init__(self, [x[0] for x in tmp_lst])
+ else:
+ tmp_lst = sorted(
+ zip(map(key, list.__iter__(self)), range(len(self))), reverse=reverse
+ )
+ list.__init__(self, [list.__getitem__(self, x[1]) for x in tmp_lst])
+ itm = self.ca.items
+ self.ca._items = {}
+ for idx, x in enumerate(tmp_lst):
+ old_index = x[1]
+ if old_index in itm:
+ self.ca.items[idx] = itm[old_index]
+
+ def __repr__(self):
+ # type: () -> Any
+ return list.__repr__(self)
+
+
+class CommentedKeySeq(tuple, CommentedBase): # type: ignore
+ """This primarily exists to be able to roundtrip keys that are sequences"""
+
+ def _yaml_add_comment(self, comment, key=NoComment):
+ # type: (Any, Optional[Any]) -> None
+ if key is not NoComment:
+ self.yaml_key_comment_extend(key, comment)
+ else:
+ self.ca.comment = comment
+
+ def _yaml_add_eol_comment(self, comment, key):
+ # type: (Any, Any) -> None
+ self._yaml_add_comment(comment, key=key)
+
+ def _yaml_get_columnX(self, key):
+ # type: (Any) -> Any
+ return self.ca.items[key][0].start_mark.column
+
+ def _yaml_get_column(self, key):
+ # type: (Any) -> Any
+ column = None
+ sel_idx = None
+ pre, post = key - 1, key + 1
+ if pre in self.ca.items:
+ sel_idx = pre
+ elif post in self.ca.items:
+ sel_idx = post
+ else:
+ # self.ca.items is not ordered
+ for row_idx, _k1 in enumerate(self):
+ if row_idx >= key:
+ break
+ if row_idx not in self.ca.items:
+ continue
+ sel_idx = row_idx
+ if sel_idx is not None:
+ column = self._yaml_get_columnX(sel_idx)
+ return column
+
+ def _yaml_get_pre_comment(self):
+ # type: () -> Any
+ pre_comments = [] # type: List[Any]
+ if self.ca.comment is None:
+ self.ca.comment = [None, pre_comments]
+ else:
+ self.ca.comment[1] = pre_comments
+ return pre_comments
+
+
+class CommentedMapView(Sized):
+ __slots__ = ('_mapping',)
+
+ def __init__(self, mapping):
+ # type: (Any) -> None
+ self._mapping = mapping
+
+ def __len__(self):
+ # type: () -> int
+ count = len(self._mapping)
+ return count
+
+
+class CommentedMapKeysView(CommentedMapView, Set): # type: ignore
+ __slots__ = ()
+
+ @classmethod
+ def _from_iterable(cls, it):
+ # type: (Any) -> Any
+ return set(it)
+
+ def __contains__(self, key):
+ # type: (Any) -> Any
+ return key in self._mapping
+
+ def __iter__(self):
+ # type: () -> Any # yield from self._mapping # not in py27, pypy
+ # for x in self._mapping._keys():
+ for x in self._mapping:
+ yield x
+
+
+class CommentedMapItemsView(CommentedMapView, Set): # type: ignore
+ __slots__ = ()
+
+ @classmethod
+ def _from_iterable(cls, it):
+ # type: (Any) -> Any
+ return set(it)
+
+ def __contains__(self, item):
+ # type: (Any) -> Any
+ key, value = item
+ try:
+ v = self._mapping[key]
+ except KeyError:
+ return False
+ else:
+ return v == value
+
+ def __iter__(self):
+ # type: () -> Any
+ for key in self._mapping._keys():
+ yield (key, self._mapping[key])
+
+
+class CommentedMapValuesView(CommentedMapView):
+ __slots__ = ()
+
+ def __contains__(self, value):
+ # type: (Any) -> Any
+ for key in self._mapping:
+ if value == self._mapping[key]:
+ return True
+ return False
+
+ def __iter__(self):
+ # type: () -> Any
+ for key in self._mapping._keys():
+ yield self._mapping[key]
+
+
+class CommentedMap(ordereddict, CommentedBase): # type: ignore
+ __slots__ = (Comment.attrib, '_ok', '_ref')
+
+ def __init__(self, *args, **kw):
+ # type: (Any, Any) -> None
+ self._ok = set() # type: MutableSet[Any] # own keys
+ self._ref = [] # type: List[CommentedMap]
+ ordereddict.__init__(self, *args, **kw)
+
+ def _yaml_add_comment(self, comment, key=NoComment, value=NoComment):
+ # type: (Any, Optional[Any], Optional[Any]) -> None
+ """values is set to key to indicate a value attachment of comment"""
+ if key is not NoComment:
+ self.yaml_key_comment_extend(key, comment)
+ return
+ if value is not NoComment:
+ self.yaml_value_comment_extend(value, comment)
+ else:
+ self.ca.comment = comment
+
+ def _yaml_add_eol_comment(self, comment, key):
+ # type: (Any, Any) -> None
+ """add on the value line, with value specified by the key"""
+ self._yaml_add_comment(comment, value=key)
+
+ def _yaml_get_columnX(self, key):
+ # type: (Any) -> Any
+ return self.ca.items[key][2].start_mark.column
+
+ def _yaml_get_column(self, key):
+ # type: (Any) -> Any
+ column = None
+ sel_idx = None
+ pre, post, last = None, None, None
+ for x in self:
+ if pre is not None and x != key:
+ post = x
+ break
+ if x == key:
+ pre = last
+ last = x
+ if pre in self.ca.items:
+ sel_idx = pre
+ elif post in self.ca.items:
+ sel_idx = post
+ else:
+ # self.ca.items is not ordered
+ for k1 in self:
+ if k1 >= key:
+ break
+ if k1 not in self.ca.items:
+ continue
+ sel_idx = k1
+ if sel_idx is not None:
+ column = self._yaml_get_columnX(sel_idx)
+ return column
+
+ def _yaml_get_pre_comment(self):
+ # type: () -> Any
+ pre_comments = [] # type: List[Any]
+ if self.ca.comment is None:
+ self.ca.comment = [None, pre_comments]
+ else:
+ self.ca.comment[1] = pre_comments
+ return pre_comments
+
+ def update(self, vals):
+ # type: (Any) -> None
+ try:
+ ordereddict.update(self, vals)
+ except TypeError:
+ # vals apparently isn't directly usable by ordereddict.update; copy key by key
+ for x in vals:
+ self[x] = vals[x]
+ try:
+ self._ok.update(vals.keys()) # type: ignore
+ except AttributeError:
+ # assume a list/tuple of two element lists/tuples
+ for x in vals:
+ self._ok.add(x[0])
+
+ def insert(self, pos, key, value, comment=None):
+ # type: (Any, Any, Any, Optional[Any]) -> None
+ """insert key value into given position
+ attach comment if provided
+ """
+ ordereddict.insert(self, pos, key, value)
+ self._ok.add(key)
+ if comment is not None:
+ self.yaml_add_eol_comment(comment, key=key)
+
+ def mlget(self, key, default=None, list_ok=False):
+ # type: (Any, Any, Any) -> Any
+ """multi-level get that expects dicts within dicts"""
+ if not isinstance(key, list):
+ return self.get(key, default)
+ # assume that the key is a list of recursively accessible dicts
+
+ def get_one_level(key_list, level, d):
+ # type: (Any, Any, Any) -> Any
+ if not list_ok:
+ assert isinstance(d, dict)
+ if level >= len(key_list):
+ if level > len(key_list):
+ raise IndexError
+ return d[key_list[level - 1]]
+ return get_one_level(key_list, level + 1, d[key_list[level - 1]])
+
+ try:
+ return get_one_level(key, 1, self)
+ except KeyError:
+ return default
+ except (TypeError, IndexError):
+ if not list_ok:
+ raise
+ return default
+
+ def __getitem__(self, key):
+ # type: (Any) -> Any
+ try:
+ return ordereddict.__getitem__(self, key)
+ except KeyError:
+ for merged in getattr(self, merge_attrib, []):
+ if key in merged[1]:
+ return merged[1][key]
+ raise
+
+ def __setitem__(self, key, value):
+ # type: (Any, Any) -> None
+ # try to preserve the scalarstring type if setting an existing key to a new value
+ if key in self:
+ if (
+ isinstance(value, string_types)
+ and not isinstance(value, ScalarString)
+ and isinstance(self[key], ScalarString)
+ ):
+ value = type(self[key])(value)
+ ordereddict.__setitem__(self, key, value)
+ self._ok.add(key)
+
+ def _unmerged_contains(self, key):
+ # type: (Any) -> Any
+ if key in self._ok:
+ return True
+ return None
+
+ def __contains__(self, key):
+ # type: (Any) -> bool
+ return bool(ordereddict.__contains__(self, key))
+
+ def get(self, key, default=None):
+ # type: (Any, Any) -> Any
+ try:
+ return self.__getitem__(key)
+ except: # NOQA
+ return default
+
+ def __repr__(self):
+ # type: () -> Any
+ return ordereddict.__repr__(self).replace('CommentedMap', 'ordereddict')
+
+ def non_merged_items(self):
+ # type: () -> Any
+ for x in ordereddict.__iter__(self):
+ if x in self._ok:
+ yield x, ordereddict.__getitem__(self, x)
+
+ def __delitem__(self, key):
+ # type: (Any) -> None
+ # for merged in getattr(self, merge_attrib, []):
+ # if key in merged[1]:
+ # value = merged[1][key]
+ # break
+ # else:
+ # # not found in merged in stuff
+ # ordereddict.__delitem__(self, key)
+ # for referer in self._ref:
+ # referer.update_key_value(key)
+ # return
+ #
+ # ordereddict.__setitem__(self, key, value) # merge might have different value
+ # self._ok.discard(key)
+ self._ok.discard(key)
+ ordereddict.__delitem__(self, key)
+ for referer in self._ref:
+ referer.update_key_value(key)
+
+ def __iter__(self):
+ # type: () -> Any
+ for x in ordereddict.__iter__(self):
+ yield x
+
+ def _keys(self):
+ # type: () -> Any
+ for x in ordereddict.__iter__(self):
+ yield x
+
+ def __len__(self):
+ # type: () -> int
+ return int(ordereddict.__len__(self))
+
+ def __eq__(self, other):
+ # type: (Any) -> bool
+ return bool(dict(self) == other)
+
+ if PY2:
+
+ def keys(self):
+ # type: () -> Any
+ return list(self._keys())
+
+ def iterkeys(self):
+ # type: () -> Any
+ return self._keys()
+
+ def viewkeys(self):
+ # type: () -> Any
+ return CommentedMapKeysView(self)
+
+ else:
+
+ def keys(self):
+ # type: () -> Any
+ return CommentedMapKeysView(self)
+
+ if PY2:
+
+ def _values(self):
+ # type: () -> Any
+ for x in ordereddict.__iter__(self):
+ yield ordereddict.__getitem__(self, x)
+
+ def values(self):
+ # type: () -> Any
+ return list(self._values())
+
+ def itervalues(self):
+ # type: () -> Any
+ return self._values()
+
+ def viewvalues(self):
+ # type: () -> Any
+ return CommentedMapValuesView(self)
+
+ else:
+
+ def values(self):
+ # type: () -> Any
+ return CommentedMapValuesView(self)
+
+ def _items(self):
+ # type: () -> Any
+ for x in ordereddict.__iter__(self):
+ yield x, ordereddict.__getitem__(self, x)
+
+ if PY2:
+
+ def items(self):
+ # type: () -> Any
+ return list(self._items())
+
+ def iteritems(self):
+ # type: () -> Any
+ return self._items()
+
+ def viewitems(self):
+ # type: () -> Any
+ return CommentedMapItemsView(self)
+
+ else:
+
+ def items(self):
+ # type: () -> Any
+ return CommentedMapItemsView(self)
+
+ @property
+ def merge(self):
+ # type: () -> Any
+ if not hasattr(self, merge_attrib):
+ setattr(self, merge_attrib, [])
+ return getattr(self, merge_attrib)
+
+ def copy(self):
+ # type: () -> Any
+ x = type(self)() # update doesn't work
+ for k, v in self._items():
+ x[k] = v
+ self.copy_attributes(x)
+ return x
+
+ def add_referent(self, cm):
+ # type: (Any) -> None
+ if cm not in self._ref:
+ self._ref.append(cm)
+
+ def add_yaml_merge(self, value):
+ # type: (Any) -> None
+ for v in value:
+ v[1].add_referent(self)
+ for k, v in v[1].items():
+ if ordereddict.__contains__(self, k):
+ continue
+ ordereddict.__setitem__(self, k, v)
+ self.merge.extend(value)
+
+ def update_key_value(self, key):
+ # type: (Any) -> None
+ if key in self._ok:
+ return
+ for v in self.merge:
+ if key in v[1]:
+ ordereddict.__setitem__(self, key, v[1][key])
+ return
+ ordereddict.__delitem__(self, key)
+
+ def __deepcopy__(self, memo):
+ # type: (Any) -> Any
+ res = self.__class__()
+ memo[id(self)] = res
+ for k in self:
+ res[k] = copy.deepcopy(self[k], memo)
+ self.copy_attributes(res, memo=memo)
+ return res
+
+
+# based on brownie mappings
+@classmethod # type: ignore
+def raise_immutable(cls, *args, **kwargs):
+ # type: (Any, *Any, **Any) -> None
+ raise TypeError('{} objects are immutable'.format(cls.__name__))
+
+
+class CommentedKeyMap(CommentedBase, Mapping): # type: ignore
+ """This primarily exists to be able to roundtrip keys that are mappings"""
+ __slots__ = Comment.attrib, '_od'
+
+ def __init__(self, *args, **kw):
+ # type: (Any, Any) -> None
+ if hasattr(self, '_od'):
+ raise_immutable(self)
+ try:
+ self._od = ordereddict(*args, **kw)
+ except TypeError:
+ if PY2:
+ self._od = ordereddict(args[0].items())
+ else:
+ raise
+
+ __delitem__ = __setitem__ = clear = pop = popitem = setdefault = update = raise_immutable
+
+ # need to implement __getitem__, __iter__ and __len__
+ def __getitem__(self, index):
+ # type: (Any) -> Any
+ return self._od[index]
+
+ def __iter__(self):
+ # type: () -> Iterator[Any]
+ for x in self._od.__iter__():
+ yield x
+
+ def __len__(self):
+ # type: () -> int
+ return len(self._od)
+
+ def __hash__(self):
+ # type: () -> Any
+ return hash(tuple(self.items()))
+
+ def __repr__(self):
+ # type: () -> Any
+ if not hasattr(self, merge_attrib):
+ return self._od.__repr__()
+ return 'ordereddict(' + repr(list(self._od.items())) + ')'
+
+ @classmethod
+ def fromkeys(cls, keys, v=None):
+ # type: (Any, Any) -> Any
+ return CommentedKeyMap(dict.fromkeys(keys, v))
+
+ def _yaml_add_comment(self, comment, key=NoComment):
+ # type: (Any, Optional[Any]) -> None
+ if key is not NoComment:
+ self.yaml_key_comment_extend(key, comment)
+ else:
+ self.ca.comment = comment
+
+ def _yaml_add_eol_comment(self, comment, key):
+ # type: (Any, Any) -> None
+ self._yaml_add_comment(comment, key=key)
+
+ def _yaml_get_columnX(self, key):
+ # type: (Any) -> Any
+ return self.ca.items[key][0].start_mark.column
+
+ def _yaml_get_column(self, key):
+ # type: (Any) -> Any
+ column = None
+ sel_idx = None
+ pre, post = key - 1, key + 1
+ if pre in self.ca.items:
+ sel_idx = pre
+ elif post in self.ca.items:
+ sel_idx = post
+ else:
+ # self.ca.items is not ordered
+ for row_idx, _k1 in enumerate(self):
+ if row_idx >= key:
+ break
+ if row_idx not in self.ca.items:
+ continue
+ sel_idx = row_idx
+ if sel_idx is not None:
+ column = self._yaml_get_columnX(sel_idx)
+ return column
+
+ def _yaml_get_pre_comment(self):
+ # type: () -> Any
+ pre_comments = [] # type: List[Any]
+ if self.ca.comment is None:
+ self.ca.comment = [None, pre_comments]
+ else:
+ self.ca.comment[1] = pre_comments
+ return pre_comments
+
+
+class CommentedOrderedMap(CommentedMap):
+ __slots__ = (Comment.attrib,)
+
+
+class CommentedSet(MutableSet, CommentedBase): # type: ignore # NOQA
+ __slots__ = Comment.attrib, 'odict'
+
+ def __init__(self, values=None):
+ # type: (Any) -> None
+ self.odict = ordereddict()
+ MutableSet.__init__(self)
+ if values is not None:
+ self |= values # type: ignore
+
+ def _yaml_add_comment(self, comment, key=NoComment, value=NoComment):
+ # type: (Any, Optional[Any], Optional[Any]) -> None
+ """values is set to key to indicate a value attachment of comment"""
+ if key is not NoComment:
+ self.yaml_key_comment_extend(key, comment)
+ return
+ if value is not NoComment:
+ self.yaml_value_comment_extend(value, comment)
+ else:
+ self.ca.comment = comment
+
+ def _yaml_add_eol_comment(self, comment, key):
+ # type: (Any, Any) -> None
+ """add on the value line, with value specified by the key"""
+ self._yaml_add_comment(comment, value=key)
+
+ def add(self, value):
+ # type: (Any) -> None
+ """Add an element."""
+ self.odict[value] = None
+
+ def discard(self, value):
+ # type: (Any) -> None
+ """Remove an element. Do not raise an exception if absent."""
+ del self.odict[value]
+
+ def __contains__(self, x):
+ # type: (Any) -> Any
+ return x in self.odict
+
+ def __iter__(self):
+ # type: () -> Any
+ for x in self.odict:
+ yield x
+
+ def __len__(self):
+ # type: () -> int
+ return len(self.odict)
+
+ def __repr__(self):
+ # type: () -> str
+ return 'set({0!r})'.format(self.odict.keys())
+
+
+class TaggedScalar(CommentedBase):
+ # the value and style attributes are set during roundtrip construction
+ def __init__(self, value=None, style=None, tag=None):
+ # type: (Any, Any, Any) -> None
+ self.value = value
+ self.style = style
+ if tag is not None:
+ self.yaml_set_tag(tag)
+
+ def __str__(self):
+ # type: () -> Any
+ return self.value
+
+
+def dump_comments(d, name="", sep='.', out=sys.stdout):
+ # type: (Any, str, str, Any) -> None
+ """
+ recursively dump comments, all but the toplevel preceded by the path
+ in dotted form x.0.a
+ """
+ if isinstance(d, dict) and hasattr(d, 'ca'):
+ if name:
+ out.write('{}\n'.format(name))
+ out.write('{}\n'.format(d.ca)) # type: ignore
+ for k in d:
+ dump_comments(d[k], name=(name + sep + k) if name else k, sep=sep, out=out)
+ elif isinstance(d, list) and hasattr(d, 'ca'):
+ if name:
+ out.write('{}\n'.format(name))
+ out.write('{}\n'.format(d.ca)) # type: ignore
+ for idx, k in enumerate(d):
+ dump_comments(
+ k, name=(name + sep + str(idx)) if name else str(idx), sep=sep, out=out
+ )
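comments.py carries the whole round-trip state: `Comment`, `Format`, `LineCol`, `Anchor` and `Tag` instances are attached lazily through properties such as `.ca` and `.fa`. A hedged usage sketch (keys and comment text invented for illustration):

    import sys
    from dynaconf.vendor.ruamel.yaml import YAML
    from dynaconf.vendor.ruamel.yaml.comments import CommentedMap

    cm = CommentedMap()
    cm['name'] = 'example'
    cm['port'] = 8080
    cm.yaml_set_start_comment('generated file, do not edit')
    cm.yaml_add_eol_comment('default HTTP port', key='port')
    YAML().dump(cm, sys.stdout)
    # # generated file, do not edit
    # name: example
    # port: 8080   # default HTTP port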
diff --git a/libs/dynaconf/vendor/ruamel/yaml/compat.py b/libs/dynaconf/vendor/ruamel/yaml/compat.py
new file mode 100644
index 000000000..c48cb5813
--- /dev/null
+++ b/libs/dynaconf/vendor/ruamel/yaml/compat.py
@@ -0,0 +1,324 @@
+# coding: utf-8
+
+from __future__ import print_function
+
+# partially from package six by Benjamin Peterson
+
+import sys
+import os
+import types
+import traceback
+from abc import abstractmethod
+
+
+# fmt: off
+if False: # MYPY
+ from typing import Any, Dict, Optional, List, Union, BinaryIO, IO, Text, Tuple # NOQA
+# fmt: on
+
+_DEFAULT_YAML_VERSION = (1, 2)
+
+try:
+ from ruamel.ordereddict import ordereddict
+except: # NOQA
+ try:
+ from collections import OrderedDict
+ except ImportError:
+ from ordereddict import OrderedDict # type: ignore
+ # to get the right name import ... as ordereddict doesn't do that
+
+ class ordereddict(OrderedDict): # type: ignore
+ if not hasattr(OrderedDict, 'insert'):
+
+ def insert(self, pos, key, value):
+ # type: (int, Any, Any) -> None
+ if pos >= len(self):
+ self[key] = value
+ return
+ od = ordereddict()
+ od.update(self)
+ for k in od:
+ del self[k]
+ for index, old_key in enumerate(od):
+ if pos == index:
+ self[key] = value
+ self[old_key] = od[old_key]
+
+
+PY2 = sys.version_info[0] == 2
+PY3 = sys.version_info[0] == 3
+
+
+if PY3:
+
+ def utf8(s):
+ # type: (str) -> str
+ return s
+
+ def to_str(s):
+ # type: (str) -> str
+ return s
+
+ def to_unicode(s):
+ # type: (str) -> str
+ return s
+
+
+else:
+ if False:
+ unicode = str
+
+ def utf8(s):
+ # type: (unicode) -> str
+ return s.encode('utf-8')
+
+ def to_str(s):
+ # type: (str) -> str
+ return str(s)
+
+ def to_unicode(s):
+ # type: (str) -> unicode
+ return unicode(s) # NOQA
+
+
+if PY3:
+ string_types = str
+ integer_types = int
+ class_types = type
+ text_type = str
+ binary_type = bytes
+
+ MAXSIZE = sys.maxsize
+ unichr = chr
+ import io
+
+ StringIO = io.StringIO
+ BytesIO = io.BytesIO
+ # have unlimited precision
+ no_limit_int = int
+ from collections.abc import Hashable, MutableSequence, MutableMapping, Mapping # NOQA
+
+else:
+ string_types = basestring # NOQA
+ integer_types = (int, long) # NOQA
+ class_types = (type, types.ClassType)
+ text_type = unicode # NOQA
+ binary_type = str
+
+ # to allow importing
+ unichr = unichr
+ from StringIO import StringIO as _StringIO
+
+ StringIO = _StringIO
+ import cStringIO
+
+ BytesIO = cStringIO.StringIO
+ # have unlimited precision
+ no_limit_int = long # NOQA not available on Python 3
+ from collections import Hashable, MutableSequence, MutableMapping, Mapping # NOQA
+
+if False: # MYPY
+ # StreamType = Union[BinaryIO, IO[str], IO[unicode], StringIO]
+ # StreamType = Union[BinaryIO, IO[str], StringIO] # type: ignore
+ StreamType = Any
+
+ StreamTextType = StreamType # Union[Text, StreamType]
+ VersionType = Union[List[int], str, Tuple[int, int]]
+
+if PY3:
+ builtins_module = 'builtins'
+else:
+ builtins_module = '__builtin__'
+
+UNICODE_SIZE = 4 if sys.maxunicode > 65535 else 2
+
+
+def with_metaclass(meta, *bases):
+ # type: (Any, Any) -> Any
+ """Create a base class with a metaclass."""
+ return meta('NewBase', bases, {})
+
+
+DBG_TOKEN = 1
+DBG_EVENT = 2
+DBG_NODE = 4
+
+
+_debug = None # type: Optional[int]
+if 'RUAMELDEBUG' in os.environ:
+ _debugx = os.environ.get('RUAMELDEBUG')
+ if _debugx is None:
+ _debug = 0
+ else:
+ _debug = int(_debugx)
+
+
+if bool(_debug):
+
+ class ObjectCounter(object):
+ def __init__(self):
+ # type: () -> None
+ self.map = {} # type: Dict[Any, Any]
+
+ def __call__(self, k):
+ # type: (Any) -> None
+ self.map[k] = self.map.get(k, 0) + 1
+
+ def dump(self):
+ # type: () -> None
+ for k in sorted(self.map):
+ sys.stdout.write('{} -> {}\n'.format(k, self.map[k]))
+
+ object_counter = ObjectCounter()
+
+
+# used from yaml util when testing
+def dbg(val=None):
+ # type: (Any) -> Any
+ global _debug
+ if _debug is None:
+ # set to true or false
+ _debugx = os.environ.get('YAMLDEBUG')
+ if _debugx is None:
+ _debug = 0
+ else:
+ _debug = int(_debugx)
+ if val is None:
+ return _debug
+ return _debug & val
+
+
+class Nprint(object):
+ def __init__(self, file_name=None):
+ # type: (Any) -> None
+ self._max_print = None # type: Any
+ self._count = None # type: Any
+ self._file_name = file_name
+
+ def __call__(self, *args, **kw):
+ # type: (Any, Any) -> None
+ if not bool(_debug):
+ return
+ out = sys.stdout if self._file_name is None else open(self._file_name, 'a')
+ dbgprint = print # to fool checking for print statements by dv utility
+ kw1 = kw.copy()
+ kw1['file'] = out
+ dbgprint(*args, **kw1)
+ out.flush()
+ if self._max_print is not None:
+ if self._count is None:
+ self._count = self._max_print
+ self._count -= 1
+ if self._count == 0:
+ dbgprint('forced exit\n')
+ traceback.print_stack()
+ out.flush()
+ sys.exit(0)
+ if self._file_name:
+ out.close()
+
+ def set_max_print(self, i):
+ # type: (int) -> None
+ self._max_print = i
+ self._count = None
+
+
+nprint = Nprint()
+nprintf = Nprint('/var/tmp/ruamel.yaml.log')
+
+# char checkers following production rules
+
+
+def check_namespace_char(ch):
+ # type: (Any) -> bool
+ if u'\x21' <= ch <= u'\x7E': # ! to ~
+ return True
+ if u'\xA0' <= ch <= u'\uD7FF':
+ return True
+ if (u'\uE000' <= ch <= u'\uFFFD') and ch != u'\uFEFF': # excl. byte order mark
+ return True
+ if u'\U00010000' <= ch <= u'\U0010FFFF':
+ return True
+ return False
+
+
+def check_anchorname_char(ch):
+ # type: (Any) -> bool
+ if ch in u',[]{}':
+ return False
+ return check_namespace_char(ch)
+
+
+def version_tnf(t1, t2=None):
+ # type: (Any, Any) -> Any
+ """
+ return True if ruamel.yaml version_info < t1; None if t2 is given and version_info < t2; else False
+ """
+ from dynaconf.vendor.ruamel.yaml import version_info # NOQA
+
+ if version_info < t1:
+ return True
+ if t2 is not None and version_info < t2:
+ return None
+ return False
+
+
+class MutableSliceableSequence(MutableSequence): # type: ignore
+ __slots__ = ()
+
+ def __getitem__(self, index):
+ # type: (Any) -> Any
+ if not isinstance(index, slice):
+ return self.__getsingleitem__(index)
+ return type(self)([self[i] for i in range(*index.indices(len(self)))]) # type: ignore
+
+ def __setitem__(self, index, value):
+ # type: (Any, Any) -> None
+ if not isinstance(index, slice):
+ return self.__setsingleitem__(index, value)
+ assert iter(value)
+ # nprint(index.start, index.stop, index.step, index.indices(len(self)))
+ if index.step is None:
+ del self[index.start : index.stop]
+ for elem in reversed(value):
+ self.insert(0 if index.start is None else index.start, elem)
+ else:
+ range_parms = index.indices(len(self))
+ nr_assigned_items = (range_parms[1] - range_parms[0] - 1) // range_parms[2] + 1
+ # need to test before changing, in case TypeError is caught
+ if nr_assigned_items < len(value):
+ raise TypeError(
+ 'too many elements in value {} < {}'.format(nr_assigned_items, len(value))
+ )
+ elif nr_assigned_items > len(value):
+ raise TypeError(
+ 'not enough elements in value {} > {}'.format(
+ nr_assigned_items, len(value)
+ )
+ )
+ for idx, i in enumerate(range(*range_parms)):
+ self[i] = value[idx]
+
+ def __delitem__(self, index):
+ # type: (Any) -> None
+ if not isinstance(index, slice):
+ return self.__delsingleitem__(index)
+ # nprint(index.start, index.stop, index.step, index.indices(len(self)))
+ for i in reversed(range(*index.indices(len(self)))):
+ del self[i]
+
+ @abstractmethod
+ def __getsingleitem__(self, index):
+ # type: (Any) -> Any
+ raise IndexError
+
+ @abstractmethod
+ def __setsingleitem__(self, index, value):
+ # type: (Any, Any) -> None
+ raise IndexError
+
+ @abstractmethod
+ def __delsingleitem__(self, index):
+ # type: (Any) -> None
+ raise IndexError
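Besides the PY2/PY3 shims, compat.py contributes `MutableSliceableSequence`, which routes slice access through the three abstract single-item methods so that subclasses such as `CommentedSeq` only implement the scalar cases. A sketch of the resulting slice semantics:

    from dynaconf.vendor.ruamel.yaml.comments import CommentedSeq

    cs = CommentedSeq([0, 1, 2, 3, 4])
    assert type(cs[1:4]) is CommentedSeq  # slice __getitem__ rebuilds via type(self)
    cs[::2] = [10, 20, 30]                # extended slice: lengths must match exactly
    del cs[1:3]
    assert list(cs) == [10, 3, 30]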
diff --git a/libs/dynaconf/vendor/ruamel/yaml/composer.py b/libs/dynaconf/vendor/ruamel/yaml/composer.py
new file mode 100644
index 000000000..96e67a7a9
--- /dev/null
+++ b/libs/dynaconf/vendor/ruamel/yaml/composer.py
@@ -0,0 +1,238 @@
+# coding: utf-8
+
+from __future__ import absolute_import, print_function
+
+import warnings
+
+from .error import MarkedYAMLError, ReusedAnchorWarning
+from .compat import utf8, nprint, nprintf # NOQA
+
+from .events import (
+ StreamStartEvent,
+ StreamEndEvent,
+ MappingStartEvent,
+ MappingEndEvent,
+ SequenceStartEvent,
+ SequenceEndEvent,
+ AliasEvent,
+ ScalarEvent,
+)
+from .nodes import MappingNode, ScalarNode, SequenceNode
+
+if False: # MYPY
+ from typing import Any, Dict, Optional, List # NOQA
+
+__all__ = ['Composer', 'ComposerError']
+
+
+class ComposerError(MarkedYAMLError):
+ pass
+
+
+class Composer(object):
+ def __init__(self, loader=None):
+ # type: (Any) -> None
+ self.loader = loader
+ if self.loader is not None and getattr(self.loader, '_composer', None) is None:
+ self.loader._composer = self
+ self.anchors = {} # type: Dict[Any, Any]
+
+ @property
+ def parser(self):
+ # type: () -> Any
+ if hasattr(self.loader, 'typ'):
+ self.loader.parser # bare attribute access triggers lazy setup of _parser on the new API
+ return self.loader._parser
+
+ @property
+ def resolver(self):
+ # type: () -> Any
+ # assert self.loader._resolver is not None
+ if hasattr(self.loader, 'typ'):
+ self.loader.resolver # bare attribute access triggers lazy setup of _resolver on the new API
+ return self.loader._resolver
+
+ def check_node(self):
+ # type: () -> Any
+ # Drop the STREAM-START event.
+ if self.parser.check_event(StreamStartEvent):
+ self.parser.get_event()
+
+ # Are there more documents available?
+ return not self.parser.check_event(StreamEndEvent)
+
+ def get_node(self):
+ # type: () -> Any
+ # Get the root node of the next document.
+ if not self.parser.check_event(StreamEndEvent):
+ return self.compose_document()
+
+ def get_single_node(self):
+ # type: () -> Any
+ # Drop the STREAM-START event.
+ self.parser.get_event()
+
+ # Compose a document if the stream is not empty.
+ document = None # type: Any
+ if not self.parser.check_event(StreamEndEvent):
+ document = self.compose_document()
+
+ # Ensure that the stream contains no more documents.
+ if not self.parser.check_event(StreamEndEvent):
+ event = self.parser.get_event()
+ raise ComposerError(
+ 'expected a single document in the stream',
+ document.start_mark,
+ 'but found another document',
+ event.start_mark,
+ )
+
+ # Drop the STREAM-END event.
+ self.parser.get_event()
+
+ return document
+
+ def compose_document(self):
+ # type: (Any) -> Any
+ # Drop the DOCUMENT-START event.
+ self.parser.get_event()
+
+ # Compose the root node.
+ node = self.compose_node(None, None)
+
+ # Drop the DOCUMENT-END event.
+ self.parser.get_event()
+
+ self.anchors = {}
+ return node
+
+ def compose_node(self, parent, index):
+ # type: (Any, Any) -> Any
+ if self.parser.check_event(AliasEvent):
+ event = self.parser.get_event()
+ alias = event.anchor
+ if alias not in self.anchors:
+ raise ComposerError(
+ None, None, 'found undefined alias %r' % utf8(alias), event.start_mark
+ )
+ return self.anchors[alias]
+ event = self.parser.peek_event()
+ anchor = event.anchor
+ if anchor is not None: # have an anchor
+ if anchor in self.anchors:
+ # raise ComposerError(
+ # "found duplicate anchor %r; first occurrence"
+ # % utf8(anchor), self.anchors[anchor].start_mark,
+ # "second occurrence", event.start_mark)
+ ws = (
+ '\nfound duplicate anchor {!r}\nfirst occurrence {}\nsecond occurrence '
+ '{}'.format((anchor), self.anchors[anchor].start_mark, event.start_mark)
+ )
+ warnings.warn(ws, ReusedAnchorWarning)
+ self.resolver.descend_resolver(parent, index)
+ if self.parser.check_event(ScalarEvent):
+ node = self.compose_scalar_node(anchor)
+ elif self.parser.check_event(SequenceStartEvent):
+ node = self.compose_sequence_node(anchor)
+ elif self.parser.check_event(MappingStartEvent):
+ node = self.compose_mapping_node(anchor)
+ self.resolver.ascend_resolver()
+ return node
+
+ def compose_scalar_node(self, anchor):
+ # type: (Any) -> Any
+ event = self.parser.get_event()
+ tag = event.tag
+ if tag is None or tag == u'!':
+ tag = self.resolver.resolve(ScalarNode, event.value, event.implicit)
+ node = ScalarNode(
+ tag,
+ event.value,
+ event.start_mark,
+ event.end_mark,
+ style=event.style,
+ comment=event.comment,
+ anchor=anchor,
+ )
+ if anchor is not None:
+ self.anchors[anchor] = node
+ return node
+
+ def compose_sequence_node(self, anchor):
+ # type: (Any) -> Any
+ start_event = self.parser.get_event()
+ tag = start_event.tag
+ if tag is None or tag == u'!':
+ tag = self.resolver.resolve(SequenceNode, None, start_event.implicit)
+ node = SequenceNode(
+ tag,
+ [],
+ start_event.start_mark,
+ None,
+ flow_style=start_event.flow_style,
+ comment=start_event.comment,
+ anchor=anchor,
+ )
+ if anchor is not None:
+ self.anchors[anchor] = node
+ index = 0
+ while not self.parser.check_event(SequenceEndEvent):
+ node.value.append(self.compose_node(node, index))
+ index += 1
+ end_event = self.parser.get_event()
+ if node.flow_style is True and end_event.comment is not None:
+ if node.comment is not None:
+ nprint(
+ 'Warning: unexpected end_event comment in sequence '
+ 'node {}'.format(node.flow_style)
+ )
+ node.comment = end_event.comment
+ node.end_mark = end_event.end_mark
+ self.check_end_doc_comment(end_event, node)
+ return node
+
+ def compose_mapping_node(self, anchor):
+ # type: (Any) -> Any
+ start_event = self.parser.get_event()
+ tag = start_event.tag
+ if tag is None or tag == u'!':
+ tag = self.resolver.resolve(MappingNode, None, start_event.implicit)
+ node = MappingNode(
+ tag,
+ [],
+ start_event.start_mark,
+ None,
+ flow_style=start_event.flow_style,
+ comment=start_event.comment,
+ anchor=anchor,
+ )
+ if anchor is not None:
+ self.anchors[anchor] = node
+ while not self.parser.check_event(MappingEndEvent):
+ # key_event = self.parser.peek_event()
+ item_key = self.compose_node(node, None)
+ # if item_key in node.value:
+ # raise ComposerError("while composing a mapping",
+ # start_event.start_mark,
+ # "found duplicate key", key_event.start_mark)
+ item_value = self.compose_node(node, item_key)
+ # node.value[item_key] = item_value
+ node.value.append((item_key, item_value))
+ end_event = self.parser.get_event()
+ if node.flow_style is True and end_event.comment is not None:
+ node.comment = end_event.comment
+ node.end_mark = end_event.end_mark
+ self.check_end_doc_comment(end_event, node)
+ return node
+
+ def check_end_doc_comment(self, end_event, node):
+ # type: (Any, Any) -> None
+ if end_event.comment and end_event.comment[1]:
+ # pre comments on an end_event, no following to move to
+ if node.comment is None:
+ node.comment = [None, None]
+ assert not isinstance(node, ScalarNode) # only sequence/mapping nodes reach here
+ # this is a post comment on a mapping node, add as third element
+ # in the list
+ node.comment.append(end_event.comment[1])
+ end_event.comment[1] = None
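The Composer resolves an alias by handing back the very node stored in `self.anchors`, so after construction both references point at the same Python object; a reused anchor name only triggers `ReusedAnchorWarning` rather than an error. A hedged sketch:

    from dynaconf.vendor.ruamel.yaml import YAML

    doc = YAML().load('base: &a {x: 1}\ncopy: *a\n')
    assert doc['base'] is doc['copy']  # alias resolved to the anchored node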
diff --git a/libs/dynaconf/vendor/ruamel/yaml/configobjwalker.py b/libs/dynaconf/vendor/ruamel/yaml/configobjwalker.py
new file mode 100644
index 000000000..711efbc2d
--- /dev/null
+++ b/libs/dynaconf/vendor/ruamel/yaml/configobjwalker.py
@@ -0,0 +1,14 @@
+# coding: utf-8
+
+import warnings
+
+from .util import configobj_walker as new_configobj_walker
+
+if False: # MYPY
+ from typing import Any # NOQA
+
+
+def configobj_walker(cfg):
+ # type: (Any) -> Any
+ warnings.warn('configobj_walker has moved to ruamel.yaml.util, please update your code')
+ return new_configobj_walker(cfg)
diff --git a/libs/dynaconf/vendor/ruamel/yaml/constructor.py b/libs/dynaconf/vendor/ruamel/yaml/constructor.py
new file mode 100644
index 000000000..5d82ce5c0
--- /dev/null
+++ b/libs/dynaconf/vendor/ruamel/yaml/constructor.py
@@ -0,0 +1,1805 @@
+# coding: utf-8
+
+from __future__ import print_function, absolute_import, division
+
+import datetime
+import base64
+import binascii
+import re
+import sys
+import types
+import warnings
+
+# fmt: off
+from .error import (MarkedYAMLError, MarkedYAMLFutureWarning,
+ MantissaNoDotYAML1_1Warning)
+from .nodes import * # NOQA
+from .nodes import (SequenceNode, MappingNode, ScalarNode)
+from .compat import (utf8, builtins_module, to_str, PY2, PY3, # NOQA
+ text_type, nprint, nprintf, version_tnf)
+from .compat import ordereddict, Hashable, MutableSequence # type: ignore
+from .compat import MutableMapping # type: ignore
+
+from .comments import * # NOQA
+from .comments import (CommentedMap, CommentedOrderedMap, CommentedSet,
+ CommentedKeySeq, CommentedSeq, TaggedScalar,
+ CommentedKeyMap)
+from .scalarstring import (SingleQuotedScalarString, DoubleQuotedScalarString,
+ LiteralScalarString, FoldedScalarString,
+ PlainScalarString, ScalarString,)
+from .scalarint import ScalarInt, BinaryInt, OctalInt, HexInt, HexCapsInt
+from .scalarfloat import ScalarFloat
+from .scalarbool import ScalarBoolean
+from .timestamp import TimeStamp
+from .util import RegExp
+
+if False: # MYPY
+ from typing import Any, Dict, List, Set, Generator, Union, Optional # NOQA
+
+
+__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor',
+ 'ConstructorError', 'RoundTripConstructor']
+# fmt: on
+
+
+class ConstructorError(MarkedYAMLError):
+ pass
+
+
+class DuplicateKeyFutureWarning(MarkedYAMLFutureWarning):
+ pass
+
+
+class DuplicateKeyError(MarkedYAMLFutureWarning):
+ pass
+
+
+class BaseConstructor(object):
+
+ yaml_constructors = {} # type: Dict[Any, Any]
+ yaml_multi_constructors = {} # type: Dict[Any, Any]
+
+ def __init__(self, preserve_quotes=None, loader=None):
+ # type: (Optional[bool], Any) -> None
+ self.loader = loader
+ if self.loader is not None and getattr(self.loader, '_constructor', None) is None:
+ self.loader._constructor = self
+ self.yaml_base_dict_type = dict
+ self.yaml_base_list_type = list
+ self.constructed_objects = {} # type: Dict[Any, Any]
+ self.recursive_objects = {} # type: Dict[Any, Any]
+ self.state_generators = [] # type: List[Any]
+ self.deep_construct = False
+ self._preserve_quotes = preserve_quotes
+ self.allow_duplicate_keys = version_tnf((0, 15, 1), (0, 16))
+
+ @property
+ def composer(self):
+ # type: () -> Any
+ if hasattr(self.loader, 'typ'):
+ return self.loader.composer
+ try:
+ return self.loader._composer
+ except AttributeError:
+ sys.stdout.write('slt {}\n'.format(type(self)))
+ sys.stdout.write('slc {}\n'.format(self.loader._composer))
+ sys.stdout.write('{}\n'.format(dir(self)))
+ raise
+
+ @property
+ def resolver(self):
+ # type: () -> Any
+ if hasattr(self.loader, 'typ'):
+ return self.loader.resolver
+ return self.loader._resolver
+
+ def check_data(self):
+ # type: () -> Any
+ # Are there more documents available?
+ return self.composer.check_node()
+
+ def get_data(self):
+ # type: () -> Any
+ # Construct and return the next document.
+ if self.composer.check_node():
+ return self.construct_document(self.composer.get_node())
+
+ def get_single_data(self):
+ # type: () -> Any
+ # Ensure that the stream contains a single document and construct it.
+ node = self.composer.get_single_node()
+ if node is not None:
+ return self.construct_document(node)
+ return None
+
+ def construct_document(self, node):
+ # type: (Any) -> Any
+ data = self.construct_object(node)
+ while bool(self.state_generators):
+ state_generators = self.state_generators
+ self.state_generators = []
+ for generator in state_generators:
+ for _dummy in generator:
+ pass
+ self.constructed_objects = {}
+ self.recursive_objects = {}
+ self.deep_construct = False
+ return data
+
+ def construct_object(self, node, deep=False):
+ # type: (Any, bool) -> Any
+ """deep is True when creating an object/mapping recursively,
+ in that case want the underlying elements available during construction
+ """
+ if node in self.constructed_objects:
+ return self.constructed_objects[node]
+ if deep:
+ old_deep = self.deep_construct
+ self.deep_construct = True
+ if node in self.recursive_objects:
+ return self.recursive_objects[node]
+ # raise ConstructorError(
+ # None, None, 'found unconstructable recursive node', node.start_mark
+ # )
+ self.recursive_objects[node] = None
+ data = self.construct_non_recursive_object(node)
+
+ self.constructed_objects[node] = data
+ del self.recursive_objects[node]
+ if deep:
+ self.deep_construct = old_deep
+ return data
+
+ def construct_non_recursive_object(self, node, tag=None):
+ # type: (Any, Optional[str]) -> Any
+ constructor = None # type: Any
+ tag_suffix = None
+ if tag is None:
+ tag = node.tag
+ if tag in self.yaml_constructors:
+ constructor = self.yaml_constructors[tag]
+ else:
+ for tag_prefix in self.yaml_multi_constructors:
+ if tag.startswith(tag_prefix):
+ tag_suffix = tag[len(tag_prefix) :]
+ constructor = self.yaml_multi_constructors[tag_prefix]
+ break
+ else:
+ if None in self.yaml_multi_constructors:
+ tag_suffix = tag
+ constructor = self.yaml_multi_constructors[None]
+ elif None in self.yaml_constructors:
+ constructor = self.yaml_constructors[None]
+ elif isinstance(node, ScalarNode):
+ constructor = self.__class__.construct_scalar
+ elif isinstance(node, SequenceNode):
+ constructor = self.__class__.construct_sequence
+ elif isinstance(node, MappingNode):
+ constructor = self.__class__.construct_mapping
+ if tag_suffix is None:
+ data = constructor(self, node)
+ else:
+ data = constructor(self, tag_suffix, node)
+ if isinstance(data, types.GeneratorType):
+ generator = data
+ data = next(generator)
+ if self.deep_construct:
+ for _dummy in generator:
+ pass
+ else:
+ self.state_generators.append(generator)
+ return data
+
+ def construct_scalar(self, node):
+ # type: (Any) -> Any
+ if not isinstance(node, ScalarNode):
+ raise ConstructorError(
+ None, None, 'expected a scalar node, but found %s' % node.id, node.start_mark
+ )
+ return node.value
+
+ def construct_sequence(self, node, deep=False):
+ # type: (Any, bool) -> Any
+ """deep is True when creating an object/mapping recursively,
+ in that case want the underlying elements available during construction
+ """
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError(
+ None, None, 'expected a sequence node, but found %s' % node.id, node.start_mark
+ )
+ return [self.construct_object(child, deep=deep) for child in node.value]
+
+ def construct_mapping(self, node, deep=False):
+ # type: (Any, bool) -> Any
+ """deep is True when creating an object/mapping recursively,
+ in that case want the underlying elements available during construction
+ """
+ if not isinstance(node, MappingNode):
+ raise ConstructorError(
+ None, None, 'expected a mapping node, but found %s' % node.id, node.start_mark
+ )
+ total_mapping = self.yaml_base_dict_type()
+ if getattr(node, 'merge', None) is not None:
+ todo = [(node.merge, False), (node.value, False)]
+ else:
+ todo = [(node.value, True)]
+ for values, check in todo:
+ mapping = self.yaml_base_dict_type() # type: Dict[Any, Any]
+ for key_node, value_node in values:
+ # keys can be list -> deep
+ key = self.construct_object(key_node, deep=True)
+ # lists are not hashable, but tuples are
+ if not isinstance(key, Hashable):
+ if isinstance(key, list):
+ key = tuple(key)
+ if PY2:
+ try:
+ hash(key)
+ except TypeError as exc:
+ raise ConstructorError(
+ 'while constructing a mapping',
+ node.start_mark,
+ 'found unacceptable key (%s)' % exc,
+ key_node.start_mark,
+ )
+ else:
+ if not isinstance(key, Hashable):
+ raise ConstructorError(
+ 'while constructing a mapping',
+ node.start_mark,
+ 'found unhashable key',
+ key_node.start_mark,
+ )
+
+ value = self.construct_object(value_node, deep=deep)
+ if check:
+ if self.check_mapping_key(node, key_node, mapping, key, value):
+ mapping[key] = value
+ else:
+ mapping[key] = value
+ total_mapping.update(mapping)
+ return total_mapping
+
+ def check_mapping_key(self, node, key_node, mapping, key, value):
+ # type: (Any, Any, Any, Any, Any) -> bool
+ """return True if key is unique"""
+ if key in mapping:
+ if not self.allow_duplicate_keys:
+ mk = mapping.get(key)
+ if PY2:
+ if isinstance(key, unicode):
+ key = key.encode('utf-8')
+ if isinstance(value, unicode):
+ value = value.encode('utf-8')
+ if isinstance(mk, unicode):
+ mk = mk.encode('utf-8')
+ args = [
+ 'while constructing a mapping',
+ node.start_mark,
+ 'found duplicate key "{}" with value "{}" '
+ '(original value: "{}")'.format(key, value, mk),
+ key_node.start_mark,
+ """
+ To suppress this check see:
+ http://yaml.readthedocs.io/en/latest/api.html#duplicate-keys
+ """,
+ """\
+ Duplicate keys will become an error in future releases, and are errors
+ by default when using the new API.
+ """,
+ ]
+ if self.allow_duplicate_keys is None:
+ warnings.warn(DuplicateKeyFutureWarning(*args))
+ else:
+ raise DuplicateKeyError(*args)
+ return False
+ return True
+
+ def check_set_key(self, node, key_node, setting, key):
+ # type: (Any, Any, Any, Any) -> None
+ if key in setting:
+ if not self.allow_duplicate_keys:
+ if PY2:
+ if isinstance(key, unicode):
+ key = key.encode('utf-8')
+ args = [
+ 'while constructing a set',
+ node.start_mark,
+ 'found duplicate key "{}"'.format(key),
+ key_node.start_mark,
+ """
+ To suppress this check see:
+ http://yaml.readthedocs.io/en/latest/api.html#duplicate-keys
+ """,
+ """\
+ Duplicate keys will become an error in future releases, and are errors
+ by default when using the new API.
+ """,
+ ]
+ if self.allow_duplicate_keys is None:
+ warnings.warn(DuplicateKeyFutureWarning(*args))
+ else:
+ raise DuplicateKeyError(*args)
+
+ def construct_pairs(self, node, deep=False):
+ # type: (Any, bool) -> Any
+ if not isinstance(node, MappingNode):
+ raise ConstructorError(
+ None, None, 'expected a mapping node, but found %s' % node.id, node.start_mark
+ )
+ pairs = []
+ for key_node, value_node in node.value:
+ key = self.construct_object(key_node, deep=deep)
+ value = self.construct_object(value_node, deep=deep)
+ pairs.append((key, value))
+ return pairs
+
+ @classmethod
+ def add_constructor(cls, tag, constructor):
+ # type: (Any, Any) -> None
+ if 'yaml_constructors' not in cls.__dict__:
+ cls.yaml_constructors = cls.yaml_constructors.copy()
+ cls.yaml_constructors[tag] = constructor
+
+ @classmethod
+ def add_multi_constructor(cls, tag_prefix, multi_constructor):
+ # type: (Any, Any) -> None
+ if 'yaml_multi_constructors' not in cls.__dict__:
+ cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
+ cls.yaml_multi_constructors[tag_prefix] = multi_constructor
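+
+ # Illustrative sketch (editor's note, not part of the vendored upstream
+ # code): tag constructors are looked up per class, and a subclass copies
+ # the registry on first modification. A hypothetical custom tag could be
+ # wired up as:
+ #
+ # >>> def construct_pair(loader, node):
+ # ...     return tuple(loader.construct_sequence(node))
+ # >>> BaseConstructor.add_constructor(u'!pair', construct_pair)
+ #
+ # add_multi_constructor() works the same way, except the registered
+ # callable also receives the tag suffix that follows the given prefix.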
+
+
+class SafeConstructor(BaseConstructor):
+ def construct_scalar(self, node):
+ # type: (Any) -> Any
+ if isinstance(node, MappingNode):
+ for key_node, value_node in node.value:
+ if key_node.tag == u'tag:yaml.org,2002:value':
+ return self.construct_scalar(value_node)
+ return BaseConstructor.construct_scalar(self, node)
+
+ def flatten_mapping(self, node):
+ # type: (Any) -> Any
+ """
+ This implements the merge key feature http://yaml.org/type/merge.html
+ by inserting keys from the merge dict/list of dicts if not yet
+ available in this node
+ """
+ merge = [] # type: List[Any]
+ index = 0
+ while index < len(node.value):
+ key_node, value_node = node.value[index]
+ if key_node.tag == u'tag:yaml.org,2002:merge':
+ if merge: # double << key
+ if self.allow_duplicate_keys:
+ del node.value[index]
+ index += 1
+ continue
+ args = [
+ 'while constructing a mapping',
+ node.start_mark,
+ 'found duplicate key "{}"'.format(key_node.value),
+ key_node.start_mark,
+ """
+ To suppress this check see:
+ http://yaml.readthedocs.io/en/latest/api.html#duplicate-keys
+ """,
+ """\
+ Duplicate keys will become an error in future releases, and are errors
+ by default when using the new API.
+ """,
+ ]
+ if self.allow_duplicate_keys is None:
+ warnings.warn(DuplicateKeyFutureWarning(*args))
+ else:
+ raise DuplicateKeyError(*args)
+ del node.value[index]
+ if isinstance(value_node, MappingNode):
+ self.flatten_mapping(value_node)
+ merge.extend(value_node.value)
+ elif isinstance(value_node, SequenceNode):
+ submerge = []
+ for subnode in value_node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError(
+ 'while constructing a mapping',
+ node.start_mark,
+ 'expected a mapping for merging, but found %s' % subnode.id,
+ subnode.start_mark,
+ )
+ self.flatten_mapping(subnode)
+ submerge.append(subnode.value)
+ submerge.reverse()
+ for value in submerge:
+ merge.extend(value)
+ else:
+ raise ConstructorError(
+ 'while constructing a mapping',
+ node.start_mark,
+ 'expected a mapping or list of mappings for merging, '
+ 'but found %s' % value_node.id,
+ value_node.start_mark,
+ )
+ elif key_node.tag == u'tag:yaml.org,2002:value':
+ key_node.tag = u'tag:yaml.org,2002:str'
+ index += 1
+ else:
+ index += 1
+ if bool(merge):
+ node.merge = merge # keep merge keys separate to be able to update without creating duplicates
+ node.value = merge + node.value
+
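+ # Hedged illustration of the merge-key feature handled above (assumed
+ # input, not from the upstream sources): given
+ #
+ #   defaults: &defaults {a: 1, b: 2}
+ #   extended:
+ #     <<: *defaults
+ #     b: 3
+ #
+ # flatten_mapping() prepends the (a, 1) and (b, 2) pairs from the merge,
+ # so the explicit b: 3 still wins when the mapping is constructed.
+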
+ def construct_mapping(self, node, deep=False):
+ # type: (Any, bool) -> Any
+ """deep is True when creating an object/mapping recursively,
+ in that case want the underlying elements available during construction
+ """
+ if isinstance(node, MappingNode):
+ self.flatten_mapping(node)
+ return BaseConstructor.construct_mapping(self, node, deep=deep)
+
+ def construct_yaml_null(self, node):
+ # type: (Any) -> Any
+ self.construct_scalar(node)
+ return None
+
+ # the YAML 1.2 spec no longer mentions yes/no etc.; the 1.1 spec does
+ bool_values = {
+ u'yes': True,
+ u'no': False,
+ u'y': True,
+ u'n': False,
+ u'true': True,
+ u'false': False,
+ u'on': True,
+ u'off': False,
+ }
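+
+ # For example (YAML 1.1 semantics, an editor's sketch rather than upstream
+ # documentation): the plain scalars `yes`, `On` and `TRUE` all load as
+ # True, because construct_yaml_bool() lower-cases the scalar before
+ # indexing bool_values.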
+
+ def construct_yaml_bool(self, node):
+ # type: (Any) -> bool
+ value = self.construct_scalar(node)
+ return self.bool_values[value.lower()]
+
+ def construct_yaml_int(self, node):
+ # type: (Any) -> int
+ value_s = to_str(self.construct_scalar(node))
+ value_s = value_s.replace('_', "")
+ sign = +1
+ if value_s[0] == '-':
+ sign = -1
+ if value_s[0] in '+-':
+ value_s = value_s[1:]
+ if value_s == '0':
+ return 0
+ elif value_s.startswith('0b'):
+ return sign * int(value_s[2:], 2)
+ elif value_s.startswith('0x'):
+ return sign * int(value_s[2:], 16)
+ elif value_s.startswith('0o'):
+ return sign * int(value_s[2:], 8)
+ elif self.resolver.processing_version == (1, 1) and value_s[0] == '0':
+ return sign * int(value_s, 8)
+ elif self.resolver.processing_version == (1, 1) and ':' in value_s:
+ digits = [int(part) for part in value_s.split(':')]
+ digits.reverse()
+ base = 1
+ value = 0
+ for digit in digits:
+ value += digit * base
+ base *= 60
+ return sign * value
+ else:
+ return sign * int(value_s)
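+
+ # Worked examples for the branches above (editor's illustration only):
+ # '0x1F'  -> 31    (hexadecimal)
+ # '0b101' -> 5     (binary)
+ # '1_000' -> 1000  (underscores stripped)
+ # '1:30:00' under YAML 1.1 -> 5400  (base-60, sexagesimal)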
+
+ inf_value = 1e300
+ while inf_value != inf_value * inf_value:
+ inf_value *= inf_value
+ nan_value = -inf_value / inf_value # Trying to make a quiet NaN (like C99).
+
+ def construct_yaml_float(self, node):
+ # type: (Any) -> float
+ value_so = to_str(self.construct_scalar(node))
+ value_s = value_so.replace('_', "").lower()
+ sign = +1
+ if value_s[0] == '-':
+ sign = -1
+ if value_s[0] in '+-':
+ value_s = value_s[1:]
+ if value_s == '.inf':
+ return sign * self.inf_value
+ elif value_s == '.nan':
+ return self.nan_value
+ elif self.resolver.processing_version != (1, 2) and ':' in value_s:
+ digits = [float(part) for part in value_s.split(':')]
+ digits.reverse()
+ base = 1
+ value = 0.0
+ for digit in digits:
+ value += digit * base
+ base *= 60
+ return sign * value
+ else:
+ if self.resolver.processing_version != (1, 2) and 'e' in value_s:
+ # value_s is lower case independent of input
+ mantissa, exponent = value_s.split('e')
+ if '.' not in mantissa:
+ warnings.warn(MantissaNoDotYAML1_1Warning(node, value_so))
+ return sign * float(value_s)
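+
+ # Illustrative examples for the float branches above (editor's note):
+ # '.inf' -> inf, '-.inf' -> -inf, '.nan' -> nan
+ # '1:30.5' under YAML 1.1 -> 90.5  (base-60 with a fractional part)
+ # '1e3' under YAML 1.1 warns, since 1.1 expects a dot in the mantissa.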
+
+ if PY3:
+
+ def construct_yaml_binary(self, node):
+ # type: (Any) -> Any
+ try:
+ value = self.construct_scalar(node).encode('ascii')
+ except UnicodeEncodeError as exc:
+ raise ConstructorError(
+ None,
+ None,
+ 'failed to convert base64 data into ascii: %s' % exc,
+ node.start_mark,
+ )
+ try:
+ if hasattr(base64, 'decodebytes'):
+ return base64.decodebytes(value)
+ else:
+ return base64.decodestring(value)
+ except binascii.Error as exc:
+ raise ConstructorError(
+ None, None, 'failed to decode base64 data: %s' % exc, node.start_mark
+ )
+
+ else:
+
+ def construct_yaml_binary(self, node):
+ # type: (Any) -> Any
+ value = self.construct_scalar(node)
+ try:
+ return to_str(value).decode('base64')
+ except (binascii.Error, UnicodeEncodeError) as exc:
+ raise ConstructorError(
+ None, None, 'failed to decode base64 data: %s' % exc, node.start_mark
+ )
+
+ timestamp_regexp = RegExp(
+ u"""^(?P<year>[0-9][0-9][0-9][0-9])
+ -(?P<month>[0-9][0-9]?)
+ -(?P<day>[0-9][0-9]?)
+ (?:((?P<t>[Tt])|[ \\t]+) # explicitly not retaining extra spaces
+ (?P<hour>[0-9][0-9]?)
+ :(?P<minute>[0-9][0-9])
+ :(?P<second>[0-9][0-9])
+ (?:\\.(?P<fraction>[0-9]*))?
+ (?:[ \\t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
+ (?::(?P<tz_minute>[0-9][0-9]))?))?)?$""",
+ re.X,
+ )
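+
+ # Strings the pattern above accepts (editor's sketch of YAML 1.1
+ # timestamps):
+ # '2001-12-14'                 -> datetime.date
+ # '2001-12-14 21:59:43.10 -5'  -> datetime.datetime (tz applied below)
+ # '2001-12-14t21:59:43.10-05:00' -> datetime.datetime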
+
+ def construct_yaml_timestamp(self, node, values=None):
+ # type: (Any, Any) -> Any
+ if values is None:
+ try:
+ match = self.timestamp_regexp.match(node.value)
+ except TypeError:
+ match = None
+ if match is None:
+ raise ConstructorError(
+ None,
+ None,
+ 'failed to construct timestamp from "{}"'.format(node.value),
+ node.start_mark,
+ )
+ values = match.groupdict()
+ year = int(values['year'])
+ month = int(values['month'])
+ day = int(values['day'])
+ if not values['hour']:
+ return datetime.date(year, month, day)
+ hour = int(values['hour'])
+ minute = int(values['minute'])
+ second = int(values['second'])
+ fraction = 0
+ if values['fraction']:
+ fraction_s = values['fraction'][:6]
+ while len(fraction_s) < 6:
+ fraction_s += '0'
+ fraction = int(fraction_s)
+ if len(values['fraction']) > 6 and int(values['fraction'][6]) > 4:
+ fraction += 1
+ delta = None
+ if values['tz_sign']:
+ tz_hour = int(values['tz_hour'])
+ minutes = values['tz_minute']
+ tz_minute = int(minutes) if minutes else 0
+ delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
+ if values['tz_sign'] == '-':
+ delta = -delta
+ # should do something else instead (or hook this up to the preceding
+ # if statement in reverse):
+ # if delta is None:
+ #     return datetime.datetime(year, month, day, hour, minute, second, fraction)
+ # return datetime.datetime(year, month, day, hour, minute, second, fraction,
+ #                          datetime.timezone.utc)
+ # the above is not good enough though: it should provide tzinfo. In
+ # Python 3 that is easily doable; drop that kind of support for
+ # Python 2, as it has no native tzinfo
+ data = datetime.datetime(year, month, day, hour, minute, second, fraction)
+ if delta:
+ data -= delta
+ return data
+
+ def construct_yaml_omap(self, node):
+ # type: (Any) -> Any
+ # Note: we do now check for duplicate keys
+ omap = ordereddict()
+ yield omap
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError(
+ 'while constructing an ordered map',
+ node.start_mark,
+ 'expected a sequence, but found %s' % node.id,
+ node.start_mark,
+ )
+ for subnode in node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError(
+ 'while constructing an ordered map',
+ node.start_mark,
+ 'expected a mapping of length 1, but found %s' % subnode.id,
+ subnode.start_mark,
+ )
+ if len(subnode.value) != 1:
+ raise ConstructorError(
+ 'while constructing an ordered map',
+ node.start_mark,
+ 'expected a single mapping item, but found %d items' % len(subnode.value),
+ subnode.start_mark,
+ )
+ key_node, value_node = subnode.value[0]
+ key = self.construct_object(key_node)
+ assert key not in omap
+ value = self.construct_object(value_node)
+ omap[key] = value
+
+ def construct_yaml_pairs(self, node):
+ # type: (Any) -> Any
+ # Note: the same code as `construct_yaml_omap`.
+ pairs = [] # type: List[Any]
+ yield pairs
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError(
+ 'while constructing pairs',
+ node.start_mark,
+ 'expected a sequence, but found %s' % node.id,
+ node.start_mark,
+ )
+ for subnode in node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError(
+ 'while constructing pairs',
+ node.start_mark,
+ 'expected a mapping of length 1, but found %s' % subnode.id,
+ subnode.start_mark,
+ )
+ if len(subnode.value) != 1:
+ raise ConstructorError(
+ 'while constructing pairs',
+ node.start_mark,
+ 'expected a single mapping item, but found %d items' % len(subnode.value),
+ subnode.start_mark,
+ )
+ key_node, value_node = subnode.value[0]
+ key = self.construct_object(key_node)
+ value = self.construct_object(value_node)
+ pairs.append((key, value))
+
+ def construct_yaml_set(self, node):
+ # type: (Any) -> Any
+ data = set() # type: Set[Any]
+ yield data
+ value = self.construct_mapping(node)
+ data.update(value)
+
+ def construct_yaml_str(self, node):
+ # type: (Any) -> Any
+ value = self.construct_scalar(node)
+ if PY3:
+ return value
+ try:
+ return value.encode('ascii')
+ except UnicodeEncodeError:
+ return value
+
+ def construct_yaml_seq(self, node):
+ # type: (Any) -> Any
+ data = self.yaml_base_list_type() # type: List[Any]
+ yield data
+ data.extend(self.construct_sequence(node))
+
+ def construct_yaml_map(self, node):
+ # type: (Any) -> Any
+ data = self.yaml_base_dict_type() # type: Dict[Any, Any]
+ yield data
+ value = self.construct_mapping(node)
+ data.update(value)
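+
+ # Editor's note on the generator-based constructors above: yielding the
+ # empty container first lets construct_object() register it before the
+ # children are built, which is what makes self-referential YAML such as
+ #
+ #   &a {self: *a}
+ #
+ # constructible without infinite recursion.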
+
+ def construct_yaml_object(self, node, cls):
+ # type: (Any, Any) -> Any
+ data = cls.__new__(cls)
+ yield data
+ if hasattr(data, '__setstate__'):
+ state = self.construct_mapping(node, deep=True)
+ data.__setstate__(state)
+ else:
+ state = self.construct_mapping(node)
+ data.__dict__.update(state)
+
+ def construct_undefined(self, node):
+ # type: (Any) -> None
+ raise ConstructorError(
+ None,
+ None,
+ 'could not determine a constructor for the tag %r' % utf8(node.tag),
+ node.start_mark,
+ )
+
+
+SafeConstructor.add_constructor(u'tag:yaml.org,2002:null', SafeConstructor.construct_yaml_null)
+
+SafeConstructor.add_constructor(u'tag:yaml.org,2002:bool', SafeConstructor.construct_yaml_bool)
+
+SafeConstructor.add_constructor(u'tag:yaml.org,2002:int', SafeConstructor.construct_yaml_int)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:float', SafeConstructor.construct_yaml_float
+)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:binary', SafeConstructor.construct_yaml_binary
+)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:timestamp', SafeConstructor.construct_yaml_timestamp
+)
+
+SafeConstructor.add_constructor(u'tag:yaml.org,2002:omap', SafeConstructor.construct_yaml_omap)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:pairs', SafeConstructor.construct_yaml_pairs
+)
+
+SafeConstructor.add_constructor(u'tag:yaml.org,2002:set', SafeConstructor.construct_yaml_set)
+
+SafeConstructor.add_constructor(u'tag:yaml.org,2002:str', SafeConstructor.construct_yaml_str)
+
+SafeConstructor.add_constructor(u'tag:yaml.org,2002:seq', SafeConstructor.construct_yaml_seq)
+
+SafeConstructor.add_constructor(u'tag:yaml.org,2002:map', SafeConstructor.construct_yaml_map)
+
+SafeConstructor.add_constructor(None, SafeConstructor.construct_undefined)
+
+if PY2:
+
+ class classobj:
+ pass
+
+
+class Constructor(SafeConstructor):
+ def construct_python_str(self, node):
+ # type: (Any) -> Any
+ return utf8(self.construct_scalar(node))
+
+ def construct_python_unicode(self, node):
+ # type: (Any) -> Any
+ return self.construct_scalar(node)
+
+ if PY3:
+
+ def construct_python_bytes(self, node):
+ # type: (Any) -> Any
+ try:
+ value = self.construct_scalar(node).encode('ascii')
+ except UnicodeEncodeError as exc:
+ raise ConstructorError(
+ None,
+ None,
+ 'failed to convert base64 data into ascii: %s' % exc,
+ node.start_mark,
+ )
+ try:
+ if hasattr(base64, 'decodebytes'):
+ return base64.decodebytes(value)
+ else:
+ return base64.decodestring(value)
+ except binascii.Error as exc:
+ raise ConstructorError(
+ None, None, 'failed to decode base64 data: %s' % exc, node.start_mark
+ )
+
+ def construct_python_long(self, node):
+ # type: (Any) -> int
+ val = self.construct_yaml_int(node)
+ if PY3:
+ return val
+ return int(val)
+
+ def construct_python_complex(self, node):
+ # type: (Any) -> Any
+ return complex(self.construct_scalar(node))
+
+ def construct_python_tuple(self, node):
+ # type: (Any) -> Any
+ return tuple(self.construct_sequence(node))
+
+ def find_python_module(self, name, mark):
+ # type: (Any, Any) -> Any
+ if not name:
+ raise ConstructorError(
+ 'while constructing a Python module',
+ mark,
+ 'expected non-empty name appended to the tag',
+ mark,
+ )
+ try:
+ __import__(name)
+ except ImportError as exc:
+ raise ConstructorError(
+ 'while constructing a Python module',
+ mark,
+ 'cannot find module %r (%s)' % (utf8(name), exc),
+ mark,
+ )
+ return sys.modules[name]
+
+ def find_python_name(self, name, mark):
+ # type: (Any, Any) -> Any
+ if not name:
+ raise ConstructorError(
+ 'while constructing a Python object',
+ mark,
+ 'expected non-empty name appended to the tag',
+ mark,
+ )
+ if u'.' in name:
+ lname = name.split('.')
+ lmodule_name = lname
+ lobject_name = [] # type: List[Any]
+ while len(lmodule_name) > 1:
+ lobject_name.insert(0, lmodule_name.pop())
+ module_name = '.'.join(lmodule_name)
+ try:
+ __import__(module_name)
+ # object_name = '.'.join(object_name)
+ break
+ except ImportError:
+ continue
+ else:
+ module_name = builtins_module
+ lobject_name = [name]
+ try:
+ __import__(module_name)
+ except ImportError as exc:
+ raise ConstructorError(
+ 'while constructing a Python object',
+ mark,
+ 'cannot find module %r (%s)' % (utf8(module_name), exc),
+ mark,
+ )
+ module = sys.modules[module_name]
+ object_name = '.'.join(lobject_name)
+ obj = module
+ while lobject_name:
+ if not hasattr(obj, lobject_name[0]):
+ raise ConstructorError(
+ 'while constructing a Python object',
+ mark,
+ 'cannot find %r in the module %r' % (utf8(object_name), module.__name__),
+ mark,
+ )
+ obj = getattr(obj, lobject_name.pop(0))
+ return obj
+
+ def construct_python_name(self, suffix, node):
+ # type: (Any, Any) -> Any
+ value = self.construct_scalar(node)
+ if value:
+ raise ConstructorError(
+ 'while constructing a Python name',
+ node.start_mark,
+ 'expected the empty value, but found %r' % utf8(value),
+ node.start_mark,
+ )
+ return self.find_python_name(suffix, node.start_mark)
+
+ def construct_python_module(self, suffix, node):
+ # type: (Any, Any) -> Any
+ value = self.construct_scalar(node)
+ if value:
+ raise ConstructorError(
+ 'while constructing a Python module',
+ node.start_mark,
+ 'expected the empty value, but found %r' % utf8(value),
+ node.start_mark,
+ )
+ return self.find_python_module(suffix, node.start_mark)
+
+ def make_python_instance(self, suffix, node, args=None, kwds=None, newobj=False):
+ # type: (Any, Any, Any, Any, bool) -> Any
+ if not args:
+ args = []
+ if not kwds:
+ kwds = {}
+ cls = self.find_python_name(suffix, node.start_mark)
+ if PY3:
+ if newobj and isinstance(cls, type):
+ return cls.__new__(cls, *args, **kwds)
+ else:
+ return cls(*args, **kwds)
+ else:
+ if newobj and isinstance(cls, type(classobj)) and not args and not kwds:
+ instance = classobj()
+ instance.__class__ = cls
+ return instance
+ elif newobj and isinstance(cls, type):
+ return cls.__new__(cls, *args, **kwds)
+ else:
+ return cls(*args, **kwds)
+
+ def set_python_instance_state(self, instance, state):
+ # type: (Any, Any) -> None
+ if hasattr(instance, '__setstate__'):
+ instance.__setstate__(state)
+ else:
+ slotstate = {} # type: Dict[Any, Any]
+ if isinstance(state, tuple) and len(state) == 2:
+ state, slotstate = state
+ if hasattr(instance, '__dict__'):
+ instance.__dict__.update(state)
+ elif state:
+ slotstate.update(state)
+ for key, value in slotstate.items():
+ setattr(instance, key, value)
+
+ def construct_python_object(self, suffix, node):
+ # type: (Any, Any) -> Any
+ # Format:
+ # !!python/object:module.name { ... state ... }
+ instance = self.make_python_instance(suffix, node, newobj=True)
+ self.recursive_objects[node] = instance
+ yield instance
+ deep = hasattr(instance, '__setstate__')
+ state = self.construct_mapping(node, deep=deep)
+ self.set_python_instance_state(instance, state)
+
+ def construct_python_object_apply(self, suffix, node, newobj=False):
+ # type: (Any, Any, bool) -> Any
+ # Format:
+ # !!python/object/apply # (or !!python/object/new)
+ # args: [ ... arguments ... ]
+ # kwds: { ... keywords ... }
+ # state: ... state ...
+ # listitems: [ ... listitems ... ]
+ # dictitems: { ... dictitems ... }
+ # or short format:
+ # !!python/object/apply [ ... arguments ... ]
+ # The difference between !!python/object/apply and !!python/object/new
+ # is how an object is created, check make_python_instance for details.
+ if isinstance(node, SequenceNode):
+ args = self.construct_sequence(node, deep=True)
+ kwds = {} # type: Dict[Any, Any]
+ state = {} # type: Dict[Any, Any]
+ listitems = [] # type: List[Any]
+ dictitems = {} # type: Dict[Any, Any]
+ else:
+ value = self.construct_mapping(node, deep=True)
+ args = value.get('args', [])
+ kwds = value.get('kwds', {})
+ state = value.get('state', {})
+ listitems = value.get('listitems', [])
+ dictitems = value.get('dictitems', {})
+ instance = self.make_python_instance(suffix, node, args, kwds, newobj)
+ if bool(state):
+ self.set_python_instance_state(instance, state)
+ if bool(listitems):
+ instance.extend(listitems)
+ if bool(dictitems):
+ for key in dictitems:
+ instance[key] = dictitems[key]
+ return instance
+
+ def construct_python_object_new(self, suffix, node):
+ # type: (Any, Any) -> Any
+ return self.construct_python_object_apply(suffix, node, newobj=True)
+
+
+Constructor.add_constructor(u'tag:yaml.org,2002:python/none', Constructor.construct_yaml_null)
+
+Constructor.add_constructor(u'tag:yaml.org,2002:python/bool', Constructor.construct_yaml_bool)
+
+Constructor.add_constructor(u'tag:yaml.org,2002:python/str', Constructor.construct_python_str)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/unicode', Constructor.construct_python_unicode
+)
+
+if PY3:
+ Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/bytes', Constructor.construct_python_bytes
+ )
+
+Constructor.add_constructor(u'tag:yaml.org,2002:python/int', Constructor.construct_yaml_int)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/long', Constructor.construct_python_long
+)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/float', Constructor.construct_yaml_float
+)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/complex', Constructor.construct_python_complex
+)
+
+Constructor.add_constructor(u'tag:yaml.org,2002:python/list', Constructor.construct_yaml_seq)
+
+Constructor.add_constructor(
+ u'tag:yaml.org,2002:python/tuple', Constructor.construct_python_tuple
+)
+
+Constructor.add_constructor(u'tag:yaml.org,2002:python/dict', Constructor.construct_yaml_map)
+
+Constructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/name:', Constructor.construct_python_name
+)
+
+Constructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/module:', Constructor.construct_python_module
+)
+
+Constructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/object:', Constructor.construct_python_object
+)
+
+Constructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/object/apply:', Constructor.construct_python_object_apply
+)
+
+Constructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/object/new:', Constructor.construct_python_object_new
+)
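+
+# Illustration (assumed example, editor's note): with the unsafe Constructor
+# a document such as
+#
+#   !!python/object/apply:collections.OrderedDict [[[k, 1]]]
+#
+# imports collections and calls OrderedDict([['k', 1]]), which is why the
+# full Constructor must only be used on trusted input.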
+
+
+class RoundTripConstructor(SafeConstructor):
+ """need to store the comments on the node itself,
+ as well as on the items
+ """
+
+ def construct_scalar(self, node):
+ # type: (Any) -> Any
+ if not isinstance(node, ScalarNode):
+ raise ConstructorError(
+ None, None, 'expected a scalar node, but found %s' % node.id, node.start_mark
+ )
+
+ if node.style == '|' and isinstance(node.value, text_type):
+ lss = LiteralScalarString(node.value, anchor=node.anchor)
+ if node.comment and node.comment[1]:
+ lss.comment = node.comment[1][0] # type: ignore
+ return lss
+ if node.style == '>' and isinstance(node.value, text_type):
+ fold_positions = [] # type: List[int]
+ idx = -1
+ while True:
+ idx = node.value.find('\a', idx + 1)
+ if idx < 0:
+ break
+ fold_positions.append(idx - len(fold_positions))
+ fss = FoldedScalarString(node.value.replace('\a', ''), anchor=node.anchor)
+ if node.comment and node.comment[1]:
+ fss.comment = node.comment[1][0] # type: ignore
+ if fold_positions:
+ fss.fold_pos = fold_positions # type: ignore
+ return fss
+ elif bool(self._preserve_quotes) and isinstance(node.value, text_type):
+ if node.style == "'":
+ return SingleQuotedScalarString(node.value, anchor=node.anchor)
+ if node.style == '"':
+ return DoubleQuotedScalarString(node.value, anchor=node.anchor)
+ if node.anchor:
+ return PlainScalarString(node.value, anchor=node.anchor)
+ return node.value
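+
+ # Round-trip sketch (editor's illustration of the branches above): with
+ # preserve_quotes enabled, the scalars in
+ #
+ #   a: 'single'
+ #   b: "double"
+ #
+ # come back as SingleQuotedScalarString / DoubleQuotedScalarString, so the
+ # original quoting style survives a load/dump cycle.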
+
+ def construct_yaml_int(self, node):
+ # type: (Any) -> Any
+ width = None # type: Any
+ value_su = to_str(self.construct_scalar(node))
+ try:
+ sx = value_su.rstrip('_')
+ underscore = [len(sx) - sx.rindex('_') - 1, False, False] # type: Any
+ except ValueError:
+ underscore = None
+ except IndexError:
+ underscore = None
+ value_s = value_su.replace('_', "")
+ sign = +1
+ if value_s[0] == '-':
+ sign = -1
+ if value_s[0] in '+-':
+ value_s = value_s[1:]
+ if value_s == '0':
+ return 0
+ elif value_s.startswith('0b'):
+ if self.resolver.processing_version > (1, 1) and value_s[2] == '0':
+ width = len(value_s[2:])
+ if underscore is not None:
+ underscore[1] = value_su[2] == '_'
+ underscore[2] = len(value_su[2:]) > 1 and value_su[-1] == '_'
+ return BinaryInt(
+ sign * int(value_s[2:], 2),
+ width=width,
+ underscore=underscore,
+ anchor=node.anchor,
+ )
+ elif value_s.startswith('0x'):
+ # default to lower-case if there is no a-f/A-F in the string
+ if self.resolver.processing_version > (1, 1) and value_s[2] == '0':
+ width = len(value_s[2:])
+ hex_fun = HexInt # type: Any
+ for ch in value_s[2:]:
+ if ch in 'ABCDEF': # first non-digit is capital
+ hex_fun = HexCapsInt
+ break
+ if ch in 'abcdef':
+ break
+ if underscore is not None:
+ underscore[1] = value_su[2] == '_'
+ underscore[2] = len(value_su[2:]) > 1 and value_su[-1] == '_'
+ return hex_fun(
+ sign * int(value_s[2:], 16),
+ width=width,
+ underscore=underscore,
+ anchor=node.anchor,
+ )
+ elif value_s.startswith('0o'):
+ if self.resolver.processing_version > (1, 1) and value_s[2] == '0':
+ width = len(value_s[2:])
+ if underscore is not None:
+ underscore[1] = value_su[2] == '_'
+ underscore[2] = len(value_su[2:]) > 1 and value_su[-1] == '_'
+ return OctalInt(
+ sign * int(value_s[2:], 8),
+ width=width,
+ underscore=underscore,
+ anchor=node.anchor,
+ )
+ elif self.resolver.processing_version != (1, 2) and value_s[0] == '0':
+ return sign * int(value_s, 8)
+ elif self.resolver.processing_version != (1, 2) and ':' in value_s:
+ digits = [int(part) for part in value_s.split(':')]
+ digits.reverse()
+ base = 1
+ value = 0
+ for digit in digits:
+ value += digit * base
+ base *= 60
+ return sign * value
+ elif self.resolver.processing_version > (1, 1) and value_s[0] == '0':
+ # not an octal, an integer with leading zero(s)
+ if underscore is not None:
+ # cannot have a leading underscore
+ underscore[2] = len(value_su) > 1 and value_su[-1] == '_'
+ return ScalarInt(sign * int(value_s), width=len(value_s), underscore=underscore)
+ elif underscore:
+ # cannot have a leading underscore
+ underscore[2] = len(value_su) > 1 and value_su[-1] == '_'
+ return ScalarInt(
+ sign * int(value_s), width=None, underscore=underscore, anchor=node.anchor
+ )
+ elif node.anchor:
+ return ScalarInt(sign * int(value_s), width=None, anchor=node.anchor)
+ else:
+ return sign * int(value_s)
+
+ def construct_yaml_float(self, node):
+ # type: (Any) -> Any
+ def leading_zeros(v):
+ # type: (Any) -> int
+ lead0 = 0
+ idx = 0
+ while idx < len(v) and v[idx] in '0.':
+ if v[idx] == '0':
+ lead0 += 1
+ idx += 1
+ return lead0
+
+ # underscore = None
+ m_sign = False # type: Any
+ value_so = to_str(self.construct_scalar(node))
+ value_s = value_so.replace('_', "").lower()
+ sign = +1
+ if value_s[0] == '-':
+ sign = -1
+ if value_s[0] in '+-':
+ m_sign = value_s[0]
+ value_s = value_s[1:]
+ if value_s == '.inf':
+ return sign * self.inf_value
+ if value_s == '.nan':
+ return self.nan_value
+ if self.resolver.processing_version != (1, 2) and ':' in value_s:
+ digits = [float(part) for part in value_s.split(':')]
+ digits.reverse()
+ base = 1
+ value = 0.0
+ for digit in digits:
+ value += digit * base
+ base *= 60
+ return sign * value
+ if 'e' in value_s:
+ try:
+ mantissa, exponent = value_so.split('e')
+ exp = 'e'
+ except ValueError:
+ mantissa, exponent = value_so.split('E')
+ exp = 'E'
+ if self.resolver.processing_version != (1, 2):
+ # value_s is lower case independent of input
+ if '.' not in mantissa:
+ warnings.warn(MantissaNoDotYAML1_1Warning(node, value_so))
+ lead0 = leading_zeros(mantissa)
+ width = len(mantissa)
+ prec = mantissa.find('.')
+ if m_sign:
+ width -= 1
+ e_width = len(exponent)
+ e_sign = exponent[0] in '+-'
+ # nprint('sf', width, prec, m_sign, exp, e_width, e_sign)
+ return ScalarFloat(
+ sign * float(value_s),
+ width=width,
+ prec=prec,
+ m_sign=m_sign,
+ m_lead0=lead0,
+ exp=exp,
+ e_width=e_width,
+ e_sign=e_sign,
+ anchor=node.anchor,
+ )
+ width = len(value_so)
+ prec = value_so.index('.') # index() is safe here: this would not be a float without a dot
+ lead0 = leading_zeros(value_so)
+ return ScalarFloat(
+ sign * float(value_s),
+ width=width,
+ prec=prec,
+ m_sign=m_sign,
+ m_lead0=lead0,
+ anchor=node.anchor,
+ )
+
+ def construct_yaml_str(self, node):
+ # type: (Any) -> Any
+ value = self.construct_scalar(node)
+ if isinstance(value, ScalarString):
+ return value
+ if PY3:
+ return value
+ try:
+ return value.encode('ascii')
+ except AttributeError:
+ # in case you replace the node dynamically e.g. with a dict
+ return value
+ except UnicodeEncodeError:
+ return value
+
+ def construct_rt_sequence(self, node, seqtyp, deep=False):
+ # type: (Any, Any, bool) -> Any
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError(
+ None, None, 'expected a sequence node, but found %s' % node.id, node.start_mark
+ )
+ ret_val = []
+ if node.comment:
+ seqtyp._yaml_add_comment(node.comment[:2])
+ if len(node.comment) > 2:
+ seqtyp.yaml_end_comment_extend(node.comment[2], clear=True)
+ if node.anchor:
+ from dynaconf.vendor.ruamel.yaml.serializer import templated_id
+
+ if not templated_id(node.anchor):
+ seqtyp.yaml_set_anchor(node.anchor)
+ for idx, child in enumerate(node.value):
+ if child.comment:
+ seqtyp._yaml_add_comment(child.comment, key=idx)
+ child.comment = None # if moved to sequence remove from child
+ ret_val.append(self.construct_object(child, deep=deep))
+ seqtyp._yaml_set_idx_line_col(
+ idx, [child.start_mark.line, child.start_mark.column]
+ )
+ return ret_val
+
+ def flatten_mapping(self, node):
+ # type: (Any) -> Any
+ """
+ This implements the merge key feature http://yaml.org/type/merge.html
+ by inserting keys from the merge dict/list of dicts if not yet
+ available in this node
+ """
+
+ def constructed(value_node):
+ # type: (Any) -> Any
+ # If the contents of a merge are defined within the
+ # merge marker, then they won't have been constructed
+ # yet. But if they were already constructed, we need to use
+ # the existing object.
+ if value_node in self.constructed_objects:
+ value = self.constructed_objects[value_node]
+ else:
+ value = self.construct_object(value_node, deep=False)
+ return value
+
+ # merge = []
+ merge_map_list = [] # type: List[Any]
+ index = 0
+ while index < len(node.value):
+ key_node, value_node = node.value[index]
+ if key_node.tag == u'tag:yaml.org,2002:merge':
+ if merge_map_list: # double << key
+ if self.allow_duplicate_keys:
+ del node.value[index]
+ index += 1
+ continue
+ args = [
+ 'while constructing a mapping',
+ node.start_mark,
+ 'found duplicate key "{}"'.format(key_node.value),
+ key_node.start_mark,
+ """
+ To suppress this check see:
+ http://yaml.readthedocs.io/en/latest/api.html#duplicate-keys
+ """,
+ """\
+ Duplicate keys will become an error in future releases, and are errors
+ by default when using the new API.
+ """,
+ ]
+ if self.allow_duplicate_keys is None:
+ warnings.warn(DuplicateKeyFutureWarning(*args))
+ else:
+ raise DuplicateKeyError(*args)
+ del node.value[index]
+ if isinstance(value_node, MappingNode):
+ merge_map_list.append((index, constructed(value_node)))
+ # self.flatten_mapping(value_node)
+ # merge.extend(value_node.value)
+ elif isinstance(value_node, SequenceNode):
+ # submerge = []
+ for subnode in value_node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError(
+ 'while constructing a mapping',
+ node.start_mark,
+ 'expected a mapping for merging, but found %s' % subnode.id,
+ subnode.start_mark,
+ )
+ merge_map_list.append((index, constructed(subnode)))
+ # self.flatten_mapping(subnode)
+ # submerge.append(subnode.value)
+ # submerge.reverse()
+ # for value in submerge:
+ # merge.extend(value)
+ else:
+ raise ConstructorError(
+ 'while constructing a mapping',
+ node.start_mark,
+ 'expected a mapping or list of mappings for merging, '
+ 'but found %s' % value_node.id,
+ value_node.start_mark,
+ )
+ elif key_node.tag == u'tag:yaml.org,2002:value':
+ key_node.tag = u'tag:yaml.org,2002:str'
+ index += 1
+ else:
+ index += 1
+ return merge_map_list
+ # if merge:
+ # node.value = merge + node.value
+
+ def _sentinel(self):
+ # type: () -> None
+ pass
+
+ def construct_mapping(self, node, maptyp, deep=False): # type: ignore
+ # type: (Any, Any, bool) -> Any
+ if not isinstance(node, MappingNode):
+ raise ConstructorError(
+ None, None, 'expected a mapping node, but found %s' % node.id, node.start_mark
+ )
+ merge_map = self.flatten_mapping(node)
+ # mapping = {}
+ if node.comment:
+ maptyp._yaml_add_comment(node.comment[:2])
+ if len(node.comment) > 2:
+ maptyp.yaml_end_comment_extend(node.comment[2], clear=True)
+ if node.anchor:
+ from dynaconf.vendor.ruamel.yaml.serializer import templated_id
+
+ if not templated_id(node.anchor):
+ maptyp.yaml_set_anchor(node.anchor)
+ last_key, last_value = None, self._sentinel
+ for key_node, value_node in node.value:
+ # keys can be list -> deep
+ key = self.construct_object(key_node, deep=True)
+ # lists are not hashable, but tuples are
+ if not isinstance(key, Hashable):
+ if isinstance(key, MutableSequence):
+ key_s = CommentedKeySeq(key)
+ if key_node.flow_style is True:
+ key_s.fa.set_flow_style()
+ elif key_node.flow_style is False:
+ key_s.fa.set_block_style()
+ key = key_s
+ elif isinstance(key, MutableMapping):
+ key_m = CommentedKeyMap(key)
+ if key_node.flow_style is True:
+ key_m.fa.set_flow_style()
+ elif key_node.flow_style is False:
+ key_m.fa.set_block_style()
+ key = key_m
+ if PY2:
+ try:
+ hash(key)
+ except TypeError as exc:
+ raise ConstructorError(
+ 'while constructing a mapping',
+ node.start_mark,
+ 'found unacceptable key (%s)' % exc,
+ key_node.start_mark,
+ )
+ else:
+ if not isinstance(key, Hashable):
+ raise ConstructorError(
+ 'while constructing a mapping',
+ node.start_mark,
+ 'found unhashable key',
+ key_node.start_mark,
+ )
+ value = self.construct_object(value_node, deep=deep)
+ if self.check_mapping_key(node, key_node, maptyp, key, value):
+ if key_node.comment and len(key_node.comment) > 4 and key_node.comment[4]:
+ if last_value is None:
+ key_node.comment[0] = key_node.comment.pop(4)
+ maptyp._yaml_add_comment(key_node.comment, value=last_key)
+ else:
+ key_node.comment[2] = key_node.comment.pop(4)
+ maptyp._yaml_add_comment(key_node.comment, key=key)
+ key_node.comment = None
+ if key_node.comment:
+ maptyp._yaml_add_comment(key_node.comment, key=key)
+ if value_node.comment:
+ maptyp._yaml_add_comment(value_node.comment, value=key)
+ maptyp._yaml_set_kv_line_col(
+ key,
+ [
+ key_node.start_mark.line,
+ key_node.start_mark.column,
+ value_node.start_mark.line,
+ value_node.start_mark.column,
+ ],
+ )
+ maptyp[key] = value
+ last_key, last_value = key, value # could use indexing
+ # do this last, or a <<: before a key will prevent insertion in
+ # instances of collections.OrderedDict (as they have no __contains__)
+ if merge_map:
+ maptyp.add_yaml_merge(merge_map)
+
+ def construct_setting(self, node, typ, deep=False):
+ # type: (Any, Any, bool) -> Any
+ if not isinstance(node, MappingNode):
+ raise ConstructorError(
+ None, None, 'expected a mapping node, but found %s' % node.id, node.start_mark
+ )
+ if node.comment:
+ typ._yaml_add_comment(node.comment[:2])
+ if len(node.comment) > 2:
+ typ.yaml_end_comment_extend(node.comment[2], clear=True)
+ if node.anchor:
+ from dynaconf.vendor.ruamel.yaml.serializer import templated_id
+
+ if not templated_id(node.anchor):
+ typ.yaml_set_anchor(node.anchor)
+ for key_node, value_node in node.value:
+ # keys can be list -> deep
+ key = self.construct_object(key_node, deep=True)
+ # lists are not hashable, but tuples are
+ if not isinstance(key, Hashable):
+ if isinstance(key, list):
+ key = tuple(key)
+ if PY2:
+ try:
+ hash(key)
+ except TypeError as exc:
+ raise ConstructorError(
+ 'while constructing a mapping',
+ node.start_mark,
+ 'found unacceptable key (%s)' % exc,
+ key_node.start_mark,
+ )
+ else:
+ if not isinstance(key, Hashable):
+ raise ConstructorError(
+ 'while constructing a mapping',
+ node.start_mark,
+ 'found unhashable key',
+ key_node.start_mark,
+ )
+ # construct the value, even though for a set it should be null
+ value = self.construct_object(value_node, deep=deep) # NOQA
+ self.check_set_key(node, key_node, typ, key)
+ if key_node.comment:
+ typ._yaml_add_comment(key_node.comment, key=key)
+ if value_node.comment:
+ typ._yaml_add_comment(value_node.comment, value=key)
+ typ.add(key)
+
+ def construct_yaml_seq(self, node):
+ # type: (Any) -> Any
+ data = CommentedSeq()
+ data._yaml_set_line_col(node.start_mark.line, node.start_mark.column)
+ if node.comment:
+ data._yaml_add_comment(node.comment)
+ yield data
+ data.extend(self.construct_rt_sequence(node, data))
+ self.set_collection_style(data, node)
+
+ def construct_yaml_map(self, node):
+ # type: (Any) -> Any
+ data = CommentedMap()
+ data._yaml_set_line_col(node.start_mark.line, node.start_mark.column)
+ yield data
+ self.construct_mapping(node, data, deep=True)
+ self.set_collection_style(data, node)
+
+ def set_collection_style(self, data, node):
+ # type: (Any, Any) -> None
+ if len(data) == 0:
+ return
+ if node.flow_style is True:
+ data.fa.set_flow_style()
+ elif node.flow_style is False:
+ data.fa.set_block_style()
+
+ def construct_yaml_object(self, node, cls):
+ # type: (Any, Any) -> Any
+ data = cls.__new__(cls)
+ yield data
+ if hasattr(data, '__setstate__'):
+ state = SafeConstructor.construct_mapping(self, node, deep=True)
+ data.__setstate__(state)
+ else:
+ state = SafeConstructor.construct_mapping(self, node)
+ data.__dict__.update(state)
+
+ def construct_yaml_omap(self, node):
+ # type: (Any) -> Any
+ # Note: we do now check for duplicate keys
+ omap = CommentedOrderedMap()
+ omap._yaml_set_line_col(node.start_mark.line, node.start_mark.column)
+ if node.flow_style is True:
+ omap.fa.set_flow_style()
+ elif node.flow_style is False:
+ omap.fa.set_block_style()
+ yield omap
+ if node.comment:
+ omap._yaml_add_comment(node.comment[:2])
+ if len(node.comment) > 2:
+ omap.yaml_end_comment_extend(node.comment[2], clear=True)
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError(
+ 'while constructing an ordered map',
+ node.start_mark,
+ 'expected a sequence, but found %s' % node.id,
+ node.start_mark,
+ )
+ for subnode in node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError(
+ 'while constructing an ordered map',
+ node.start_mark,
+ 'expected a mapping of length 1, but found %s' % subnode.id,
+ subnode.start_mark,
+ )
+ if len(subnode.value) != 1:
+ raise ConstructorError(
+ 'while constructing an ordered map',
+ node.start_mark,
+ 'expected a single mapping item, but found %d items' % len(subnode.value),
+ subnode.start_mark,
+ )
+ key_node, value_node = subnode.value[0]
+ key = self.construct_object(key_node)
+ assert key not in omap
+ value = self.construct_object(value_node)
+ if key_node.comment:
+ omap._yaml_add_comment(key_node.comment, key=key)
+ if subnode.comment:
+ omap._yaml_add_comment(subnode.comment, key=key)
+ if value_node.comment:
+ omap._yaml_add_comment(value_node.comment, value=key)
+ omap[key] = value
+
+ def construct_yaml_set(self, node):
+ # type: (Any) -> Any
+ data = CommentedSet()
+ data._yaml_set_line_col(node.start_mark.line, node.start_mark.column)
+ yield data
+ self.construct_setting(node, data)
+
+ def construct_undefined(self, node):
+ # type: (Any) -> Any
+ try:
+ if isinstance(node, MappingNode):
+ data = CommentedMap()
+ data._yaml_set_line_col(node.start_mark.line, node.start_mark.column)
+ if node.flow_style is True:
+ data.fa.set_flow_style()
+ elif node.flow_style is False:
+ data.fa.set_block_style()
+ data.yaml_set_tag(node.tag)
+ yield data
+ if node.anchor:
+ data.yaml_set_anchor(node.anchor)
+ self.construct_mapping(node, data)
+ return
+ elif isinstance(node, ScalarNode):
+ data2 = TaggedScalar()
+ data2.value = self.construct_scalar(node)
+ data2.style = node.style
+ data2.yaml_set_tag(node.tag)
+ yield data2
+ if node.anchor:
+ data2.yaml_set_anchor(node.anchor, always_dump=True)
+ return
+ elif isinstance(node, SequenceNode):
+ data3 = CommentedSeq()
+ data3._yaml_set_line_col(node.start_mark.line, node.start_mark.column)
+ if node.flow_style is True:
+ data3.fa.set_flow_style()
+ elif node.flow_style is False:
+ data3.fa.set_block_style()
+ data3.yaml_set_tag(node.tag)
+ yield data3
+ if node.anchor:
+ data3.yaml_set_anchor(node.anchor)
+ data3.extend(self.construct_sequence(node))
+ return
+ except: # NOQA
+ pass
+ raise ConstructorError(
+ None,
+ None,
+ 'could not determine a constructor for the tag %r' % utf8(node.tag),
+ node.start_mark,
+ )
+
+ def construct_yaml_timestamp(self, node, values=None):
+ # type: (Any, Any) -> Any
+ try:
+ match = self.timestamp_regexp.match(node.value)
+ except TypeError:
+ match = None
+ if match is None:
+ raise ConstructorError(
+ None,
+ None,
+ 'failed to construct timestamp from "{}"'.format(node.value),
+ node.start_mark,
+ )
+ values = match.groupdict()
+ if not values['hour']:
+ return SafeConstructor.construct_yaml_timestamp(self, node, values)
+ for part in ['t', 'tz_sign', 'tz_hour', 'tz_minute']:
+ if values[part]:
+ break
+ else:
+ return SafeConstructor.construct_yaml_timestamp(self, node, values)
+ year = int(values['year'])
+ month = int(values['month'])
+ day = int(values['day'])
+ hour = int(values['hour'])
+ minute = int(values['minute'])
+ second = int(values['second'])
+ fraction = 0
+ if values['fraction']:
+ fraction_s = values['fraction'][:6]
+ while len(fraction_s) < 6:
+ fraction_s += '0'
+ fraction = int(fraction_s)
+ if len(values['fraction']) > 6 and int(values['fraction'][6]) > 4:
+ fraction += 1
+ delta = None
+ if values['tz_sign']:
+ tz_hour = int(values['tz_hour'])
+ minutes = values['tz_minute']
+ tz_minute = int(minutes) if minutes else 0
+ delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
+ if values['tz_sign'] == '-':
+ delta = -delta
+ if delta:
+ dt = datetime.datetime(year, month, day, hour, minute)
+ dt -= delta
+ data = TimeStamp(dt.year, dt.month, dt.day, dt.hour, dt.minute, second, fraction)
+ data._yaml['delta'] = delta
+ tz = values['tz_sign'] + values['tz_hour']
+ if values['tz_minute']:
+ tz += ':' + values['tz_minute']
+ data._yaml['tz'] = tz
+ else:
+ data = TimeStamp(year, month, day, hour, minute, second, fraction)
+ if values['tz']: # no delta
+ data._yaml['tz'] = values['tz']
+
+ if values['t']:
+ data._yaml['t'] = True
+ return data
+
+ def construct_yaml_bool(self, node):
+ # type: (Any) -> Any
+ b = SafeConstructor.construct_yaml_bool(self, node)
+ if node.anchor:
+ return ScalarBoolean(b, anchor=node.anchor)
+ return b
+
+
+RoundTripConstructor.add_constructor(
+ u'tag:yaml.org,2002:null', RoundTripConstructor.construct_yaml_null
+)
+
+RoundTripConstructor.add_constructor(
+ u'tag:yaml.org,2002:bool', RoundTripConstructor.construct_yaml_bool
+)
+
+RoundTripConstructor.add_constructor(
+ u'tag:yaml.org,2002:int', RoundTripConstructor.construct_yaml_int
+)
+
+RoundTripConstructor.add_constructor(
+ u'tag:yaml.org,2002:float', RoundTripConstructor.construct_yaml_float
+)
+
+RoundTripConstructor.add_constructor(
+ u'tag:yaml.org,2002:binary', RoundTripConstructor.construct_yaml_binary
+)
+
+RoundTripConstructor.add_constructor(
+ u'tag:yaml.org,2002:timestamp', RoundTripConstructor.construct_yaml_timestamp
+)
+
+RoundTripConstructor.add_constructor(
+ u'tag:yaml.org,2002:omap', RoundTripConstructor.construct_yaml_omap
+)
+
+RoundTripConstructor.add_constructor(
+ u'tag:yaml.org,2002:pairs', RoundTripConstructor.construct_yaml_pairs
+)
+
+RoundTripConstructor.add_constructor(
+ u'tag:yaml.org,2002:set', RoundTripConstructor.construct_yaml_set
+)
+
+RoundTripConstructor.add_constructor(
+ u'tag:yaml.org,2002:str', RoundTripConstructor.construct_yaml_str
+)
+
+RoundTripConstructor.add_constructor(
+ u'tag:yaml.org,2002:seq', RoundTripConstructor.construct_yaml_seq
+)
+
+RoundTripConstructor.add_constructor(
+ u'tag:yaml.org,2002:map', RoundTripConstructor.construct_yaml_map
+)
+
+RoundTripConstructor.add_constructor(None, RoundTripConstructor.construct_undefined)
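+
+# Minimal usage sketch (editor's illustration, assuming the public
+# ruamel.yaml API exposed by this vendored package):
+#
+# >>> from dynaconf.vendor.ruamel.yaml import YAML
+# >>> yaml = YAML()  # default 'rt' mode uses RoundTripConstructor
+# >>> data = yaml.load('a: 1  # keep me\n')
+# >>> import sys; yaml.dump(data, sys.stdout)  # comment is preserved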
diff --git a/libs/dynaconf/vendor/ruamel/yaml/cyaml.py b/libs/dynaconf/vendor/ruamel/yaml/cyaml.py
new file mode 100644
index 000000000..2db5b0154
--- /dev/null
+++ b/libs/dynaconf/vendor/ruamel/yaml/cyaml.py
@@ -0,0 +1,185 @@
+# coding: utf-8
+
+from __future__ import absolute_import
+
+from _ruamel_yaml import CParser, CEmitter # type: ignore
+
+from .constructor import Constructor, BaseConstructor, SafeConstructor
+from .representer import Representer, SafeRepresenter, BaseRepresenter
+from .resolver import Resolver, BaseResolver
+
+if False: # MYPY
+ from typing import Any, Union, Optional # NOQA
+ from .compat import StreamTextType, StreamType, VersionType # NOQA
+
+__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader', 'CBaseDumper', 'CSafeDumper', 'CDumper']
+
+
+# this includes some hacks to work around how the resolver is used by
+# lower-level parts of the parser
+
+
+class CBaseLoader(CParser, BaseConstructor, BaseResolver): # type: ignore
+ def __init__(self, stream, version=None, preserve_quotes=None):
+ # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None
+ CParser.__init__(self, stream)
+ self._parser = self._composer = self
+ BaseConstructor.__init__(self, loader=self)
+ BaseResolver.__init__(self, loadumper=self)
+ # self.descend_resolver = self._resolver.descend_resolver
+ # self.ascend_resolver = self._resolver.ascend_resolver
+ # self.resolve = self._resolver.resolve
+
+
+class CSafeLoader(CParser, SafeConstructor, Resolver): # type: ignore
+ def __init__(self, stream, version=None, preserve_quotes=None):
+ # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None
+ CParser.__init__(self, stream)
+ self._parser = self._composer = self
+ SafeConstructor.__init__(self, loader=self)
+ Resolver.__init__(self, loadumper=self)
+ # self.descend_resolver = self._resolver.descend_resolver
+ # self.ascend_resolver = self._resolver.ascend_resolver
+ # self.resolve = self._resolver.resolve
+
+
+class CLoader(CParser, Constructor, Resolver): # type: ignore
+ def __init__(self, stream, version=None, preserve_quotes=None):
+ # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None
+ CParser.__init__(self, stream)
+ self._parser = self._composer = self
+ Constructor.__init__(self, loader=self)
+ Resolver.__init__(self, loadumper=self)
+ # self.descend_resolver = self._resolver.descend_resolver
+ # self.ascend_resolver = self._resolver.ascend_resolver
+ # self.resolve = self._resolver.resolve
+
+
+class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver): # type: ignore
+ def __init__(
+ self,
+ stream,
+ default_style=None,
+ default_flow_style=None,
+ canonical=None,
+ indent=None,
+ width=None,
+ allow_unicode=None,
+ line_break=None,
+ encoding=None,
+ explicit_start=None,
+ explicit_end=None,
+ version=None,
+ tags=None,
+ block_seq_indent=None,
+ top_level_colon_align=None,
+ prefix_colon=None,
+ ):
+ # type: (StreamType, Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA
+ CEmitter.__init__(
+ self,
+ stream,
+ canonical=canonical,
+ indent=indent,
+ width=width,
+ encoding=encoding,
+ allow_unicode=allow_unicode,
+ line_break=line_break,
+ explicit_start=explicit_start,
+ explicit_end=explicit_end,
+ version=version,
+ tags=tags,
+ )
+ self._emitter = self._serializer = self._representer = self
+ BaseRepresenter.__init__(
+ self,
+ default_style=default_style,
+ default_flow_style=default_flow_style,
+ dumper=self,
+ )
+ BaseResolver.__init__(self, loadumper=self)
+
+
+class CSafeDumper(CEmitter, SafeRepresenter, Resolver): # type: ignore
+ def __init__(
+ self,
+ stream,
+ default_style=None,
+ default_flow_style=None,
+ canonical=None,
+ indent=None,
+ width=None,
+ allow_unicode=None,
+ line_break=None,
+ encoding=None,
+ explicit_start=None,
+ explicit_end=None,
+ version=None,
+ tags=None,
+ block_seq_indent=None,
+ top_level_colon_align=None,
+ prefix_colon=None,
+ ):
+ # type: (StreamType, Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA
+ CEmitter.__init__(
+ self,
+ stream,
+ canonical=canonical,
+ indent=indent,
+ width=width,
+ encoding=encoding,
+ allow_unicode=allow_unicode,
+ line_break=line_break,
+ explicit_start=explicit_start,
+ explicit_end=explicit_end,
+ version=version,
+ tags=tags,
+ )
+ self._emitter = self._serializer = self._representer = self
+ SafeRepresenter.__init__(
+ self, default_style=default_style, default_flow_style=default_flow_style
+ )
+ Resolver.__init__(self)
+
+
+class CDumper(CEmitter, Representer, Resolver): # type: ignore
+ def __init__(
+ self,
+ stream,
+ default_style=None,
+ default_flow_style=None,
+ canonical=None,
+ indent=None,
+ width=None,
+ allow_unicode=None,
+ line_break=None,
+ encoding=None,
+ explicit_start=None,
+ explicit_end=None,
+ version=None,
+ tags=None,
+ block_seq_indent=None,
+ top_level_colon_align=None,
+ prefix_colon=None,
+ ):
+ # type: (StreamType, Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA
+ CEmitter.__init__(
+ self,
+ stream,
+ canonical=canonical,
+ indent=indent,
+ width=width,
+ encoding=encoding,
+ allow_unicode=allow_unicode,
+ line_break=line_break,
+ explicit_start=explicit_start,
+ explicit_end=explicit_end,
+ version=version,
+ tags=tags,
+ )
+ self._emitter = self._serializer = self._representer = self
+ Representer.__init__(
+ self, default_style=default_style, default_flow_style=default_flow_style
+ )
+ Resolver.__init__(self)
diff --git a/libs/dynaconf/vendor/ruamel/yaml/dumper.py b/libs/dynaconf/vendor/ruamel/yaml/dumper.py
new file mode 100644
index 000000000..a2cd7b47e
--- /dev/null
+++ b/libs/dynaconf/vendor/ruamel/yaml/dumper.py
@@ -0,0 +1,221 @@
+# coding: utf-8
+
+from __future__ import absolute_import
+
+from .emitter import Emitter
+from .serializer import Serializer
+from .representer import (
+ Representer,
+ SafeRepresenter,
+ BaseRepresenter,
+ RoundTripRepresenter,
+)
+from .resolver import Resolver, BaseResolver, VersionedResolver
+
+if False: # MYPY
+ from typing import Any, Dict, List, Union, Optional # NOQA
+ from .compat import StreamType, VersionType # NOQA
+
+__all__ = ['BaseDumper', 'SafeDumper', 'Dumper', 'RoundTripDumper']
+
+
+class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
+ def __init__(
+ self,
+ stream,
+ default_style=None,
+ default_flow_style=None,
+ canonical=None,
+ indent=None,
+ width=None,
+ allow_unicode=None,
+ line_break=None,
+ encoding=None,
+ explicit_start=None,
+ explicit_end=None,
+ version=None,
+ tags=None,
+ block_seq_indent=None,
+ top_level_colon_align=None,
+ prefix_colon=None,
+ ):
+ # type: (Any, StreamType, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA
+ Emitter.__init__(
+ self,
+ stream,
+ canonical=canonical,
+ indent=indent,
+ width=width,
+ allow_unicode=allow_unicode,
+ line_break=line_break,
+ block_seq_indent=block_seq_indent,
+ dumper=self,
+ )
+ Serializer.__init__(
+ self,
+ encoding=encoding,
+ explicit_start=explicit_start,
+ explicit_end=explicit_end,
+ version=version,
+ tags=tags,
+ dumper=self,
+ )
+ BaseRepresenter.__init__(
+ self,
+ default_style=default_style,
+ default_flow_style=default_flow_style,
+ dumper=self,
+ )
+ BaseResolver.__init__(self, loadumper=self)
+
+
+class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
+ def __init__(
+ self,
+ stream,
+ default_style=None,
+ default_flow_style=None,
+ canonical=None,
+ indent=None,
+ width=None,
+ allow_unicode=None,
+ line_break=None,
+ encoding=None,
+ explicit_start=None,
+ explicit_end=None,
+ version=None,
+ tags=None,
+ block_seq_indent=None,
+ top_level_colon_align=None,
+ prefix_colon=None,
+ ):
+ # type: (StreamType, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA
+ Emitter.__init__(
+ self,
+ stream,
+ canonical=canonical,
+ indent=indent,
+ width=width,
+ allow_unicode=allow_unicode,
+ line_break=line_break,
+ block_seq_indent=block_seq_indent,
+ dumper=self,
+ )
+ Serializer.__init__(
+ self,
+ encoding=encoding,
+ explicit_start=explicit_start,
+ explicit_end=explicit_end,
+ version=version,
+ tags=tags,
+ dumper=self,
+ )
+ SafeRepresenter.__init__(
+ self,
+ default_style=default_style,
+ default_flow_style=default_flow_style,
+ dumper=self,
+ )
+ Resolver.__init__(self, loadumper=self)
+
+
+class Dumper(Emitter, Serializer, Representer, Resolver):
+ def __init__(
+ self,
+ stream,
+ default_style=None,
+ default_flow_style=None,
+ canonical=None,
+ indent=None,
+ width=None,
+ allow_unicode=None,
+ line_break=None,
+ encoding=None,
+ explicit_start=None,
+ explicit_end=None,
+ version=None,
+ tags=None,
+ block_seq_indent=None,
+ top_level_colon_align=None,
+ prefix_colon=None,
+ ):
+ # type: (StreamType, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA
+ Emitter.__init__(
+ self,
+ stream,
+ canonical=canonical,
+ indent=indent,
+ width=width,
+ allow_unicode=allow_unicode,
+ line_break=line_break,
+ block_seq_indent=block_seq_indent,
+ dumper=self,
+ )
+ Serializer.__init__(
+ self,
+ encoding=encoding,
+ explicit_start=explicit_start,
+ explicit_end=explicit_end,
+ version=version,
+ tags=tags,
+ dumper=self,
+ )
+ Representer.__init__(
+ self,
+ default_style=default_style,
+ default_flow_style=default_flow_style,
+ dumper=self,
+ )
+ Resolver.__init__(self, loadumper=self)
+
+
+class RoundTripDumper(Emitter, Serializer, RoundTripRepresenter, VersionedResolver):
+ def __init__(
+ self,
+ stream,
+ default_style=None,
+ default_flow_style=None,
+ canonical=None,
+ indent=None,
+ width=None,
+ allow_unicode=None,
+ line_break=None,
+ encoding=None,
+ explicit_start=None,
+ explicit_end=None,
+ version=None,
+ tags=None,
+ block_seq_indent=None,
+ top_level_colon_align=None,
+ prefix_colon=None,
+ ):
+ # type: (StreamType, Any, Optional[bool], Optional[int], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA
+ Emitter.__init__(
+ self,
+ stream,
+ canonical=canonical,
+ indent=indent,
+ width=width,
+ allow_unicode=allow_unicode,
+ line_break=line_break,
+ block_seq_indent=block_seq_indent,
+ top_level_colon_align=top_level_colon_align,
+ prefix_colon=prefix_colon,
+ dumper=self,
+ )
+ Serializer.__init__(
+ self,
+ encoding=encoding,
+ explicit_start=explicit_start,
+ explicit_end=explicit_end,
+ version=version,
+ tags=tags,
+ dumper=self,
+ )
+ RoundTripRepresenter.__init__(
+ self,
+ default_style=default_style,
+ default_flow_style=default_flow_style,
+ dumper=self,
+ )
+ VersionedResolver.__init__(self, loader=self)
diff --git a/libs/dynaconf/vendor/ruamel/yaml/emitter.py b/libs/dynaconf/vendor/ruamel/yaml/emitter.py
new file mode 100644
index 000000000..c1eff8b9c
--- /dev/null
+++ b/libs/dynaconf/vendor/ruamel/yaml/emitter.py
@@ -0,0 +1,1688 @@
+# coding: utf-8
+
+from __future__ import absolute_import
+from __future__ import print_function
+
+# Emitter expects events obeying the following grammar:
+# stream ::= STREAM-START document* STREAM-END
+# document ::= DOCUMENT-START node DOCUMENT-END
+# node ::= SCALAR | sequence | mapping
+# sequence ::= SEQUENCE-START node* SEQUENCE-END
+# mapping ::= MAPPING-START (node node)* MAPPING-END
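+#
+# For example, dumping the mapping {'a': [1, 2]} produces the sequence:
+#   STREAM-START DOCUMENT-START MAPPING-START
+#     SCALAR(a) SEQUENCE-START SCALAR(1) SCALAR(2) SEQUENCE-END
+#   MAPPING-END DOCUMENT-END STREAM-END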
+
+import sys
+from .error import YAMLError, YAMLStreamError
+from .events import * # NOQA
+
+# fmt: off
+from .compat import utf8, text_type, PY2, nprint, dbg, DBG_EVENT, check_anchorname_char
+# fmt: on
+
+if False: # MYPY
+ from typing import Any, Dict, List, Union, Text, Tuple, Optional # NOQA
+ from .compat import StreamType # NOQA
+
+__all__ = ['Emitter', 'EmitterError']
+
+
+class EmitterError(YAMLError):
+ pass
+
+
+class ScalarAnalysis(object):
+ def __init__(
+ self,
+ scalar,
+ empty,
+ multiline,
+ allow_flow_plain,
+ allow_block_plain,
+ allow_single_quoted,
+ allow_double_quoted,
+ allow_block,
+ ):
+ # type: (Any, Any, Any, bool, bool, bool, bool, bool) -> None
+ self.scalar = scalar
+ self.empty = empty
+ self.multiline = multiline
+ self.allow_flow_plain = allow_flow_plain
+ self.allow_block_plain = allow_block_plain
+ self.allow_single_quoted = allow_single_quoted
+ self.allow_double_quoted = allow_double_quoted
+ self.allow_block = allow_block
+
+
+class Indents(object):
+ # replacement for the list based stack of None/int
+ def __init__(self):
+ # type: () -> None
+ self.values = [] # type: List[Tuple[int, bool]]
+
+ def append(self, val, seq):
+ # type: (Any, Any) -> None
+ self.values.append((val, seq))
+
+ def pop(self):
+ # type: () -> Any
+ return self.values.pop()[0]
+
+ def last_seq(self):
+ # type: () -> bool
+ # return the seq(uence) value for the element added before the last one
+ # in increase_indent()
+ try:
+ return self.values[-2][1]
+ except IndexError:
+ return False
+
+ def seq_flow_align(self, seq_indent, column):
+ # type: (int, int) -> int
+ # extra spaces because of dash
+ if len(self.values) < 2 or not self.values[-1][1]:
+ return 0
+ # -1 for the dash
+ base = self.values[-1][0] if self.values[-1][0] is not None else 0
+ return base + seq_indent - column - 1
+
+ def __len__(self):
+ # type: () -> int
+ return len(self.values)
+
+
+class Emitter(object):
+ # fmt: off
+ DEFAULT_TAG_PREFIXES = {
+ u'!': u'!',
+ u'tag:yaml.org,2002:': u'!!',
+ }
+ # fmt: on
+
+ MAX_SIMPLE_KEY_LENGTH = 128
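+    # if the combined length of the prepared anchor, tag and scalar exceeds
+    # this, check_simple_key() refuses the simple-key form and the emitter
+    # falls back to an explicit '?' key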
+
+ def __init__(
+ self,
+ stream,
+ canonical=None,
+ indent=None,
+ width=None,
+ allow_unicode=None,
+ line_break=None,
+ block_seq_indent=None,
+ top_level_colon_align=None,
+ prefix_colon=None,
+ brace_single_entry_mapping_in_flow_sequence=None,
+ dumper=None,
+ ):
+ # type: (StreamType, Any, Optional[int], Optional[int], Optional[bool], Any, Optional[int], Optional[bool], Any, Optional[bool], Any) -> None # NOQA
+ self.dumper = dumper
+ if self.dumper is not None and getattr(self.dumper, '_emitter', None) is None:
+ self.dumper._emitter = self
+ self.stream = stream
+
+        # Encoding can be overridden by STREAM-START.
+ self.encoding = None # type: Optional[Text]
+ self.allow_space_break = None
+
+ # Emitter is a state machine with a stack of states to handle nested
+ # structures.
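+        # (each expect_* method below implements one state: it consumes
+        # self.event and stores the next expected state in self.state)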
+ self.states = [] # type: List[Any]
+ self.state = self.expect_stream_start # type: Any
+
+ # Current event and the event queue.
+ self.events = [] # type: List[Any]
+ self.event = None # type: Any
+
+ # The current indentation level and the stack of previous indents.
+ self.indents = Indents()
+ self.indent = None # type: Optional[int]
+
+        # flow_context is an expanding/shrinking list consisting of '{' and '['
+        # for each unclosed flow context; an empty list means block context
+ self.flow_context = [] # type: List[Text]
+
+ # Contexts.
+ self.root_context = False
+ self.sequence_context = False
+ self.mapping_context = False
+ self.simple_key_context = False
+
+ # Characteristics of the last emitted character:
+ # - current position.
+ # - is it a whitespace?
+ # - is it an indention character
+ # (indentation space, '-', '?', or ':')?
+ self.line = 0
+ self.column = 0
+ self.whitespace = True
+ self.indention = True
+ self.compact_seq_seq = True # dash after dash
+ self.compact_seq_map = True # key after dash
+        # self.compact_ms = False # dash after key, only when explicit key with ?
+ self.no_newline = None # type: Optional[bool] # set if directly after `- `
+
+ # Whether the document requires an explicit document end indicator
+ self.open_ended = False
+
+ # colon handling
+ self.colon = u':'
+ self.prefixed_colon = self.colon if prefix_colon is None else prefix_colon + self.colon
+ # single entry mappings in flow sequence
+ self.brace_single_entry_mapping_in_flow_sequence = (
+ brace_single_entry_mapping_in_flow_sequence
+ ) # NOQA
+
+ # Formatting details.
+ self.canonical = canonical
+ self.allow_unicode = allow_unicode
+ # set to False to get "\Uxxxxxxxx" for non-basic unicode like emojis
+ self.unicode_supplementary = sys.maxunicode > 0xffff
+ self.sequence_dash_offset = block_seq_indent if block_seq_indent else 0
+ self.top_level_colon_align = top_level_colon_align
+ self.best_sequence_indent = 2
+ self.requested_indent = indent # specific for literal zero indent
+ if indent and 1 < indent < 10:
+ self.best_sequence_indent = indent
+ self.best_map_indent = self.best_sequence_indent
+ # if self.best_sequence_indent < self.sequence_dash_offset + 1:
+ # self.best_sequence_indent = self.sequence_dash_offset + 1
+ self.best_width = 80
+ if width and width > self.best_sequence_indent * 2:
+ self.best_width = width
+ self.best_line_break = u'\n' # type: Any
+ if line_break in [u'\r', u'\n', u'\r\n']:
+ self.best_line_break = line_break
+
+ # Tag prefixes.
+ self.tag_prefixes = None # type: Any
+
+ # Prepared anchor and tag.
+ self.prepared_anchor = None # type: Any
+ self.prepared_tag = None # type: Any
+
+ # Scalar analysis and style.
+ self.analysis = None # type: Any
+ self.style = None # type: Any
+
+ self.scalar_after_indicator = True # write a scalar on the same line as `---`
+
+ @property
+ def stream(self):
+ # type: () -> Any
+ try:
+ return self._stream
+ except AttributeError:
+            raise YAMLStreamError('output stream needs to be specified')
+
+ @stream.setter
+ def stream(self, val):
+ # type: (Any) -> None
+ if val is None:
+ return
+ if not hasattr(val, 'write'):
+ raise YAMLStreamError('stream argument needs to have a write() method')
+ self._stream = val
+
+ @property
+ def serializer(self):
+ # type: () -> Any
+ try:
+ if hasattr(self.dumper, 'typ'):
+ return self.dumper.serializer
+ return self.dumper._serializer
+ except AttributeError:
+ return self # cyaml
+
+ @property
+ def flow_level(self):
+ # type: () -> int
+ return len(self.flow_context)
+
+ def dispose(self):
+ # type: () -> None
+ # Reset the state attributes (to clear self-references)
+ self.states = []
+ self.state = None
+
+ def emit(self, event):
+ # type: (Any) -> None
+ if dbg(DBG_EVENT):
+ nprint(event)
+ self.events.append(event)
+ while not self.need_more_events():
+ self.event = self.events.pop(0)
+ self.state()
+ self.event = None
+
+ # In some cases, we wait for a few next events before emitting.
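+    # (e.g. a MappingStartEvent is held until up to three following events
+    # are queued, enough to see whether the mapping is empty and whether
+    # its first key can be written as a simple key)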
+
+ def need_more_events(self):
+ # type: () -> bool
+ if not self.events:
+ return True
+ event = self.events[0]
+ if isinstance(event, DocumentStartEvent):
+ return self.need_events(1)
+ elif isinstance(event, SequenceStartEvent):
+ return self.need_events(2)
+ elif isinstance(event, MappingStartEvent):
+ return self.need_events(3)
+ else:
+ return False
+
+ def need_events(self, count):
+ # type: (int) -> bool
+ level = 0
+ for event in self.events[1:]:
+ if isinstance(event, (DocumentStartEvent, CollectionStartEvent)):
+ level += 1
+ elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)):
+ level -= 1
+ elif isinstance(event, StreamEndEvent):
+ level = -1
+ if level < 0:
+ return False
+ return len(self.events) < count + 1
+
+ def increase_indent(self, flow=False, sequence=None, indentless=False):
+ # type: (bool, Optional[bool], bool) -> None
+ self.indents.append(self.indent, sequence)
+ if self.indent is None: # top level
+ if flow:
+ # self.indent = self.best_sequence_indent if self.indents.last_seq() else \
+ # self.best_map_indent
+ # self.indent = self.best_sequence_indent
+ self.indent = self.requested_indent
+ else:
+ self.indent = 0
+ elif not indentless:
+ self.indent += (
+ self.best_sequence_indent if self.indents.last_seq() else self.best_map_indent
+ )
+ # if self.indents.last_seq():
+ # if self.indent == 0: # top level block sequence
+ # self.indent = self.best_sequence_indent - self.sequence_dash_offset
+ # else:
+ # self.indent += self.best_sequence_indent
+ # else:
+ # self.indent += self.best_map_indent
+
+ # States.
+
+ # Stream handlers.
+
+ def expect_stream_start(self):
+ # type: () -> None
+ if isinstance(self.event, StreamStartEvent):
+ if PY2:
+ if self.event.encoding and not getattr(self.stream, 'encoding', None):
+ self.encoding = self.event.encoding
+ else:
+ if self.event.encoding and not hasattr(self.stream, 'encoding'):
+ self.encoding = self.event.encoding
+ self.write_stream_start()
+ self.state = self.expect_first_document_start
+ else:
+ raise EmitterError('expected StreamStartEvent, but got %s' % (self.event,))
+
+ def expect_nothing(self):
+ # type: () -> None
+ raise EmitterError('expected nothing, but got %s' % (self.event,))
+
+ # Document handlers.
+
+ def expect_first_document_start(self):
+ # type: () -> Any
+ return self.expect_document_start(first=True)
+
+ def expect_document_start(self, first=False):
+ # type: (bool) -> None
+ if isinstance(self.event, DocumentStartEvent):
+ if (self.event.version or self.event.tags) and self.open_ended:
+ self.write_indicator(u'...', True)
+ self.write_indent()
+ if self.event.version:
+ version_text = self.prepare_version(self.event.version)
+ self.write_version_directive(version_text)
+ self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
+ if self.event.tags:
+ handles = sorted(self.event.tags.keys())
+ for handle in handles:
+ prefix = self.event.tags[handle]
+ self.tag_prefixes[prefix] = handle
+ handle_text = self.prepare_tag_handle(handle)
+ prefix_text = self.prepare_tag_prefix(prefix)
+ self.write_tag_directive(handle_text, prefix_text)
+ implicit = (
+ first
+ and not self.event.explicit
+ and not self.canonical
+ and not self.event.version
+ and not self.event.tags
+ and not self.check_empty_document()
+ )
+ if not implicit:
+ self.write_indent()
+ self.write_indicator(u'---', True)
+ if self.canonical:
+ self.write_indent()
+ self.state = self.expect_document_root
+ elif isinstance(self.event, StreamEndEvent):
+ if self.open_ended:
+ self.write_indicator(u'...', True)
+ self.write_indent()
+ self.write_stream_end()
+ self.state = self.expect_nothing
+ else:
+ raise EmitterError('expected DocumentStartEvent, but got %s' % (self.event,))
+
+ def expect_document_end(self):
+ # type: () -> None
+ if isinstance(self.event, DocumentEndEvent):
+ self.write_indent()
+ if self.event.explicit:
+ self.write_indicator(u'...', True)
+ self.write_indent()
+ self.flush_stream()
+ self.state = self.expect_document_start
+ else:
+ raise EmitterError('expected DocumentEndEvent, but got %s' % (self.event,))
+
+ def expect_document_root(self):
+ # type: () -> None
+ self.states.append(self.expect_document_end)
+ self.expect_node(root=True)
+
+ # Node handlers.
+
+ def expect_node(self, root=False, sequence=False, mapping=False, simple_key=False):
+ # type: (bool, bool, bool, bool) -> None
+ self.root_context = root
+ self.sequence_context = sequence # not used in PyYAML
+ self.mapping_context = mapping
+ self.simple_key_context = simple_key
+ if isinstance(self.event, AliasEvent):
+ self.expect_alias()
+ elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
+ if (
+ self.process_anchor(u'&')
+ and isinstance(self.event, ScalarEvent)
+ and self.sequence_context
+ ):
+ self.sequence_context = False
+ if (
+ root
+ and isinstance(self.event, ScalarEvent)
+ and not self.scalar_after_indicator
+ ):
+ self.write_indent()
+ self.process_tag()
+ if isinstance(self.event, ScalarEvent):
+ # nprint('@', self.indention, self.no_newline, self.column)
+ self.expect_scalar()
+ elif isinstance(self.event, SequenceStartEvent):
+ # nprint('@', self.indention, self.no_newline, self.column)
+ i2, n2 = self.indention, self.no_newline # NOQA
+ if self.event.comment:
+ if self.event.flow_style is False and self.event.comment:
+ if self.write_post_comment(self.event):
+ self.indention = False
+ self.no_newline = True
+ if self.write_pre_comment(self.event):
+ self.indention = i2
+ self.no_newline = not self.indention
+ if (
+ self.flow_level
+ or self.canonical
+ or self.event.flow_style
+ or self.check_empty_sequence()
+ ):
+ self.expect_flow_sequence()
+ else:
+ self.expect_block_sequence()
+ elif isinstance(self.event, MappingStartEvent):
+ if self.event.flow_style is False and self.event.comment:
+ self.write_post_comment(self.event)
+ if self.event.comment and self.event.comment[1]:
+ self.write_pre_comment(self.event)
+ if (
+ self.flow_level
+ or self.canonical
+ or self.event.flow_style
+ or self.check_empty_mapping()
+ ):
+ self.expect_flow_mapping(single=self.event.nr_items == 1)
+ else:
+ self.expect_block_mapping()
+ else:
+ raise EmitterError('expected NodeEvent, but got %s' % (self.event,))
+
+ def expect_alias(self):
+ # type: () -> None
+ if self.event.anchor is None:
+ raise EmitterError('anchor is not specified for alias')
+ self.process_anchor(u'*')
+ self.state = self.states.pop()
+
+ def expect_scalar(self):
+ # type: () -> None
+ self.increase_indent(flow=True)
+ self.process_scalar()
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+
+ # Flow sequence handlers.
+
+ def expect_flow_sequence(self):
+ # type: () -> None
+ ind = self.indents.seq_flow_align(self.best_sequence_indent, self.column)
+ self.write_indicator(u' ' * ind + u'[', True, whitespace=True)
+ self.increase_indent(flow=True, sequence=True)
+ self.flow_context.append('[')
+ self.state = self.expect_first_flow_sequence_item
+
+ def expect_first_flow_sequence_item(self):
+ # type: () -> None
+ if isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ popped = self.flow_context.pop()
+ assert popped == '['
+ self.write_indicator(u']', False)
+ if self.event.comment and self.event.comment[0]:
+ # eol comment on empty flow sequence
+ self.write_post_comment(self.event)
+ elif self.flow_level == 0:
+ self.write_line_break()
+ self.state = self.states.pop()
+ else:
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.states.append(self.expect_flow_sequence_item)
+ self.expect_node(sequence=True)
+
+ def expect_flow_sequence_item(self):
+ # type: () -> None
+ if isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ popped = self.flow_context.pop()
+ assert popped == '['
+ if self.canonical:
+ self.write_indicator(u',', False)
+ self.write_indent()
+ self.write_indicator(u']', False)
+ if self.event.comment and self.event.comment[0]:
+ # eol comment on flow sequence
+ self.write_post_comment(self.event)
+ else:
+ self.no_newline = False
+ self.state = self.states.pop()
+ else:
+ self.write_indicator(u',', False)
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.states.append(self.expect_flow_sequence_item)
+ self.expect_node(sequence=True)
+
+ # Flow mapping handlers.
+
+ def expect_flow_mapping(self, single=False):
+ # type: (Optional[bool]) -> None
+ ind = self.indents.seq_flow_align(self.best_sequence_indent, self.column)
+ map_init = u'{'
+ if (
+ single
+ and self.flow_level
+ and self.flow_context[-1] == '['
+ and not self.canonical
+ and not self.brace_single_entry_mapping_in_flow_sequence
+ ):
+ # single map item with flow context, no curly braces necessary
+ map_init = u''
+ self.write_indicator(u' ' * ind + map_init, True, whitespace=True)
+ self.flow_context.append(map_init)
+ self.increase_indent(flow=True, sequence=False)
+ self.state = self.expect_first_flow_mapping_key
+
+ def expect_first_flow_mapping_key(self):
+ # type: () -> None
+ if isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ popped = self.flow_context.pop()
+ assert popped == '{' # empty flow mapping
+ self.write_indicator(u'}', False)
+ if self.event.comment and self.event.comment[0]:
+ # eol comment on empty mapping
+ self.write_post_comment(self.event)
+ elif self.flow_level == 0:
+ self.write_line_break()
+ self.state = self.states.pop()
+ else:
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ if not self.canonical and self.check_simple_key():
+ self.states.append(self.expect_flow_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator(u'?', True)
+ self.states.append(self.expect_flow_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_key(self):
+ # type: () -> None
+ if isinstance(self.event, MappingEndEvent):
+ # if self.event.comment and self.event.comment[1]:
+ # self.write_pre_comment(self.event)
+ self.indent = self.indents.pop()
+ popped = self.flow_context.pop()
+ assert popped in [u'{', u'']
+ if self.canonical:
+ self.write_indicator(u',', False)
+ self.write_indent()
+ if popped != u'':
+ self.write_indicator(u'}', False)
+ if self.event.comment and self.event.comment[0]:
+ # eol comment on flow mapping, never reached on empty mappings
+ self.write_post_comment(self.event)
+ else:
+ self.no_newline = False
+ self.state = self.states.pop()
+ else:
+ self.write_indicator(u',', False)
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ if not self.canonical and self.check_simple_key():
+ self.states.append(self.expect_flow_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator(u'?', True)
+ self.states.append(self.expect_flow_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_simple_value(self):
+ # type: () -> None
+ self.write_indicator(self.prefixed_colon, False)
+ self.states.append(self.expect_flow_mapping_key)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_value(self):
+ # type: () -> None
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.write_indicator(self.prefixed_colon, True)
+ self.states.append(self.expect_flow_mapping_key)
+ self.expect_node(mapping=True)
+
+ # Block sequence handlers.
+
+ def expect_block_sequence(self):
+ # type: () -> None
+ if self.mapping_context:
+ indentless = not self.indention
+ else:
+ indentless = False
+ if not self.compact_seq_seq and self.column != 0:
+ self.write_line_break()
+ self.increase_indent(flow=False, sequence=True, indentless=indentless)
+ self.state = self.expect_first_block_sequence_item
+
+ def expect_first_block_sequence_item(self):
+ # type: () -> Any
+ return self.expect_block_sequence_item(first=True)
+
+ def expect_block_sequence_item(self, first=False):
+ # type: (bool) -> None
+ if not first and isinstance(self.event, SequenceEndEvent):
+ if self.event.comment and self.event.comment[1]:
+ # final comments on a block list e.g. empty line
+ self.write_pre_comment(self.event)
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+ self.no_newline = False
+ else:
+ if self.event.comment and self.event.comment[1]:
+ self.write_pre_comment(self.event)
+ nonl = self.no_newline if self.column == 0 else False
+ self.write_indent()
+ ind = self.sequence_dash_offset # if len(self.indents) > 1 else 0
+ self.write_indicator(u' ' * ind + u'-', True, indention=True)
+ if nonl or self.sequence_dash_offset + 2 > self.best_sequence_indent:
+ self.no_newline = True
+ self.states.append(self.expect_block_sequence_item)
+ self.expect_node(sequence=True)
+
+ # Block mapping handlers.
+
+ def expect_block_mapping(self):
+ # type: () -> None
+ if not self.mapping_context and not (self.compact_seq_map or self.column == 0):
+ self.write_line_break()
+ self.increase_indent(flow=False, sequence=False)
+ self.state = self.expect_first_block_mapping_key
+
+ def expect_first_block_mapping_key(self):
+        # type: () -> Any
+ return self.expect_block_mapping_key(first=True)
+
+ def expect_block_mapping_key(self, first=False):
+ # type: (Any) -> None
+ if not first and isinstance(self.event, MappingEndEvent):
+ if self.event.comment and self.event.comment[1]:
+ # final comments from a doc
+ self.write_pre_comment(self.event)
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+ else:
+ if self.event.comment and self.event.comment[1]:
+ # final comments from a doc
+ self.write_pre_comment(self.event)
+ self.write_indent()
+ if self.check_simple_key():
+ if not isinstance(
+ self.event, (SequenceStartEvent, MappingStartEvent)
+ ): # sequence keys
+ try:
+ if self.event.style == '?':
+ self.write_indicator(u'?', True, indention=True)
+ except AttributeError: # aliases have no style
+ pass
+ self.states.append(self.expect_block_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ if isinstance(self.event, AliasEvent):
+ self.stream.write(u' ')
+ else:
+ self.write_indicator(u'?', True, indention=True)
+ self.states.append(self.expect_block_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_block_mapping_simple_value(self):
+ # type: () -> None
+ if getattr(self.event, 'style', None) != '?':
+ # prefix = u''
+ if self.indent == 0 and self.top_level_colon_align is not None:
+ # write non-prefixed colon
+ c = u' ' * (self.top_level_colon_align - self.column) + self.colon
+ else:
+ c = self.prefixed_colon
+ self.write_indicator(c, False)
+ self.states.append(self.expect_block_mapping_key)
+ self.expect_node(mapping=True)
+
+ def expect_block_mapping_value(self):
+ # type: () -> None
+ self.write_indent()
+ self.write_indicator(self.prefixed_colon, True, indention=True)
+ self.states.append(self.expect_block_mapping_key)
+ self.expect_node(mapping=True)
+
+ # Checkers.
+
+ def check_empty_sequence(self):
+ # type: () -> bool
+ return (
+ isinstance(self.event, SequenceStartEvent)
+ and bool(self.events)
+ and isinstance(self.events[0], SequenceEndEvent)
+ )
+
+ def check_empty_mapping(self):
+ # type: () -> bool
+ return (
+ isinstance(self.event, MappingStartEvent)
+ and bool(self.events)
+ and isinstance(self.events[0], MappingEndEvent)
+ )
+
+ def check_empty_document(self):
+ # type: () -> bool
+ if not isinstance(self.event, DocumentStartEvent) or not self.events:
+ return False
+ event = self.events[0]
+ return (
+ isinstance(event, ScalarEvent)
+ and event.anchor is None
+ and event.tag is None
+ and event.implicit
+ and event.value == ""
+ )
+
+ def check_simple_key(self):
+ # type: () -> bool
+ length = 0
+ if isinstance(self.event, NodeEvent) and self.event.anchor is not None:
+ if self.prepared_anchor is None:
+ self.prepared_anchor = self.prepare_anchor(self.event.anchor)
+ length += len(self.prepared_anchor)
+ if (
+ isinstance(self.event, (ScalarEvent, CollectionStartEvent))
+ and self.event.tag is not None
+ ):
+ if self.prepared_tag is None:
+ self.prepared_tag = self.prepare_tag(self.event.tag)
+ length += len(self.prepared_tag)
+ if isinstance(self.event, ScalarEvent):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ length += len(self.analysis.scalar)
+ return length < self.MAX_SIMPLE_KEY_LENGTH and (
+ isinstance(self.event, AliasEvent)
+ or (isinstance(self.event, SequenceStartEvent) and self.event.flow_style is True)
+ or (isinstance(self.event, MappingStartEvent) and self.event.flow_style is True)
+ or (
+ isinstance(self.event, ScalarEvent)
+ # if there is an explicit style for an empty string, it is a simple key
+ and not (self.analysis.empty and self.style and self.style not in '\'"')
+ and not self.analysis.multiline
+ )
+ or self.check_empty_sequence()
+ or self.check_empty_mapping()
+ )
+
+ # Anchor, Tag, and Scalar processors.
+
+ def process_anchor(self, indicator):
+ # type: (Any) -> bool
+ if self.event.anchor is None:
+ self.prepared_anchor = None
+ return False
+ if self.prepared_anchor is None:
+ self.prepared_anchor = self.prepare_anchor(self.event.anchor)
+ if self.prepared_anchor:
+ self.write_indicator(indicator + self.prepared_anchor, True)
+ # issue 288
+ self.no_newline = False
+ self.prepared_anchor = None
+ return True
+
+ def process_tag(self):
+ # type: () -> None
+ tag = self.event.tag
+ if isinstance(self.event, ScalarEvent):
+ if self.style is None:
+ self.style = self.choose_scalar_style()
+ if (not self.canonical or tag is None) and (
+ (self.style == "" and self.event.implicit[0])
+ or (self.style != "" and self.event.implicit[1])
+ ):
+ self.prepared_tag = None
+ return
+ if self.event.implicit[0] and tag is None:
+ tag = u'!'
+ self.prepared_tag = None
+ else:
+ if (not self.canonical or tag is None) and self.event.implicit:
+ self.prepared_tag = None
+ return
+ if tag is None:
+ raise EmitterError('tag is not specified')
+ if self.prepared_tag is None:
+ self.prepared_tag = self.prepare_tag(tag)
+ if self.prepared_tag:
+ self.write_indicator(self.prepared_tag, True)
+ if (
+ self.sequence_context
+ and not self.flow_level
+ and isinstance(self.event, ScalarEvent)
+ ):
+ self.no_newline = True
+ self.prepared_tag = None
+
+ def choose_scalar_style(self):
+ # type: () -> Any
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ if self.event.style == '"' or self.canonical:
+ return '"'
+ if (not self.event.style or self.event.style == '?') and (
+ self.event.implicit[0] or not self.event.implicit[2]
+ ):
+ if not (
+ self.simple_key_context and (self.analysis.empty or self.analysis.multiline)
+ ) and (
+ self.flow_level
+ and self.analysis.allow_flow_plain
+ or (not self.flow_level and self.analysis.allow_block_plain)
+ ):
+ return ""
+ self.analysis.allow_block = True
+ if self.event.style and self.event.style in '|>':
+ if (
+ not self.flow_level
+ and not self.simple_key_context
+ and self.analysis.allow_block
+ ):
+ return self.event.style
+ if not self.event.style and self.analysis.allow_double_quoted:
+ if "'" in self.event.value or '\n' in self.event.value:
+ return '"'
+ if not self.event.style or self.event.style == "'":
+ if self.analysis.allow_single_quoted and not (
+ self.simple_key_context and self.analysis.multiline
+ ):
+ return "'"
+ return '"'
+
+ def process_scalar(self):
+ # type: () -> None
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ if self.style is None:
+ self.style = self.choose_scalar_style()
+ split = not self.simple_key_context
+ # if self.analysis.multiline and split \
+ # and (not self.style or self.style in '\'\"'):
+ # self.write_indent()
+ # nprint('xx', self.sequence_context, self.flow_level)
+ if self.sequence_context and not self.flow_level:
+ self.write_indent()
+ if self.style == '"':
+ self.write_double_quoted(self.analysis.scalar, split)
+ elif self.style == "'":
+ self.write_single_quoted(self.analysis.scalar, split)
+ elif self.style == '>':
+ self.write_folded(self.analysis.scalar)
+ elif self.style == '|':
+ self.write_literal(self.analysis.scalar, self.event.comment)
+ else:
+ self.write_plain(self.analysis.scalar, split)
+ self.analysis = None
+ self.style = None
+ if self.event.comment:
+ self.write_post_comment(self.event)
+
+ # Analyzers.
+
+ def prepare_version(self, version):
+ # type: (Any) -> Any
+ major, minor = version
+ if major != 1:
+ raise EmitterError('unsupported YAML version: %d.%d' % (major, minor))
+ return u'%d.%d' % (major, minor)
+
+ def prepare_tag_handle(self, handle):
+ # type: (Any) -> Any
+ if not handle:
+ raise EmitterError('tag handle must not be empty')
+ if handle[0] != u'!' or handle[-1] != u'!':
+ raise EmitterError("tag handle must start and end with '!': %r" % (utf8(handle)))
+ for ch in handle[1:-1]:
+ if not (
+ u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' or ch in u'-_'
+ ):
+ raise EmitterError(
+ 'invalid character %r in the tag handle: %r' % (utf8(ch), utf8(handle))
+ )
+ return handle
+
+ def prepare_tag_prefix(self, prefix):
+ # type: (Any) -> Any
+ if not prefix:
+ raise EmitterError('tag prefix must not be empty')
+ chunks = [] # type: List[Any]
+ start = end = 0
+ if prefix[0] == u'!':
+ end = 1
+ ch_set = u"-;/?:@&=+$,_.~*'()[]"
+ if self.dumper:
+ version = getattr(self.dumper, 'version', (1, 2))
+ if version is None or version >= (1, 2):
+ ch_set += u'#'
+ while end < len(prefix):
+ ch = prefix[end]
+ if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' or ch in ch_set:
+ end += 1
+ else:
+ if start < end:
+ chunks.append(prefix[start:end])
+ start = end = end + 1
+ data = utf8(ch)
+ for ch in data:
+ chunks.append(u'%%%02X' % ord(ch))
+ if start < end:
+ chunks.append(prefix[start:end])
+ return "".join(chunks)
+
+ def prepare_tag(self, tag):
+ # type: (Any) -> Any
+ if not tag:
+ raise EmitterError('tag must not be empty')
+ if tag == u'!':
+ return tag
+ handle = None
+ suffix = tag
+ prefixes = sorted(self.tag_prefixes.keys())
+ for prefix in prefixes:
+ if tag.startswith(prefix) and (prefix == u'!' or len(prefix) < len(tag)):
+ handle = self.tag_prefixes[prefix]
+ suffix = tag[len(prefix) :]
+ chunks = [] # type: List[Any]
+ start = end = 0
+ ch_set = u"-;/?:@&=+$,_.~*'()[]"
+ if self.dumper:
+ version = getattr(self.dumper, 'version', (1, 2))
+ if version is None or version >= (1, 2):
+ ch_set += u'#'
+ while end < len(suffix):
+ ch = suffix[end]
+ if (
+ u'0' <= ch <= u'9'
+ or u'A' <= ch <= u'Z'
+ or u'a' <= ch <= u'z'
+ or ch in ch_set
+ or (ch == u'!' and handle != u'!')
+ ):
+ end += 1
+ else:
+ if start < end:
+ chunks.append(suffix[start:end])
+ start = end = end + 1
+ data = utf8(ch)
+ for ch in data:
+ chunks.append(u'%%%02X' % ord(ch))
+ if start < end:
+ chunks.append(suffix[start:end])
+ suffix_text = "".join(chunks)
+ if handle:
+ return u'%s%s' % (handle, suffix_text)
+ else:
+ return u'!<%s>' % suffix_text
+
+ def prepare_anchor(self, anchor):
+ # type: (Any) -> Any
+ if not anchor:
+ raise EmitterError('anchor must not be empty')
+ for ch in anchor:
+ if not check_anchorname_char(ch):
+ raise EmitterError(
+ 'invalid character %r in the anchor: %r' % (utf8(ch), utf8(anchor))
+ )
+ return anchor
+
+ def analyze_scalar(self, scalar):
+ # type: (Any) -> Any
+ # Empty scalar is a special case.
+ if not scalar:
+ return ScalarAnalysis(
+ scalar=scalar,
+ empty=True,
+ multiline=False,
+ allow_flow_plain=False,
+ allow_block_plain=True,
+ allow_single_quoted=True,
+ allow_double_quoted=True,
+ allow_block=False,
+ )
+
+ # Indicators and special characters.
+ block_indicators = False
+ flow_indicators = False
+ line_breaks = False
+ special_characters = False
+
+ # Important whitespace combinations.
+ leading_space = False
+ leading_break = False
+ trailing_space = False
+ trailing_break = False
+ break_space = False
+ space_break = False
+
+ # Check document indicators.
+ if scalar.startswith(u'---') or scalar.startswith(u'...'):
+ block_indicators = True
+ flow_indicators = True
+
+ # First character or preceded by a whitespace.
+        preceded_by_whitespace = True
+
+ # Last character or followed by a whitespace.
+ followed_by_whitespace = len(scalar) == 1 or scalar[1] in u'\0 \t\r\n\x85\u2028\u2029'
+
+ # The previous character is a space.
+ previous_space = False
+
+ # The previous character is a break.
+ previous_break = False
+
+ index = 0
+ while index < len(scalar):
+ ch = scalar[index]
+
+ # Check for indicators.
+ if index == 0:
+ # Leading indicators are special characters.
+ if ch in u'#,[]{}&*!|>\'"%@`':
+ flow_indicators = True
+ block_indicators = True
+ if ch in u'?:': # ToDo
+ if self.serializer.use_version == (1, 1):
+ flow_indicators = True
+ elif len(scalar) == 1: # single character
+ flow_indicators = True
+ if followed_by_whitespace:
+ block_indicators = True
+ if ch == u'-' and followed_by_whitespace:
+ flow_indicators = True
+ block_indicators = True
+ else:
+ # Some indicators cannot appear within a scalar as well.
+ if ch in u',[]{}': # http://yaml.org/spec/1.2/spec.html#id2788859
+ flow_indicators = True
+ if ch == u'?' and self.serializer.use_version == (1, 1):
+ flow_indicators = True
+ if ch == u':':
+ if followed_by_whitespace:
+ flow_indicators = True
+ block_indicators = True
+            if ch == u'#' and preceded_by_whitespace:
+ flow_indicators = True
+ block_indicators = True
+
+ # Check for line breaks, special, and unicode characters.
+ if ch in u'\n\x85\u2028\u2029':
+ line_breaks = True
+ if not (ch == u'\n' or u'\x20' <= ch <= u'\x7E'):
+ if (
+ ch == u'\x85'
+ or u'\xA0' <= ch <= u'\uD7FF'
+ or u'\uE000' <= ch <= u'\uFFFD'
+ or (self.unicode_supplementary and (u'\U00010000' <= ch <= u'\U0010FFFF'))
+ ) and ch != u'\uFEFF':
+ # unicode_characters = True
+ if not self.allow_unicode:
+ special_characters = True
+ else:
+ special_characters = True
+
+ # Detect important whitespace combinations.
+ if ch == u' ':
+ if index == 0:
+ leading_space = True
+ if index == len(scalar) - 1:
+ trailing_space = True
+ if previous_break:
+ break_space = True
+ previous_space = True
+ previous_break = False
+ elif ch in u'\n\x85\u2028\u2029':
+ if index == 0:
+ leading_break = True
+ if index == len(scalar) - 1:
+ trailing_break = True
+ if previous_space:
+ space_break = True
+ previous_space = False
+ previous_break = True
+ else:
+ previous_space = False
+ previous_break = False
+
+ # Prepare for the next character.
+ index += 1
+            preceded_by_whitespace = ch in u'\0 \t\r\n\x85\u2028\u2029'
+ followed_by_whitespace = (
+ index + 1 >= len(scalar) or scalar[index + 1] in u'\0 \t\r\n\x85\u2028\u2029'
+ )
+
+ # Let's decide what styles are allowed.
+ allow_flow_plain = True
+ allow_block_plain = True
+ allow_single_quoted = True
+ allow_double_quoted = True
+ allow_block = True
+
+ # Leading and trailing whitespaces are bad for plain scalars.
+ if leading_space or leading_break or trailing_space or trailing_break:
+ allow_flow_plain = allow_block_plain = False
+
+ # We do not permit trailing spaces for block scalars.
+ if trailing_space:
+ allow_block = False
+
+ # Spaces at the beginning of a new line are only acceptable for block
+ # scalars.
+ if break_space:
+ allow_flow_plain = allow_block_plain = allow_single_quoted = False
+
+ # Spaces followed by breaks, as well as special character are only
+ # allowed for double quoted scalars.
+ if special_characters:
+ allow_flow_plain = allow_block_plain = allow_single_quoted = allow_block = False
+ elif space_break:
+ allow_flow_plain = allow_block_plain = allow_single_quoted = False
+ if not self.allow_space_break:
+ allow_block = False
+
+ # Although the plain scalar writer supports breaks, we never emit
+ # multiline plain scalars.
+ if line_breaks:
+ allow_flow_plain = allow_block_plain = False
+
+ # Flow indicators are forbidden for flow plain scalars.
+ if flow_indicators:
+ allow_flow_plain = False
+
+ # Block indicators are forbidden for block plain scalars.
+ if block_indicators:
+ allow_block_plain = False
+
+ return ScalarAnalysis(
+ scalar=scalar,
+ empty=False,
+ multiline=line_breaks,
+ allow_flow_plain=allow_flow_plain,
+ allow_block_plain=allow_block_plain,
+ allow_single_quoted=allow_single_quoted,
+ allow_double_quoted=allow_double_quoted,
+ allow_block=allow_block,
+ )
+
+ # Writers.
+
+ def flush_stream(self):
+ # type: () -> None
+ if hasattr(self.stream, 'flush'):
+ self.stream.flush()
+
+ def write_stream_start(self):
+ # type: () -> None
+ # Write BOM if needed.
+ if self.encoding and self.encoding.startswith('utf-16'):
+ self.stream.write(u'\uFEFF'.encode(self.encoding))
+
+ def write_stream_end(self):
+ # type: () -> None
+ self.flush_stream()
+
+ def write_indicator(self, indicator, need_whitespace, whitespace=False, indention=False):
+ # type: (Any, Any, bool, bool) -> None
+ if self.whitespace or not need_whitespace:
+ data = indicator
+ else:
+ data = u' ' + indicator
+ self.whitespace = whitespace
+ self.indention = self.indention and indention
+ self.column += len(data)
+ self.open_ended = False
+ if bool(self.encoding):
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_indent(self):
+ # type: () -> None
+ indent = self.indent or 0
+ if (
+ not self.indention
+ or self.column > indent
+ or (self.column == indent and not self.whitespace)
+ ):
+ if bool(self.no_newline):
+ self.no_newline = False
+ else:
+ self.write_line_break()
+ if self.column < indent:
+ self.whitespace = True
+ data = u' ' * (indent - self.column)
+ self.column = indent
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_line_break(self, data=None):
+ # type: (Any) -> None
+ if data is None:
+ data = self.best_line_break
+ self.whitespace = True
+ self.indention = True
+ self.line += 1
+ self.column = 0
+ if bool(self.encoding):
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_version_directive(self, version_text):
+ # type: (Any) -> None
+ data = u'%%YAML %s' % version_text
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_line_break()
+
+ def write_tag_directive(self, handle_text, prefix_text):
+ # type: (Any, Any) -> None
+ data = u'%%TAG %s %s' % (handle_text, prefix_text)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_line_break()
+
+ # Scalar streams.
+
+ def write_single_quoted(self, text, split=True):
+ # type: (Any, Any) -> None
+ if self.root_context:
+ if self.requested_indent is not None:
+ self.write_line_break()
+ if self.requested_indent != 0:
+ self.write_indent()
+ self.write_indicator(u"'", True)
+ spaces = False
+ breaks = False
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if spaces:
+ if ch is None or ch != u' ':
+ if (
+ start + 1 == end
+ and self.column > self.best_width
+ and split
+ and start != 0
+ and end != len(text)
+ ):
+ self.write_indent()
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if bool(self.encoding):
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ elif breaks:
+ if ch is None or ch not in u'\n\x85\u2028\u2029':
+ if text[start] == u'\n':
+ self.write_line_break()
+ for br in text[start:end]:
+ if br == u'\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ self.write_indent()
+ start = end
+ else:
+ if ch is None or ch in u' \n\x85\u2028\u2029' or ch == u"'":
+ if start < end:
+ data = text[start:end]
+ self.column += len(data)
+ if bool(self.encoding):
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch == u"'":
+ data = u"''"
+ self.column += 2
+ if bool(self.encoding):
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end + 1
+ if ch is not None:
+ spaces = ch == u' '
+ breaks = ch in u'\n\x85\u2028\u2029'
+ end += 1
+ self.write_indicator(u"'", False)
+
+ ESCAPE_REPLACEMENTS = {
+ u'\0': u'0',
+ u'\x07': u'a',
+ u'\x08': u'b',
+ u'\x09': u't',
+ u'\x0A': u'n',
+ u'\x0B': u'v',
+ u'\x0C': u'f',
+ u'\x0D': u'r',
+ u'\x1B': u'e',
+ u'"': u'"',
+ u'\\': u'\\',
+ u'\x85': u'N',
+ u'\xA0': u'_',
+ u'\u2028': u'L',
+ u'\u2029': u'P',
+ }
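+    # used by write_double_quoted() below: u'\x07' is emitted as the two
+    # characters \a, u'\n' as \n, etc.; characters without a short form
+    # fall back to \xXX, \uXXXX or \UXXXXXXXX escapes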
+
+ def write_double_quoted(self, text, split=True):
+ # type: (Any, Any) -> None
+ if self.root_context:
+ if self.requested_indent is not None:
+ self.write_line_break()
+ if self.requested_indent != 0:
+ self.write_indent()
+ self.write_indicator(u'"', True)
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if (
+ ch is None
+ or ch in u'"\\\x85\u2028\u2029\uFEFF'
+ or not (
+ u'\x20' <= ch <= u'\x7E'
+ or (
+ self.allow_unicode
+ and (u'\xA0' <= ch <= u'\uD7FF' or u'\uE000' <= ch <= u'\uFFFD')
+ )
+ )
+ ):
+ if start < end:
+ data = text[start:end]
+ self.column += len(data)
+ if bool(self.encoding):
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch is not None:
+ if ch in self.ESCAPE_REPLACEMENTS:
+ data = u'\\' + self.ESCAPE_REPLACEMENTS[ch]
+ elif ch <= u'\xFF':
+ data = u'\\x%02X' % ord(ch)
+ elif ch <= u'\uFFFF':
+ data = u'\\u%04X' % ord(ch)
+ else:
+ data = u'\\U%08X' % ord(ch)
+ self.column += len(data)
+ if bool(self.encoding):
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end + 1
+ if (
+ 0 < end < len(text) - 1
+ and (ch == u' ' or start >= end)
+ and self.column + (end - start) > self.best_width
+ and split
+ ):
+ data = text[start:end] + u'\\'
+ if start < end:
+ start = end
+ self.column += len(data)
+ if bool(self.encoding):
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ if text[start] == u' ':
+ data = u'\\'
+ self.column += len(data)
+ if bool(self.encoding):
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ end += 1
+ self.write_indicator(u'"', False)
+
+ def determine_block_hints(self, text):
+ # type: (Any) -> Any
+ indent = 0
+ indicator = u''
+ hints = u''
+ if text:
+ if text[0] in u' \n\x85\u2028\u2029':
+ indent = self.best_sequence_indent
+ hints += text_type(indent)
+ elif self.root_context:
+ for end in ['\n---', '\n...']:
+ pos = 0
+ while True:
+ pos = text.find(end, pos)
+ if pos == -1:
+ break
+ try:
+ if text[pos + 4] in ' \r\n':
+ break
+ except IndexError:
+ pass
+ pos += 1
+ if pos > -1:
+ break
+ if pos > 0:
+ indent = self.best_sequence_indent
+ if text[-1] not in u'\n\x85\u2028\u2029':
+ indicator = u'-'
+ elif len(text) == 1 or text[-2] in u'\n\x85\u2028\u2029':
+ indicator = u'+'
+ hints += indicator
+ return hints, indent, indicator
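+    # the hints are appended to the block scalar indicator, e.g. '|2'
+    # (explicit indent because the scalar starts with whitespace), '|-'
+    # (the scalar has no trailing line break) or '|+' (keep all trailing
+    # line breaks)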
+
+ def write_folded(self, text):
+ # type: (Any) -> None
+ hints, _indent, _indicator = self.determine_block_hints(text)
+ self.write_indicator(u'>' + hints, True)
+ if _indicator == u'+':
+ self.open_ended = True
+ self.write_line_break()
+ leading_space = True
+ spaces = False
+ breaks = True
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if breaks:
+ if ch is None or ch not in u'\n\x85\u2028\u2029\a':
+ if (
+ not leading_space
+ and ch is not None
+ and ch != u' '
+ and text[start] == u'\n'
+ ):
+ self.write_line_break()
+ leading_space = ch == u' '
+ for br in text[start:end]:
+ if br == u'\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ if ch is not None:
+ self.write_indent()
+ start = end
+ elif spaces:
+ if ch != u' ':
+ if start + 1 == end and self.column > self.best_width:
+ self.write_indent()
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if bool(self.encoding):
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ else:
+ if ch is None or ch in u' \n\x85\u2028\u2029\a':
+ data = text[start:end]
+ self.column += len(data)
+ if bool(self.encoding):
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ if ch == u'\a':
+ if end < (len(text) - 1) and not text[end + 2].isspace():
+ self.write_line_break()
+ self.write_indent()
+ end += 2 # \a and the space that is inserted on the fold
+ else:
+                        raise EmitterError('unexpected fold indicator \\a before space')
+ if ch is None:
+ self.write_line_break()
+ start = end
+ if ch is not None:
+ breaks = ch in u'\n\x85\u2028\u2029'
+ spaces = ch == u' '
+ end += 1
+
+ def write_literal(self, text, comment=None):
+ # type: (Any, Any) -> None
+ hints, _indent, _indicator = self.determine_block_hints(text)
+ self.write_indicator(u'|' + hints, True)
+ try:
+ comment = comment[1][0]
+ if comment:
+ self.stream.write(comment)
+ except (TypeError, IndexError):
+ pass
+ if _indicator == u'+':
+ self.open_ended = True
+ self.write_line_break()
+ breaks = True
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if breaks:
+ if ch is None or ch not in u'\n\x85\u2028\u2029':
+ for br in text[start:end]:
+ if br == u'\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ if ch is not None:
+ if self.root_context:
+ idnx = self.indent if self.indent is not None else 0
+ self.stream.write(u' ' * (_indent + idnx))
+ else:
+ self.write_indent()
+ start = end
+ else:
+ if ch is None or ch in u'\n\x85\u2028\u2029':
+ data = text[start:end]
+ if bool(self.encoding):
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ if ch is None:
+ self.write_line_break()
+ start = end
+ if ch is not None:
+ breaks = ch in u'\n\x85\u2028\u2029'
+ end += 1
+
+ def write_plain(self, text, split=True):
+ # type: (Any, Any) -> None
+ if self.root_context:
+ if self.requested_indent is not None:
+ self.write_line_break()
+ if self.requested_indent != 0:
+ self.write_indent()
+ else:
+ self.open_ended = True
+ if not text:
+ return
+ if not self.whitespace:
+ data = u' '
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.whitespace = False
+ self.indention = False
+ spaces = False
+ breaks = False
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if spaces:
+ if ch != u' ':
+ if start + 1 == end and self.column > self.best_width and split:
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ elif breaks:
+ if ch not in u'\n\x85\u2028\u2029': # type: ignore
+ if text[start] == u'\n':
+ self.write_line_break()
+ for br in text[start:end]:
+ if br == u'\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ start = end
+ else:
+ if ch is None or ch in u' \n\x85\u2028\u2029':
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ try:
+ self.stream.write(data)
+ except: # NOQA
+ sys.stdout.write(repr(data) + '\n')
+ raise
+ start = end
+ if ch is not None:
+ spaces = ch == u' '
+ breaks = ch in u'\n\x85\u2028\u2029'
+ end += 1
+
+ def write_comment(self, comment, pre=False):
+ # type: (Any, bool) -> None
+ value = comment.value
+ # nprintf('{:02d} {:02d} {!r}'.format(self.column, comment.start_mark.column, value))
+ if not pre and value[-1] == '\n':
+ value = value[:-1]
+ try:
+ # get original column position
+ col = comment.start_mark.column
+            if comment.value and comment.value.startswith('\n'):
+                # never inject extra spaces if the comment starts with a newline
+                # and is not a real comment (e.g. if you have an empty line
+                # following a key-value pair)
+                col = self.column
+            elif col < self.column + 1:
+                raise ValueError
+ except ValueError:
+ col = self.column + 1
+ # nprint('post_comment', self.line, self.column, value)
+ try:
+ # at least one space if the current column >= the start column of the comment
+ # but not at the start of a line
+ nr_spaces = col - self.column
+ if self.column and value.strip() and nr_spaces < 1 and value[0] != '\n':
+ nr_spaces = 1
+ value = ' ' * nr_spaces + value
+ try:
+ if bool(self.encoding):
+ value = value.encode(self.encoding)
+ except UnicodeDecodeError:
+ pass
+ self.stream.write(value)
+ except TypeError:
+ raise
+ if not pre:
+ self.write_line_break()
+
+ def write_pre_comment(self, event):
+ # type: (Any) -> bool
+ comments = event.comment[1]
+ if comments is None:
+ return False
+ try:
+ start_events = (MappingStartEvent, SequenceStartEvent)
+ for comment in comments:
+ if isinstance(event, start_events) and getattr(comment, 'pre_done', None):
+ continue
+ if self.column != 0:
+ self.write_line_break()
+ self.write_comment(comment, pre=True)
+ if isinstance(event, start_events):
+ comment.pre_done = True
+ except TypeError:
+ sys.stdout.write('eventtt {} {}'.format(type(event), event))
+ raise
+ return True
+
+ def write_post_comment(self, event):
+ # type: (Any) -> bool
+ if self.event.comment[0] is None:
+ return False
+ comment = event.comment[0]
+ self.write_comment(comment)
+ return True
diff --git a/libs/dynaconf/vendor/ruamel/yaml/error.py b/libs/dynaconf/vendor/ruamel/yaml/error.py
new file mode 100644
index 000000000..b034d022f
--- /dev/null
+++ b/libs/dynaconf/vendor/ruamel/yaml/error.py
@@ -0,0 +1,311 @@
+# coding: utf-8
+
+from __future__ import absolute_import
+
+import warnings
+import textwrap
+
+from .compat import utf8
+
+if False: # MYPY
+ from typing import Any, Dict, Optional, List, Text # NOQA
+
+
+__all__ = [
+ 'FileMark',
+ 'StringMark',
+ 'CommentMark',
+ 'YAMLError',
+ 'MarkedYAMLError',
+ 'ReusedAnchorWarning',
+ 'UnsafeLoaderWarning',
+ 'MarkedYAMLWarning',
+ 'MarkedYAMLFutureWarning',
+]
+
+
+class StreamMark(object):
+ __slots__ = 'name', 'index', 'line', 'column'
+
+ def __init__(self, name, index, line, column):
+ # type: (Any, int, int, int) -> None
+ self.name = name
+ self.index = index
+ self.line = line
+ self.column = column
+
+ def __str__(self):
+ # type: () -> Any
+ where = ' in "%s", line %d, column %d' % (self.name, self.line + 1, self.column + 1)
+ return where
+
+ def __eq__(self, other):
+ # type: (Any) -> bool
+ if self.line != other.line or self.column != other.column:
+ return False
+ if self.name != other.name or self.index != other.index:
+ return False
+ return True
+
+ def __ne__(self, other):
+ # type: (Any) -> bool
+ return not self.__eq__(other)
+
+
+class FileMark(StreamMark):
+ __slots__ = ()
+
+
+class StringMark(StreamMark):
+ __slots__ = 'name', 'index', 'line', 'column', 'buffer', 'pointer'
+
+ def __init__(self, name, index, line, column, buffer, pointer):
+ # type: (Any, int, int, int, Any, Any) -> None
+ StreamMark.__init__(self, name, index, line, column)
+ self.buffer = buffer
+ self.pointer = pointer
+
+ def get_snippet(self, indent=4, max_length=75):
+ # type: (int, int) -> Any
+ if self.buffer is None: # always False
+ return None
+ head = ""
+ start = self.pointer
+ while start > 0 and self.buffer[start - 1] not in u'\0\r\n\x85\u2028\u2029':
+ start -= 1
+ if self.pointer - start > max_length / 2 - 1:
+ head = ' ... '
+ start += 5
+ break
+ tail = ""
+ end = self.pointer
+ while end < len(self.buffer) and self.buffer[end] not in u'\0\r\n\x85\u2028\u2029':
+ end += 1
+ if end - self.pointer > max_length / 2 - 1:
+ tail = ' ... '
+ end -= 5
+ break
+ snippet = utf8(self.buffer[start:end])
+        caret = '^ (line: {})'.format(self.line + 1)
+ return (
+ ' ' * indent
+ + head
+ + snippet
+ + tail
+ + '\n'
+ + ' ' * (indent + self.pointer - start + len(head))
+ + caret
+ )
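+    # produces output along the lines of:
+    #         foo: [1, 2
+    #                   ^ (line: 3)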
+
+ def __str__(self):
+ # type: () -> Any
+ snippet = self.get_snippet()
+ where = ' in "%s", line %d, column %d' % (self.name, self.line + 1, self.column + 1)
+ if snippet is not None:
+ where += ':\n' + snippet
+ return where
+
+
+class CommentMark(object):
+ __slots__ = ('column',)
+
+ def __init__(self, column):
+ # type: (Any) -> None
+ self.column = column
+
+
+class YAMLError(Exception):
+ pass
+
+
+class MarkedYAMLError(YAMLError):
+ def __init__(
+ self,
+ context=None,
+ context_mark=None,
+ problem=None,
+ problem_mark=None,
+ note=None,
+ warn=None,
+ ):
+ # type: (Any, Any, Any, Any, Any, Any) -> None
+ self.context = context
+ self.context_mark = context_mark
+ self.problem = problem
+ self.problem_mark = problem_mark
+ self.note = note
+ # warn is ignored
+
+ def __str__(self):
+ # type: () -> Any
+ lines = [] # type: List[str]
+ if self.context is not None:
+ lines.append(self.context)
+ if self.context_mark is not None and (
+ self.problem is None
+ or self.problem_mark is None
+ or self.context_mark.name != self.problem_mark.name
+ or self.context_mark.line != self.problem_mark.line
+ or self.context_mark.column != self.problem_mark.column
+ ):
+ lines.append(str(self.context_mark))
+ if self.problem is not None:
+ lines.append(self.problem)
+ if self.problem_mark is not None:
+ lines.append(str(self.problem_mark))
+ if self.note is not None and self.note:
+ note = textwrap.dedent(self.note)
+ lines.append(note)
+ return '\n'.join(lines)
+
+
+class YAMLStreamError(Exception):
+ pass
+
+
+class YAMLWarning(Warning):
+ pass
+
+
+class MarkedYAMLWarning(YAMLWarning):
+ def __init__(
+ self,
+ context=None,
+ context_mark=None,
+ problem=None,
+ problem_mark=None,
+ note=None,
+ warn=None,
+ ):
+ # type: (Any, Any, Any, Any, Any, Any) -> None
+ self.context = context
+ self.context_mark = context_mark
+ self.problem = problem
+ self.problem_mark = problem_mark
+ self.note = note
+ self.warn = warn
+
+ def __str__(self):
+ # type: () -> Any
+ lines = [] # type: List[str]
+ if self.context is not None:
+ lines.append(self.context)
+ if self.context_mark is not None and (
+ self.problem is None
+ or self.problem_mark is None
+ or self.context_mark.name != self.problem_mark.name
+ or self.context_mark.line != self.problem_mark.line
+ or self.context_mark.column != self.problem_mark.column
+ ):
+ lines.append(str(self.context_mark))
+ if self.problem is not None:
+ lines.append(self.problem)
+ if self.problem_mark is not None:
+ lines.append(str(self.problem_mark))
+ if self.note is not None and self.note:
+ note = textwrap.dedent(self.note)
+ lines.append(note)
+ if self.warn is not None and self.warn:
+ warn = textwrap.dedent(self.warn)
+ lines.append(warn)
+ return '\n'.join(lines)
+
+
+class ReusedAnchorWarning(YAMLWarning):
+ pass
+
+
+class UnsafeLoaderWarning(YAMLWarning):
+ text = """
+The default 'Loader' for 'load(stream)' without further arguments can be unsafe.
+Use 'load(stream, Loader=ruamel.yaml.Loader)' explicitly if that is OK.
+Alternatively include the following in your code:
+
+ import warnings
+ warnings.simplefilter('ignore', ruamel.yaml.error.UnsafeLoaderWarning)
+
+In most other cases you should consider using 'safe_load(stream)'"""
+ pass
+
+
+warnings.simplefilter('once', UnsafeLoaderWarning)
+
+
+class MantissaNoDotYAML1_1Warning(YAMLWarning):
+ def __init__(self, node, flt_str):
+ # type: (Any, Any) -> None
+ self.node = node
+ self.flt = flt_str
+
+ def __str__(self):
+ # type: () -> Any
+ line = self.node.start_mark.line
+ col = self.node.start_mark.column
+ return """
+In YAML 1.1 floating point values should have a dot ('.') in their mantissa.
+See the Floating-Point Language-Independent Type for YAML™ Version 1.1 specification
+( http://yaml.org/type/float.html ). This dot is not required for JSON nor for YAML 1.2
+
+Correct your float: "{}" on line: {}, column: {}
+
+or alternatively include the following in your code:
+
+ import warnings
+ warnings.simplefilter('ignore', ruamel.yaml.error.MantissaNoDotYAML1_1Warning)
+
+""".format(
+ self.flt, line, col
+ )
+
+
+warnings.simplefilter('once', MantissaNoDotYAML1_1Warning)
+
+
+class YAMLFutureWarning(Warning):
+ pass
+
+
+class MarkedYAMLFutureWarning(YAMLFutureWarning):
+ def __init__(
+ self,
+ context=None,
+ context_mark=None,
+ problem=None,
+ problem_mark=None,
+ note=None,
+ warn=None,
+ ):
+ # type: (Any, Any, Any, Any, Any, Any) -> None
+ self.context = context
+ self.context_mark = context_mark
+ self.problem = problem
+ self.problem_mark = problem_mark
+ self.note = note
+ self.warn = warn
+
+ def __str__(self):
+ # type: () -> Any
+ lines = [] # type: List[str]
+ if self.context is not None:
+ lines.append(self.context)
+
+ if self.context_mark is not None and (
+ self.problem is None
+ or self.problem_mark is None
+ or self.context_mark.name != self.problem_mark.name
+ or self.context_mark.line != self.problem_mark.line
+ or self.context_mark.column != self.problem_mark.column
+ ):
+ lines.append(str(self.context_mark))
+ if self.problem is not None:
+ lines.append(self.problem)
+ if self.problem_mark is not None:
+ lines.append(str(self.problem_mark))
+ if self.note is not None and self.note:
+ note = textwrap.dedent(self.note)
+ lines.append(note)
+ if self.warn is not None and self.warn:
+ warn = textwrap.dedent(self.warn)
+ lines.append(warn)
+ return '\n'.join(lines)
diff --git a/libs/dynaconf/vendor/ruamel/yaml/events.py b/libs/dynaconf/vendor/ruamel/yaml/events.py
new file mode 100644
index 000000000..58b212190
--- /dev/null
+++ b/libs/dynaconf/vendor/ruamel/yaml/events.py
@@ -0,0 +1,157 @@
+# coding: utf-8
+
+# Abstract classes.
+
+if False: # MYPY
+ from typing import Any, Dict, Optional, List # NOQA
+
+
+def CommentCheck():
+ # type: () -> None
+ pass
+
+
+class Event(object):
+ __slots__ = 'start_mark', 'end_mark', 'comment'
+
+ def __init__(self, start_mark=None, end_mark=None, comment=CommentCheck):
+ # type: (Any, Any, Any) -> None
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ # assert comment is not CommentCheck
+ if comment is CommentCheck:
+ comment = None
+ self.comment = comment
+
+ def __repr__(self):
+ # type: () -> Any
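+        # gives, for example:
+        #   ScalarEvent(anchor=None, tag=None, implicit=(True, False),
+        #   value='abc', style=None)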
+ attributes = [
+ key
+ for key in ['anchor', 'tag', 'implicit', 'value', 'flow_style', 'style']
+ if hasattr(self, key)
+ ]
+ arguments = ', '.join(['%s=%r' % (key, getattr(self, key)) for key in attributes])
+ if self.comment not in [None, CommentCheck]:
+ arguments += ', comment={!r}'.format(self.comment)
+ return '%s(%s)' % (self.__class__.__name__, arguments)
+
+
+class NodeEvent(Event):
+ __slots__ = ('anchor',)
+
+ def __init__(self, anchor, start_mark=None, end_mark=None, comment=None):
+ # type: (Any, Any, Any, Any) -> None
+ Event.__init__(self, start_mark, end_mark, comment)
+ self.anchor = anchor
+
+
+class CollectionStartEvent(NodeEvent):
+ __slots__ = 'tag', 'implicit', 'flow_style', 'nr_items'
+
+ def __init__(
+ self,
+ anchor,
+ tag,
+ implicit,
+ start_mark=None,
+ end_mark=None,
+ flow_style=None,
+ comment=None,
+ nr_items=None,
+ ):
+ # type: (Any, Any, Any, Any, Any, Any, Any, Optional[int]) -> None
+ NodeEvent.__init__(self, anchor, start_mark, end_mark, comment)
+ self.tag = tag
+ self.implicit = implicit
+ self.flow_style = flow_style
+ self.nr_items = nr_items
+
+
+class CollectionEndEvent(Event):
+ __slots__ = ()
+
+
+# Implementations.
+
+
+class StreamStartEvent(Event):
+ __slots__ = ('encoding',)
+
+ def __init__(self, start_mark=None, end_mark=None, encoding=None, comment=None):
+ # type: (Any, Any, Any, Any) -> None
+ Event.__init__(self, start_mark, end_mark, comment)
+ self.encoding = encoding
+
+
+class StreamEndEvent(Event):
+ __slots__ = ()
+
+
+class DocumentStartEvent(Event):
+ __slots__ = 'explicit', 'version', 'tags'
+
+ def __init__(
+ self,
+ start_mark=None,
+ end_mark=None,
+ explicit=None,
+ version=None,
+ tags=None,
+ comment=None,
+ ):
+ # type: (Any, Any, Any, Any, Any, Any) -> None
+ Event.__init__(self, start_mark, end_mark, comment)
+ self.explicit = explicit
+ self.version = version
+ self.tags = tags
+
+
+class DocumentEndEvent(Event):
+ __slots__ = ('explicit',)
+
+ def __init__(self, start_mark=None, end_mark=None, explicit=None, comment=None):
+ # type: (Any, Any, Any, Any) -> None
+ Event.__init__(self, start_mark, end_mark, comment)
+ self.explicit = explicit
+
+
+class AliasEvent(NodeEvent):
+ __slots__ = ()
+
+
+class ScalarEvent(NodeEvent):
+ __slots__ = 'tag', 'implicit', 'value', 'style'
+
+ def __init__(
+ self,
+ anchor,
+ tag,
+ implicit,
+ value,
+ start_mark=None,
+ end_mark=None,
+ style=None,
+ comment=None,
+ ):
+ # type: (Any, Any, Any, Any, Any, Any, Any, Any) -> None
+ NodeEvent.__init__(self, anchor, start_mark, end_mark, comment)
+ self.tag = tag
+ self.implicit = implicit
+ self.value = value
+ self.style = style
+
+
+class SequenceStartEvent(CollectionStartEvent):
+ __slots__ = ()
+
+
+class SequenceEndEvent(CollectionEndEvent):
+ __slots__ = ()
+
+
+class MappingStartEvent(CollectionStartEvent):
+ __slots__ = ()
+
+
+class MappingEndEvent(CollectionEndEvent):
+ __slots__ = ()
diff --git a/libs/dynaconf/vendor/ruamel/yaml/loader.py b/libs/dynaconf/vendor/ruamel/yaml/loader.py
new file mode 100644
index 000000000..53dd576a2
--- /dev/null
+++ b/libs/dynaconf/vendor/ruamel/yaml/loader.py
@@ -0,0 +1,74 @@
+# coding: utf-8
+
+from __future__ import absolute_import
+
+
+from .reader import Reader
+from .scanner import Scanner, RoundTripScanner
+from .parser import Parser, RoundTripParser
+from .composer import Composer
+from .constructor import (
+ BaseConstructor,
+ SafeConstructor,
+ Constructor,
+ RoundTripConstructor,
+)
+from .resolver import VersionedResolver
+
+if False: # MYPY
+ from typing import Any, Dict, List, Union, Optional # NOQA
+ from .compat import StreamTextType, VersionType # NOQA
+
+__all__ = ['BaseLoader', 'SafeLoader', 'Loader', 'RoundTripLoader']
+
+
+class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, VersionedResolver):
+ def __init__(self, stream, version=None, preserve_quotes=None):
+ # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None
+ Reader.__init__(self, stream, loader=self)
+ Scanner.__init__(self, loader=self)
+ Parser.__init__(self, loader=self)
+ Composer.__init__(self, loader=self)
+ BaseConstructor.__init__(self, loader=self)
+ VersionedResolver.__init__(self, version, loader=self)
+
+
+class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, VersionedResolver):
+ def __init__(self, stream, version=None, preserve_quotes=None):
+ # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None
+ Reader.__init__(self, stream, loader=self)
+ Scanner.__init__(self, loader=self)
+ Parser.__init__(self, loader=self)
+ Composer.__init__(self, loader=self)
+ SafeConstructor.__init__(self, loader=self)
+ VersionedResolver.__init__(self, version, loader=self)
+
+
+class Loader(Reader, Scanner, Parser, Composer, Constructor, VersionedResolver):
+ def __init__(self, stream, version=None, preserve_quotes=None):
+ # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None
+ Reader.__init__(self, stream, loader=self)
+ Scanner.__init__(self, loader=self)
+ Parser.__init__(self, loader=self)
+ Composer.__init__(self, loader=self)
+ Constructor.__init__(self, loader=self)
+ VersionedResolver.__init__(self, version, loader=self)
+
+
+class RoundTripLoader(
+ Reader,
+ RoundTripScanner,
+ RoundTripParser,
+ Composer,
+ RoundTripConstructor,
+ VersionedResolver,
+):
+ def __init__(self, stream, version=None, preserve_quotes=None):
+ # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None
+ # self.reader = Reader.__init__(self, stream)
+ Reader.__init__(self, stream, loader=self)
+ RoundTripScanner.__init__(self, loader=self)
+ RoundTripParser.__init__(self, loader=self)
+ Composer.__init__(self, loader=self)
+ RoundTripConstructor.__init__(self, preserve_quotes=preserve_quotes, loader=self)
+ VersionedResolver.__init__(self, version, loader=self)
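+
+# Editor's note, a hedged sketch (not part of the upstream source): each of
+# these classes is a mixin stack (Reader -> Scanner -> Parser -> Composer ->
+# Constructor -> Resolver) wired together via the loader=self arguments, and
+# is normally passed to the helpers in main.py, but can be driven directly:
+#
+#     import io
+#     loader = SafeLoader(io.StringIO('a: 1'))
+#     try:
+#         print(loader.get_single_data())   # -> {'a': 1}
+#     finally:
+#         loader.dispose()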
diff --git a/libs/dynaconf/vendor/ruamel/yaml/main.py b/libs/dynaconf/vendor/ruamel/yaml/main.py
new file mode 100644
index 000000000..702333142
--- /dev/null
+++ b/libs/dynaconf/vendor/ruamel/yaml/main.py
@@ -0,0 +1,1534 @@
+# coding: utf-8
+
+from __future__ import absolute_import, unicode_literals, print_function
+
+import sys
+import os
+import warnings
+import glob
+from importlib import import_module
+
+
+import dynaconf.vendor.ruamel as ruamel
+from .error import UnsafeLoaderWarning, YAMLError # NOQA
+
+from .tokens import * # NOQA
+from .events import * # NOQA
+from .nodes import * # NOQA
+
+from .loader import BaseLoader, SafeLoader, Loader, RoundTripLoader # NOQA
+from .dumper import BaseDumper, SafeDumper, Dumper, RoundTripDumper # NOQA
+from .compat import StringIO, BytesIO, with_metaclass, PY3, nprint
+from .resolver import VersionedResolver, Resolver # NOQA
+from .representer import (
+ BaseRepresenter,
+ SafeRepresenter,
+ Representer,
+ RoundTripRepresenter,
+)
+from .constructor import (
+ BaseConstructor,
+ SafeConstructor,
+ Constructor,
+ RoundTripConstructor,
+)
+from .loader import Loader as UnsafeLoader
+
+if False: # MYPY
+ from typing import List, Set, Dict, Union, Any, Callable, Optional, Text # NOQA
+ from .compat import StreamType, StreamTextType, VersionType # NOQA
+
+ if PY3:
+ from pathlib import Path
+ else:
+ Path = Any
+
+try:
+ from _ruamel_yaml import CParser, CEmitter # type: ignore
+except: # NOQA
+ CParser = CEmitter = None
+
+# import io
+
+enforce = object()
+
+
+# YAML is an acronym (spoken it rhymes with "camel") and thus a kind of
+# abbreviation, which should be all caps according to PEP8
+
+
+class YAML(object):
+ def __init__(
+ self, _kw=enforce, typ=None, pure=False, output=None, plug_ins=None # input=None,
+ ):
+ # type: (Any, Optional[Text], Any, Any, Any) -> None
+ """
+ _kw: not used, forces keyword arguments in 2.7 (in Python 3 you can use '*'
+ to make the following arguments keyword-only)
+ typ: 'rt'/None -> RoundTripLoader/RoundTripDumper (default),
+ 'safe' -> SafeLoader/SafeDumper,
+ 'unsafe' -> normal/unsafe Loader/Dumper,
+ 'base' -> BaseLoader/BaseDumper
+ pure: if True only use Python modules
+ input/output: needed to work as context manager
+ plug_ins: a list of plug-in files
+ """
+ if _kw is not enforce:
+ raise TypeError(
+ '{}.__init__() takes no positional argument but at least '
+ 'one was given ({!r})'.format(self.__class__.__name__, _kw)
+ )
+
+ self.typ = ['rt'] if typ is None else (typ if isinstance(typ, list) else [typ])
+ self.pure = pure
+
+ # self._input = input
+ self._output = output
+ self._context_manager = None # type: Any
+
+ self.plug_ins = [] # type: List[Any]
+ for pu in ([] if plug_ins is None else plug_ins) + self.official_plug_ins():
+ file_name = pu.replace(os.sep, '.')
+ self.plug_ins.append(import_module(file_name))
+ self.Resolver = ruamel.yaml.resolver.VersionedResolver # type: Any
+ self.allow_unicode = True
+ self.Reader = None # type: Any
+ self.Representer = None # type: Any
+ self.Constructor = None # type: Any
+ self.Scanner = None # type: Any
+ self.Serializer = None # type: Any
+ self.default_flow_style = None # type: Any
+ typ_found = 1
+ setup_rt = False
+ if 'rt' in self.typ:
+ setup_rt = True
+ elif 'safe' in self.typ:
+ self.Emitter = (
+ ruamel.yaml.emitter.Emitter if pure or CEmitter is None else CEmitter
+ )
+ self.Representer = ruamel.yaml.representer.SafeRepresenter
+ self.Parser = ruamel.yaml.parser.Parser if pure or CParser is None else CParser
+ self.Composer = ruamel.yaml.composer.Composer
+ self.Constructor = ruamel.yaml.constructor.SafeConstructor
+ elif 'base' in self.typ:
+ self.Emitter = ruamel.yaml.emitter.Emitter
+ self.Representer = ruamel.yaml.representer.BaseRepresenter
+ self.Parser = ruamel.yaml.parser.Parser if pure or CParser is None else CParser
+ self.Composer = ruamel.yaml.composer.Composer
+ self.Constructor = ruamel.yaml.constructor.BaseConstructor
+ elif 'unsafe' in self.typ:
+ self.Emitter = (
+ ruamel.yaml.emitter.Emitter if pure or CEmitter is None else CEmitter
+ )
+ self.Representer = ruamel.yaml.representer.Representer
+ self.Parser = ruamel.yaml.parser.Parser if pure or CParser is None else CParser
+ self.Composer = ruamel.yaml.composer.Composer
+ self.Constructor = ruamel.yaml.constructor.Constructor
+ else:
+ setup_rt = True
+ typ_found = 0
+ if setup_rt:
+ self.default_flow_style = False
+ # no optimized rt-dumper yet
+ self.Emitter = ruamel.yaml.emitter.Emitter
+ self.Serializer = ruamel.yaml.serializer.Serializer
+ self.Representer = ruamel.yaml.representer.RoundTripRepresenter
+ self.Scanner = ruamel.yaml.scanner.RoundTripScanner
+ # no optimized rt-parser yet
+ self.Parser = ruamel.yaml.parser.RoundTripParser
+ self.Composer = ruamel.yaml.composer.Composer
+ self.Constructor = ruamel.yaml.constructor.RoundTripConstructor
+ del setup_rt
+ self.stream = None
+ self.canonical = None
+ self.old_indent = None
+ self.width = None
+ self.line_break = None
+
+ self.map_indent = None
+ self.sequence_indent = None
+ self.sequence_dash_offset = 0
+ self.compact_seq_seq = None
+ self.compact_seq_map = None
+ self.sort_base_mapping_type_on_output = None # default: sort
+
+ self.top_level_colon_align = None
+ self.prefix_colon = None
+ self.version = None
+ self.preserve_quotes = None
+ self.allow_duplicate_keys = False # duplicate keys in map, set
+ self.encoding = 'utf-8'
+ self.explicit_start = None
+ self.explicit_end = None
+ self.tags = None
+ self.default_style = None
+ self.top_level_block_style_scalar_no_indent_error_1_1 = False
+ # directives end indicator with single scalar document
+ self.scalar_after_indicator = None
+ # [a, b: 1, c: {d: 2}] vs. [a, {b: 1}, {c: {d: 2}}]
+ self.brace_single_entry_mapping_in_flow_sequence = False
+ for module in self.plug_ins:
+ if getattr(module, 'typ', None) in self.typ:
+ typ_found += 1
+ module.init_typ(self)
+ break
+ if typ_found == 0:
+ raise NotImplementedError(
+ 'typ "{}"not recognised (need to install plug-in?)'.format(self.typ)
+ )
+
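+ # Editor's note, a hedged usage sketch (not upstream code): the typ chosen
+ # here selects the component classes assigned above, e.g.
+ #
+ #     yaml = YAML(typ='safe')          # SafeConstructor/SafeRepresenter
+ #     data = yaml.load('a: [1, 2]')    # plain strings are accepted as streams
+ #     assert data == {'a': [1, 2]}
+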
+ @property
+ def reader(self):
+ # type: () -> Any
+ try:
+ return self._reader # type: ignore
+ except AttributeError:
+ self._reader = self.Reader(None, loader=self)
+ return self._reader
+
+ @property
+ def scanner(self):
+ # type: () -> Any
+ try:
+ return self._scanner # type: ignore
+ except AttributeError:
+ self._scanner = self.Scanner(loader=self)
+ return self._scanner
+
+ @property
+ def parser(self):
+ # type: () -> Any
+ attr = '_' + sys._getframe().f_code.co_name
+ if not hasattr(self, attr):
+ if self.Parser is not CParser:
+ setattr(self, attr, self.Parser(loader=self))
+ else:
+ if getattr(self, '_stream', None) is None:
+ # wait for the stream
+ return None
+ else:
+ # if not hasattr(self._stream, 'read') and hasattr(self._stream, 'open'):
+ # # pathlib.Path() instance
+ # setattr(self, attr, CParser(self._stream))
+ # else:
+ setattr(self, attr, CParser(self._stream))
+ # self._parser = self._composer = self
+ # nprint('scanner', self.loader.scanner)
+
+ return getattr(self, attr)
+
+ @property
+ def composer(self):
+ # type: () -> Any
+ attr = '_' + sys._getframe().f_code.co_name
+ if not hasattr(self, attr):
+ setattr(self, attr, self.Composer(loader=self))
+ return getattr(self, attr)
+
+ @property
+ def constructor(self):
+ # type: () -> Any
+ attr = '_' + sys._getframe().f_code.co_name
+ if not hasattr(self, attr):
+ cnst = self.Constructor(preserve_quotes=self.preserve_quotes, loader=self)
+ cnst.allow_duplicate_keys = self.allow_duplicate_keys
+ setattr(self, attr, cnst)
+ return getattr(self, attr)
+
+ @property
+ def resolver(self):
+ # type: () -> Any
+ attr = '_' + sys._getframe().f_code.co_name
+ if not hasattr(self, attr):
+ setattr(self, attr, self.Resolver(version=self.version, loader=self))
+ return getattr(self, attr)
+
+ @property
+ def emitter(self):
+ # type: () -> Any
+ attr = '_' + sys._getframe().f_code.co_name
+ if not hasattr(self, attr):
+ if self.Emitter is not CEmitter:
+ _emitter = self.Emitter(
+ None,
+ canonical=self.canonical,
+ indent=self.old_indent,
+ width=self.width,
+ allow_unicode=self.allow_unicode,
+ line_break=self.line_break,
+ prefix_colon=self.prefix_colon,
+ brace_single_entry_mapping_in_flow_sequence=self.brace_single_entry_mapping_in_flow_sequence, # NOQA
+ dumper=self,
+ )
+ setattr(self, attr, _emitter)
+ if self.map_indent is not None:
+ _emitter.best_map_indent = self.map_indent
+ if self.sequence_indent is not None:
+ _emitter.best_sequence_indent = self.sequence_indent
+ if self.sequence_dash_offset is not None:
+ _emitter.sequence_dash_offset = self.sequence_dash_offset
+ # _emitter.block_seq_indent = self.sequence_dash_offset
+ if self.compact_seq_seq is not None:
+ _emitter.compact_seq_seq = self.compact_seq_seq
+ if self.compact_seq_map is not None:
+ _emitter.compact_seq_map = self.compact_seq_map
+ else:
+ if getattr(self, '_stream', None) is None:
+ # wait for the stream
+ return None
+ return None
+ return getattr(self, attr)
+
+ @property
+ def serializer(self):
+ # type: () -> Any
+ attr = '_' + sys._getframe().f_code.co_name
+ if not hasattr(self, attr):
+ setattr(
+ self,
+ attr,
+ self.Serializer(
+ encoding=self.encoding,
+ explicit_start=self.explicit_start,
+ explicit_end=self.explicit_end,
+ version=self.version,
+ tags=self.tags,
+ dumper=self,
+ ),
+ )
+ return getattr(self, attr)
+
+ @property
+ def representer(self):
+ # type: () -> Any
+ attr = '_' + sys._getframe().f_code.co_name
+ if not hasattr(self, attr):
+ repres = self.Representer(
+ default_style=self.default_style,
+ default_flow_style=self.default_flow_style,
+ dumper=self,
+ )
+ if self.sort_base_mapping_type_on_output is not None:
+ repres.sort_base_mapping_type_on_output = self.sort_base_mapping_type_on_output
+ setattr(self, attr, repres)
+ return getattr(self, attr)
+
+ # separate output resolver?
+
+ # def load(self, stream=None):
+ # if self._context_manager:
+ # if not self._input:
+ # raise TypeError("Missing input stream while dumping from context manager")
+ # for data in self._context_manager.load():
+ # yield data
+ # return
+ # if stream is None:
+ # raise TypeError("Need a stream argument when not loading from context manager")
+ # return self.load_one(stream)
+
+ def load(self, stream):
+ # type: (Union[Path, StreamTextType]) -> Any
+ """
+ at this point you either have the non-pure Parser (which has its own reader
+ and scanner) or you have the pure Parser.
+ If the pure Parser is set, then set the Reader and Scanner, if not already set.
+ If either the Scanner or the Reader is set, you cannot use the non-pure
+ Parser, so reset it to the pure Parser and set the Reader and/or Scanner as necessary
+ """
+ if not hasattr(stream, 'read') and hasattr(stream, 'open'):
+ # pathlib.Path() instance
+ with stream.open('rb') as fp:
+ return self.load(fp)
+ constructor, parser = self.get_constructor_parser(stream)
+ try:
+ return constructor.get_single_data()
+ finally:
+ parser.dispose()
+ try:
+ self._reader.reset_reader()
+ except AttributeError:
+ pass
+ try:
+ self._scanner.reset_scanner()
+ except AttributeError:
+ pass
+
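+ # A hedged sketch (editor's illustration): load() accepts an open stream,
+ # a pathlib.Path or a plain string; 'settings.yaml' below is hypothetical:
+ #
+ #     from pathlib import Path
+ #     yaml = YAML(typ='safe')
+ #     data = yaml.load(Path('settings.yaml'))
+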
+ def load_all(self, stream, _kw=enforce): # , skip=None):
+ # type: (Union[Path, StreamTextType], Any) -> Any
+ if _kw is not enforce:
+ raise TypeError(
+ '{}.load_all() takes one positional argument but at least '
+ 'two were given ({!r})'.format(self.__class__.__name__, _kw)
+ )
+ if not hasattr(stream, 'read') and hasattr(stream, 'open'):
+ # pathlib.Path() instance
+ with stream.open('r') as fp:
+ for d in self.load_all(fp, _kw=enforce):
+ yield d
+ return
+ # if skip is None:
+ # skip = []
+ # elif isinstance(skip, int):
+ # skip = [skip]
+ constructor, parser = self.get_constructor_parser(stream)
+ try:
+ while constructor.check_data():
+ yield constructor.get_data()
+ finally:
+ parser.dispose()
+ try:
+ self._reader.reset_reader()
+ except AttributeError:
+ pass
+ try:
+ self._scanner.reset_scanner()
+ except AttributeError:
+ pass
+
+ def get_constructor_parser(self, stream):
+ # type: (StreamTextType) -> Any
+ """
+ the old cyaml needs special setup, and therefore the stream has to be available up front
+ """
+ if self.Parser is not CParser:
+ if self.Reader is None:
+ self.Reader = ruamel.yaml.reader.Reader
+ if self.Scanner is None:
+ self.Scanner = ruamel.yaml.scanner.Scanner
+ self.reader.stream = stream
+ else:
+ if self.Reader is not None:
+ if self.Scanner is None:
+ self.Scanner = ruamel.yaml.scanner.Scanner
+ self.Parser = ruamel.yaml.parser.Parser
+ self.reader.stream = stream
+ elif self.Scanner is not None:
+ if self.Reader is None:
+ self.Reader = ruamel.yaml.reader.Reader
+ self.Parser = ruamel.yaml.parser.Parser
+ self.reader.stream = stream
+ else:
+ # combined C level reader>scanner>parser
+ # does some calls to the resolver, e.g. BaseResolver.descend_resolver
+ # if you just initialise the CParser, too much of resolver.py
+ # is actually used
+ rslvr = self.Resolver
+ # if rslvr is ruamel.yaml.resolver.VersionedResolver:
+ # rslvr = ruamel.yaml.resolver.Resolver
+
+ class XLoader(self.Parser, self.Constructor, rslvr): # type: ignore
+ def __init__(selfx, stream, version=self.version, preserve_quotes=None):
+ # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None # NOQA
+ CParser.__init__(selfx, stream)
+ selfx._parser = selfx._composer = selfx
+ self.Constructor.__init__(selfx, loader=selfx)
+ selfx.allow_duplicate_keys = self.allow_duplicate_keys
+ rslvr.__init__(selfx, version=version, loadumper=selfx)
+
+ self._stream = stream
+ loader = XLoader(stream)
+ return loader, loader
+ return self.constructor, self.parser
+
+ def dump(self, data, stream=None, _kw=enforce, transform=None):
+ # type: (Any, Union[Path, StreamType], Any, Any) -> Any
+ if self._context_manager:
+ if not self._output:
+ raise TypeError('Missing output stream while dumping from context manager')
+ if _kw is not enforce:
+ raise TypeError(
+ '{}.dump() takes one positional argument but at least '
+ 'two were given ({!r})'.format(self.__class__.__name__, _kw)
+ )
+ if transform is not None:
+ raise TypeError(
+ '{}.dump() in the context manager cannot have transform keyword '
+ ''.format(self.__class__.__name__)
+ )
+ self._context_manager.dump(data)
+ else: # old style
+ if stream is None:
+ raise TypeError('Need a stream argument when not dumping from context manager')
+ return self.dump_all([data], stream, _kw, transform=transform)
+
+ def dump_all(self, documents, stream, _kw=enforce, transform=None):
+ # type: (Any, Union[Path, StreamType], Any, Any) -> Any
+ if self._context_manager:
+ raise NotImplementedError
+ if _kw is not enforce:
+ raise TypeError(
+ '{}.dump(_all) takes two positional arguments but at least '
+ 'three were given ({!r})'.format(self.__class__.__name__, _kw)
+ )
+ self._output = stream
+ self._context_manager = YAMLContextManager(self, transform=transform)
+ for data in documents:
+ self._context_manager.dump(data)
+ self._context_manager.teardown_output()
+ self._output = None
+ self._context_manager = None
+
+ def Xdump_all(self, documents, stream, _kw=enforce, transform=None):
+ # type: (Any, Union[Path, StreamType], Any, Any) -> Any
+ """
+ Serialize a sequence of Python objects into a YAML stream.
+ """
+ if not hasattr(stream, 'write') and hasattr(stream, 'open'):
+ # pathlib.Path() instance
+ with stream.open('w') as fp:
+ return self.dump_all(documents, fp, _kw, transform=transform)
+ if _kw is not enforce:
+ raise TypeError(
+ '{}.dump(_all) takes two positional arguments but at least '
+ 'three were given ({!r})'.format(self.__class__.__name__, _kw)
+ )
+ # The stream should have the methods `write` and possibly `flush`.
+ if self.top_level_colon_align is True:
+ tlca = max([len(str(x)) for x in documents[0]]) # type: Any
+ else:
+ tlca = self.top_level_colon_align
+ if transform is not None:
+ fstream = stream
+ if self.encoding is None:
+ stream = StringIO()
+ else:
+ stream = BytesIO()
+ serializer, representer, emitter = self.get_serializer_representer_emitter(
+ stream, tlca
+ )
+ try:
+ self.serializer.open()
+ for data in documents:
+ try:
+ self.representer.represent(data)
+ except AttributeError:
+ # nprint(dir(dumper._representer))
+ raise
+ self.serializer.close()
+ finally:
+ try:
+ self.emitter.dispose()
+ except AttributeError:
+ raise
+ # self.dumper.dispose() # cyaml
+ delattr(self, '_serializer')
+ delattr(self, '_emitter')
+ if transform:
+ val = stream.getvalue()
+ if self.encoding:
+ val = val.decode(self.encoding)
+ if fstream is None:
+ transform(val)
+ else:
+ fstream.write(transform(val))
+ return None
+
+ def get_serializer_representer_emitter(self, stream, tlca):
+ # type: (StreamType, Any) -> Any
+ # we have only .Serializer to deal with (vs .Reader & .Scanner), much simpler
+ if self.Emitter is not CEmitter:
+ if self.Serializer is None:
+ self.Serializer = ruamel.yaml.serializer.Serializer
+ self.emitter.stream = stream
+ self.emitter.top_level_colon_align = tlca
+ if self.scalar_after_indicator is not None:
+ self.emitter.scalar_after_indicator = self.scalar_after_indicator
+ return self.serializer, self.representer, self.emitter
+ if self.Serializer is not None:
+ # cannot set serializer with CEmitter
+ self.Emitter = ruamel.yaml.emitter.Emitter
+ self.emitter.stream = stream
+ self.emitter.top_level_colon_align = tlca
+ if self.scalar_after_indicator is not None:
+ self.emitter.scalar_after_indicator = self.scalar_after_indicator
+ return self.serializer, self.representer, self.emitter
+ # C routines
+
+ rslvr = (
+ ruamel.yaml.resolver.BaseResolver
+ if 'base' in self.typ
+ else ruamel.yaml.resolver.Resolver
+ )
+
+ class XDumper(CEmitter, self.Representer, rslvr): # type: ignore
+ def __init__(
+ selfx,
+ stream,
+ default_style=None,
+ default_flow_style=None,
+ canonical=None,
+ indent=None,
+ width=None,
+ allow_unicode=None,
+ line_break=None,
+ encoding=None,
+ explicit_start=None,
+ explicit_end=None,
+ version=None,
+ tags=None,
+ block_seq_indent=None,
+ top_level_colon_align=None,
+ prefix_colon=None,
+ ):
+ # type: (StreamType, Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA
+ CEmitter.__init__(
+ selfx,
+ stream,
+ canonical=canonical,
+ indent=indent,
+ width=width,
+ encoding=encoding,
+ allow_unicode=allow_unicode,
+ line_break=line_break,
+ explicit_start=explicit_start,
+ explicit_end=explicit_end,
+ version=version,
+ tags=tags,
+ )
+ selfx._emitter = selfx._serializer = selfx._representer = selfx
+ self.Representer.__init__(
+ selfx, default_style=default_style, default_flow_style=default_flow_style
+ )
+ rslvr.__init__(selfx)
+
+ self._stream = stream
+ dumper = XDumper(
+ stream,
+ default_style=self.default_style,
+ default_flow_style=self.default_flow_style,
+ canonical=self.canonical,
+ indent=self.old_indent,
+ width=self.width,
+ allow_unicode=self.allow_unicode,
+ line_break=self.line_break,
+ explicit_start=self.explicit_start,
+ explicit_end=self.explicit_end,
+ version=self.version,
+ tags=self.tags,
+ )
+ self._emitter = self._serializer = dumper
+ return dumper, dumper, dumper
+
+ # basic types
+ def map(self, **kw):
+ # type: (Any) -> Any
+ if 'rt' in self.typ:
+ from dynaconf.vendor.ruamel.yaml.comments import CommentedMap
+
+ return CommentedMap(**kw)
+ else:
+ return dict(**kw)
+
+ def seq(self, *args):
+ # type: (Any) -> Any
+ if 'rt' in self.typ:
+ from dynaconf.vendor.ruamel.yaml.comments import CommentedSeq
+
+ return CommentedSeq(*args)
+ else:
+ return list(*args)
+
+ # helpers
+ def official_plug_ins(self):
+ # type: () -> Any
+ bd = os.path.dirname(__file__)
+ gpbd = os.path.dirname(os.path.dirname(bd))
+ res = [x.replace(gpbd, "")[1:-3] for x in glob.glob(bd + '/*/__plug_in__.py')]
+ return res
+
+ def register_class(self, cls):
+ # type:(Any) -> Any
+ """
+ register a class for dumping and loading
+ - if it has the attribute yaml_tag use that to register, else use '!' + the class name
+ - if it has the methods to_yaml/from_yaml use those to dump/load, else dump the
+ attributes as a mapping
+ """
+ tag = getattr(cls, 'yaml_tag', '!' + cls.__name__)
+ try:
+ self.representer.add_representer(cls, cls.to_yaml)
+ except AttributeError:
+
+ def t_y(representer, data):
+ # type: (Any, Any) -> Any
+ return representer.represent_yaml_object(
+ tag, data, cls, flow_style=representer.default_flow_style
+ )
+
+ self.representer.add_representer(cls, t_y)
+ try:
+ self.constructor.add_constructor(tag, cls.from_yaml)
+ except AttributeError:
+
+ def f_y(constructor, node):
+ # type: (Any, Any) -> Any
+ return constructor.construct_yaml_object(node, cls)
+
+ self.constructor.add_constructor(tag, f_y)
+ return cls
+
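+ # Editor's sketch (illustrative only, the User class is made up): since
+ # register_class() returns cls it can be used as a decorator; without
+ # to_yaml/from_yaml the instance is dumped as a tagged mapping of its
+ # attributes:
+ #
+ #     import sys
+ #     yaml = YAML()
+ #
+ #     @yaml.register_class
+ #     class User(object):
+ #         yaml_tag = u'!User'
+ #         def __init__(self, name):
+ #             self.name = name
+ #
+ #     yaml.dump(User('bob'), sys.stdout)   # emits a '!User' tagged mapping
+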
+ def parse(self, stream):
+ # type: (StreamTextType) -> Any
+ """
+ Parse a YAML stream and produce parsing events.
+ """
+ _, parser = self.get_constructor_parser(stream)
+ try:
+ while parser.check_event():
+ yield parser.get_event()
+ finally:
+ parser.dispose()
+ try:
+ self._reader.reset_reader()
+ except AttributeError:
+ pass
+ try:
+ self._scanner.reset_scanner()
+ except AttributeError:
+ pass
+
+ # ### context manager
+
+ def __enter__(self):
+ # type: () -> Any
+ self._context_manager = YAMLContextManager(self)
+ return self
+
+ def __exit__(self, typ, value, traceback):
+ # type: (Any, Any, Any) -> None
+ if typ:
+ nprint('typ', typ)
+ self._context_manager.teardown_output()
+ # self._context_manager.teardown_input()
+ self._context_manager = None
+
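+ # Editor's illustration (hedged): as a context manager the instance needs
+ # an output target, and dump() may then be called once per document:
+ #
+ #     import sys
+ #     with YAML(output=sys.stdout) as yaml:
+ #         yaml.dump({'a': 1})
+ #         yaml.dump({'b': 2})
+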
+ # ### backwards compatibility
+ def _indent(self, mapping=None, sequence=None, offset=None):
+ # type: (Any, Any, Any) -> None
+ if mapping is not None:
+ self.map_indent = mapping
+ if sequence is not None:
+ self.sequence_indent = sequence
+ if offset is not None:
+ self.sequence_dash_offset = offset
+
+ @property
+ def indent(self):
+ # type: () -> Any
+ return self._indent
+
+ @indent.setter
+ def indent(self, val):
+ # type: (Any) -> None
+ self.old_indent = val
+
+ @property
+ def block_seq_indent(self):
+ # type: () -> Any
+ return self.sequence_dash_offset
+
+ @block_seq_indent.setter
+ def block_seq_indent(self, val):
+ # type: (Any) -> None
+ self.sequence_dash_offset = val
+
+ def compact(self, seq_seq=None, seq_map=None):
+ # type: (Any, Any) -> None
+ self.compact_seq_seq = seq_seq
+ self.compact_seq_map = seq_map
+
+
+class YAMLContextManager(object):
+ def __init__(self, yaml, transform=None):
+ # type: (Any, Any) -> None # used to be: (Any, Optional[Callable]) -> None
+ self._yaml = yaml
+ self._output_inited = False
+ self._output_path = None
+ self._output = self._yaml._output
+ self._transform = transform
+
+ # self._input_inited = False
+ # self._input = input
+ # self._input_path = None
+ # self._transform = yaml.transform
+ # self._fstream = None
+
+ if not hasattr(self._output, 'write') and hasattr(self._output, 'open'):
+ # pathlib.Path() instance, open with the same mode
+ self._output_path = self._output
+ self._output = self._output_path.open('w')
+
+ # if not hasattr(self._stream, 'write') and hasattr(stream, 'open'):
+ # if not hasattr(self._input, 'read') and hasattr(self._input, 'open'):
+ # # pathlib.Path() instance, open with the same mode
+ # self._input_path = self._input
+ # self._input = self._input_path.open('r')
+
+ if self._transform is not None:
+ self._fstream = self._output
+ if self._yaml.encoding is None:
+ self._output = StringIO()
+ else:
+ self._output = BytesIO()
+
+ def teardown_output(self):
+ # type: () -> None
+ if self._output_inited:
+ self._yaml.serializer.close()
+ else:
+ return
+ try:
+ self._yaml.emitter.dispose()
+ except AttributeError:
+ raise
+ # self.dumper.dispose() # cyaml
+ try:
+ delattr(self._yaml, '_serializer')
+ delattr(self._yaml, '_emitter')
+ except AttributeError:
+ raise
+ if self._transform:
+ val = self._output.getvalue()
+ if self._yaml.encoding:
+ val = val.decode(self._yaml.encoding)
+ if self._fstream is None:
+ self._transform(val)
+ else:
+ self._fstream.write(self._transform(val))
+ self._fstream.flush()
+ self._output = self._fstream # maybe not necessary
+ if self._output_path is not None:
+ self._output.close()
+
+ def init_output(self, first_data):
+ # type: (Any) -> None
+ if self._yaml.top_level_colon_align is True:
+ tlca = max([len(str(x)) for x in first_data]) # type: Any
+ else:
+ tlca = self._yaml.top_level_colon_align
+ self._yaml.get_serializer_representer_emitter(self._output, tlca)
+ self._yaml.serializer.open()
+ self._output_inited = True
+
+ def dump(self, data):
+ # type: (Any) -> None
+ if not self._output_inited:
+ self.init_output(data)
+ try:
+ self._yaml.representer.represent(data)
+ except AttributeError:
+ # nprint(dir(dumper._representer))
+ raise
+
+ # def teardown_input(self):
+ # pass
+ #
+ # def init_input(self):
+ # # set the constructor and parser on YAML() instance
+ # self._yaml.get_constructor_parser(stream)
+ #
+ # def load(self):
+ # if not self._input_inited:
+ # self.init_input()
+ # try:
+ # while self._yaml.constructor.check_data():
+ # yield self._yaml.constructor.get_data()
+ # finally:
+ # parser.dispose()
+ # try:
+ # self._reader.reset_reader() # type: ignore
+ # except AttributeError:
+ # pass
+ # try:
+ # self._scanner.reset_scanner() # type: ignore
+ # except AttributeError:
+ # pass
+
+
+def yaml_object(yml):
+ # type: (Any) -> Any
+ """ decorator for classes that needs to dump/load objects
+ The tag for such objects is taken from the class attribute yaml_tag (or the
+ class name in lowercase in case unavailable)
+ If methods to_yaml and/or from_yaml are available, these are called for dumping resp.
+ loading, default routines (dumping a mapping of the attributes) used otherwise.
+ """
+
+ def yo_deco(cls):
+ # type: (Any) -> Any
+ tag = getattr(cls, 'yaml_tag', '!' + cls.__name__)
+ try:
+ yml.representer.add_representer(cls, cls.to_yaml)
+ except AttributeError:
+
+ def t_y(representer, data):
+ # type: (Any, Any) -> Any
+ return representer.represent_yaml_object(
+ tag, data, cls, flow_style=representer.default_flow_style
+ )
+
+ yml.representer.add_representer(cls, t_y)
+ try:
+ yml.constructor.add_constructor(tag, cls.from_yaml)
+ except AttributeError:
+
+ def f_y(constructor, node):
+ # type: (Any, Any) -> Any
+ return constructor.construct_yaml_object(node, cls)
+
+ yml.constructor.add_constructor(tag, f_y)
+ return cls
+
+ return yo_deco
+
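+# Editor's sketch of the decorator above (Monster is an illustrative name):
+#
+#     import sys
+#     yml = YAML()
+#
+#     @yaml_object(yml)
+#     class Monster(object):
+#         yaml_tag = u'!Monster'
+#         def __init__(self, name):
+#             self.name = name
+#
+#     yml.dump(Monster('dragon'), sys.stdout)
+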
+
+########################################################################################
+
+
+def scan(stream, Loader=Loader):
+ # type: (StreamTextType, Any) -> Any
+ """
+ Scan a YAML stream and produce scanning tokens.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.scanner.check_token():
+ yield loader.scanner.get_token()
+ finally:
+ loader._parser.dispose()
+
+
+def parse(stream, Loader=Loader):
+ # type: (StreamTextType, Any) -> Any
+ """
+ Parse a YAML stream and produce parsing events.
+ """
+ loader = Loader(stream)
+ try:
+ while loader._parser.check_event():
+ yield loader._parser.get_event()
+ finally:
+ loader._parser.dispose()
+
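+# A hedged sketch (editor's addition): iterating parse() yields the event
+# stream for a document, from StreamStartEvent through StreamEndEvent:
+#
+#     for event in parse('a: 1', Loader=SafeLoader):
+#         print(type(event).__name__)
+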
+
+def compose(stream, Loader=Loader):
+ # type: (StreamTextType, Any) -> Any
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding representation tree.
+ """
+ loader = Loader(stream)
+ try:
+ return loader.get_single_node()
+ finally:
+ loader.dispose()
+
+
+def compose_all(stream, Loader=Loader):
+ # type: (StreamTextType, Any) -> Any
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding representation trees.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_node():
+ yield loader._composer.get_node()
+ finally:
+ loader._parser.dispose()
+
+
+def load(stream, Loader=None, version=None, preserve_quotes=None):
+ # type: (StreamTextType, Any, Optional[VersionType], Any) -> Any
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding Python object.
+ """
+ if Loader is None:
+ warnings.warn(UnsafeLoaderWarning.text, UnsafeLoaderWarning, stacklevel=2)
+ Loader = UnsafeLoader
+ loader = Loader(stream, version, preserve_quotes=preserve_quotes)
+ try:
+ return loader._constructor.get_single_data()
+ finally:
+ loader._parser.dispose()
+ try:
+ loader._reader.reset_reader()
+ except AttributeError:
+ pass
+ try:
+ loader._scanner.reset_scanner()
+ except AttributeError:
+ pass
+
+
+def load_all(stream, Loader=None, version=None, preserve_quotes=None):
+ # type: (Optional[StreamTextType], Any, Optional[VersionType], Optional[bool]) -> Any # NOQA
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding Python objects.
+ """
+ if Loader is None:
+ warnings.warn(UnsafeLoaderWarning.text, UnsafeLoaderWarning, stacklevel=2)
+ Loader = UnsafeLoader
+ loader = Loader(stream, version, preserve_quotes=preserve_quotes)
+ try:
+ while loader._constructor.check_data():
+ yield loader._constructor.get_data()
+ finally:
+ loader._parser.dispose()
+ try:
+ loader._reader.reset_reader()
+ except AttributeError:
+ pass
+ try:
+ loader._scanner.reset_scanner()
+ except AttributeError:
+ pass
+
+
+def safe_load(stream, version=None):
+ # type: (StreamTextType, Optional[VersionType]) -> Any
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding Python object.
+ Resolve only basic YAML tags.
+ """
+ return load(stream, SafeLoader, version)
+
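+# Editor's sketch: safe_load() resolves only standard tags, making it the
+# sensible choice for untrusted input:
+#
+#     data = safe_load('retries: 3\nhosts: [a, b]')
+#     assert data == {'retries': 3, 'hosts': ['a', 'b']}
+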
+
+def safe_load_all(stream, version=None):
+ # type: (StreamTextType, Optional[VersionType]) -> Any
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding Python objects.
+ Resolve only basic YAML tags.
+ """
+ return load_all(stream, SafeLoader, version)
+
+
+def round_trip_load(stream, version=None, preserve_quotes=None):
+ # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> Any
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding Python object.
+ Resolve only basic YAML tags.
+ """
+ return load(stream, RoundTripLoader, version, preserve_quotes=preserve_quotes)
+
+
+def round_trip_load_all(stream, version=None, preserve_quotes=None):
+ # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> Any
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding Python objects.
+ Resolve only basic YAML tags.
+ """
+ return load_all(stream, RoundTripLoader, version, preserve_quotes=preserve_quotes)
+
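+# A hedged round-trip sketch (editor's addition): round_trip_load() keeps
+# comments, quoting and key order, so round_trip_dump() can reproduce them:
+#
+#     import sys
+#     data = round_trip_load('a: 1  # keep me\nb: 2\n')
+#     round_trip_dump(data, sys.stdout)   # the comment survives
+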
+
+def emit(
+ events,
+ stream=None,
+ Dumper=Dumper,
+ canonical=None,
+ indent=None,
+ width=None,
+ allow_unicode=None,
+ line_break=None,
+):
+ # type: (Any, Optional[StreamType], Any, Optional[bool], Union[int, None], Optional[int], Optional[bool], Any) -> Any # NOQA
+ """
+ Emit YAML parsing events into a stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ stream = StringIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(
+ stream,
+ canonical=canonical,
+ indent=indent,
+ width=width,
+ allow_unicode=allow_unicode,
+ line_break=line_break,
+ )
+ try:
+ for event in events:
+ dumper.emit(event)
+ finally:
+ try:
+ dumper._emitter.dispose()
+ except AttributeError:
+ raise
+ dumper.dispose() # cyaml
+ if getvalue is not None:
+ return getvalue()
+
+
+enc = None if PY3 else 'utf-8'
+
+
+def serialize_all(
+ nodes,
+ stream=None,
+ Dumper=Dumper,
+ canonical=None,
+ indent=None,
+ width=None,
+ allow_unicode=None,
+ line_break=None,
+ encoding=enc,
+ explicit_start=None,
+ explicit_end=None,
+ version=None,
+ tags=None,
+):
+ # type: (Any, Optional[StreamType], Any, Any, Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Optional[VersionType], Any) -> Any # NOQA
+ """
+ Serialize a sequence of representation trees into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ if encoding is None:
+ stream = StringIO()
+ else:
+ stream = BytesIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(
+ stream,
+ canonical=canonical,
+ indent=indent,
+ width=width,
+ allow_unicode=allow_unicode,
+ line_break=line_break,
+ encoding=encoding,
+ version=version,
+ tags=tags,
+ explicit_start=explicit_start,
+ explicit_end=explicit_end,
+ )
+ try:
+ dumper._serializer.open()
+ for node in nodes:
+ dumper.serialize(node)
+ dumper._serializer.close()
+ finally:
+ try:
+ dumper._emitter.dispose()
+ except AttributeError:
+ raise
+ dumper.dispose() # cyaml
+ if getvalue is not None:
+ return getvalue()
+
+
+def serialize(node, stream=None, Dumper=Dumper, **kwds):
+ # type: (Any, Optional[StreamType], Any, Any) -> Any
+ """
+ Serialize a representation tree into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ return serialize_all([node], stream, Dumper=Dumper, **kwds)
+
+
+def dump_all(
+ documents,
+ stream=None,
+ Dumper=Dumper,
+ default_style=None,
+ default_flow_style=None,
+ canonical=None,
+ indent=None,
+ width=None,
+ allow_unicode=None,
+ line_break=None,
+ encoding=enc,
+ explicit_start=None,
+ explicit_end=None,
+ version=None,
+ tags=None,
+ block_seq_indent=None,
+ top_level_colon_align=None,
+ prefix_colon=None,
+):
+ # type: (Any, Optional[StreamType], Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> Optional[str] # NOQA
+ """
+ Serialize a sequence of Python objects into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if top_level_colon_align is True:
+ top_level_colon_align = max([len(str(x)) for x in documents[0]])
+ if stream is None:
+ if encoding is None:
+ stream = StringIO()
+ else:
+ stream = BytesIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(
+ stream,
+ default_style=default_style,
+ default_flow_style=default_flow_style,
+ canonical=canonical,
+ indent=indent,
+ width=width,
+ allow_unicode=allow_unicode,
+ line_break=line_break,
+ encoding=encoding,
+ explicit_start=explicit_start,
+ explicit_end=explicit_end,
+ version=version,
+ tags=tags,
+ block_seq_indent=block_seq_indent,
+ top_level_colon_align=top_level_colon_align,
+ prefix_colon=prefix_colon,
+ )
+ try:
+ dumper._serializer.open()
+ for data in documents:
+ try:
+ dumper._representer.represent(data)
+ except AttributeError:
+ # nprint(dir(dumper._representer))
+ raise
+ dumper._serializer.close()
+ finally:
+ try:
+ dumper._emitter.dispose()
+ except AttributeError:
+ raise
+ dumper.dispose() # cyaml
+ if getvalue is not None:
+ return getvalue()
+ return None
+
+
+def dump(
+ data,
+ stream=None,
+ Dumper=Dumper,
+ default_style=None,
+ default_flow_style=None,
+ canonical=None,
+ indent=None,
+ width=None,
+ allow_unicode=None,
+ line_break=None,
+ encoding=enc,
+ explicit_start=None,
+ explicit_end=None,
+ version=None,
+ tags=None,
+ block_seq_indent=None,
+):
+ # type: (Any, Optional[StreamType], Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Optional[VersionType], Any, Any) -> Optional[str] # NOQA
+ """
+ Serialize a Python object into a YAML stream.
+ If stream is None, return the produced string instead.
+
+ default_style ∈ None, '', '"', "'", '|', '>'
+
+ """
+ return dump_all(
+ [data],
+ stream,
+ Dumper=Dumper,
+ default_style=default_style,
+ default_flow_style=default_flow_style,
+ canonical=canonical,
+ indent=indent,
+ width=width,
+ allow_unicode=allow_unicode,
+ line_break=line_break,
+ encoding=encoding,
+ explicit_start=explicit_start,
+ explicit_end=explicit_end,
+ version=version,
+ tags=tags,
+ block_seq_indent=block_seq_indent,
+ )
+
+
+def safe_dump_all(documents, stream=None, **kwds):
+ # type: (Any, Optional[StreamType], Any) -> Optional[str]
+ """
+ Serialize a sequence of Python objects into a YAML stream.
+ Produce only basic YAML tags.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all(documents, stream, Dumper=SafeDumper, **kwds)
+
+
+def safe_dump(data, stream=None, **kwds):
+ # type: (Any, Optional[StreamType], Any) -> Optional[str]
+ """
+ Serialize a Python object into a YAML stream.
+ Produce only basic YAML tags.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all([data], stream, Dumper=SafeDumper, **kwds)
+
+
+def round_trip_dump(
+ data,
+ stream=None,
+ Dumper=RoundTripDumper,
+ default_style=None,
+ default_flow_style=None,
+ canonical=None,
+ indent=None,
+ width=None,
+ allow_unicode=None,
+ line_break=None,
+ encoding=enc,
+ explicit_start=None,
+ explicit_end=None,
+ version=None,
+ tags=None,
+ block_seq_indent=None,
+ top_level_colon_align=None,
+ prefix_colon=None,
+):
+ # type: (Any, Optional[StreamType], Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Optional[VersionType], Any, Any, Any, Any) -> Optional[str] # NOQA
+ allow_unicode = True if allow_unicode is None else allow_unicode
+ return dump_all(
+ [data],
+ stream,
+ Dumper=Dumper,
+ default_style=default_style,
+ default_flow_style=default_flow_style,
+ canonical=canonical,
+ indent=indent,
+ width=width,
+ allow_unicode=allow_unicode,
+ line_break=line_break,
+ encoding=encoding,
+ explicit_start=explicit_start,
+ explicit_end=explicit_end,
+ version=version,
+ tags=tags,
+ block_seq_indent=block_seq_indent,
+ top_level_colon_align=top_level_colon_align,
+ prefix_colon=prefix_colon,
+ )
+
+
+# Loader/Dumper are no longer composites; to get to the associated
+# Resolver()/Representer(), etc., you need to instantiate the class
+
+
+def add_implicit_resolver(
+ tag, regexp, first=None, Loader=None, Dumper=None, resolver=Resolver
+):
+ # type: (Any, Any, Any, Any, Any, Any) -> None
+ """
+ Add an implicit scalar detector.
+ If an implicit scalar value matches the given regexp,
+ the corresponding tag is assigned to the scalar.
+ first is a sequence of possible initial characters or None.
+ """
+ if Loader is None and Dumper is None:
+ resolver.add_implicit_resolver(tag, regexp, first)
+ return
+ if Loader:
+ if hasattr(Loader, 'add_implicit_resolver'):
+ Loader.add_implicit_resolver(tag, regexp, first)
+ elif issubclass(
+ Loader, (BaseLoader, SafeLoader, ruamel.yaml.loader.Loader, RoundTripLoader)
+ ):
+ Resolver.add_implicit_resolver(tag, regexp, first)
+ else:
+ raise NotImplementedError
+ if Dumper:
+ if hasattr(Dumper, 'add_implicit_resolver'):
+ Dumper.add_implicit_resolver(tag, regexp, first)
+ elif issubclass(
+ Dumper, (BaseDumper, SafeDumper, ruamel.yaml.dumper.Dumper, RoundTripDumper)
+ ):
+ Resolver.add_implicit_resolver(tag, regexp, first)
+ else:
+ raise NotImplementedError
+
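+# Editor's sketch (the tag and regexp are made up): with no Loader/Dumper
+# given, the pattern is registered on the shared Resolver, so bare scalars
+# such as 100C would load under the '!celsius' tag:
+#
+#     import re
+#     add_implicit_resolver(u'!celsius', re.compile(r'^[0-9]+C$'),
+#                           first=list('0123456789'))
+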
+
+# this code currently not tested
+def add_path_resolver(tag, path, kind=None, Loader=None, Dumper=None, resolver=Resolver):
+ # type: (Any, Any, Any, Any, Any, Any) -> None
+ """
+ Add a path based resolver for the given tag.
+ A path is a list of keys that forms a path
+ to a node in the representation tree.
+ Keys can be string values, integers, or None.
+ """
+ if Loader is None and Dumper is None:
+ resolver.add_path_resolver(tag, path, kind)
+ return
+ if Loader:
+ if hasattr(Loader, 'add_path_resolver'):
+ Loader.add_path_resolver(tag, path, kind)
+ elif issubclass(
+ Loader, (BaseLoader, SafeLoader, ruamel.yaml.loader.Loader, RoundTripLoader)
+ ):
+ Resolver.add_path_resolver(tag, path, kind)
+ else:
+ raise NotImplementedError
+ if Dumper:
+ if hasattr(Dumper, 'add_path_resolver'):
+ Dumper.add_path_resolver(tag, path, kind)
+ elif issubclass(
+ Dumper, (BaseDumper, SafeDumper, ruamel.yaml.dumper.Dumper, RoundTripDumper)
+ ):
+ Resolver.add_path_resolver(tag, path, kind)
+ else:
+ raise NotImplementedError
+
+
+def add_constructor(tag, object_constructor, Loader=None, constructor=Constructor):
+ # type: (Any, Any, Any, Any) -> None
+ """
+ Add an object constructor for the given tag.
+ object_constructor is a function that accepts a Loader instance
+ and a node object and produces the corresponding Python object.
+ """
+ if Loader is None:
+ constructor.add_constructor(tag, object_constructor)
+ else:
+ if hasattr(Loader, 'add_constructor'):
+ Loader.add_constructor(tag, object_constructor)
+ return
+ if issubclass(Loader, BaseLoader):
+ BaseConstructor.add_constructor(tag, object_constructor)
+ elif issubclass(Loader, SafeLoader):
+ SafeConstructor.add_constructor(tag, object_constructor)
+ elif issubclass(Loader, ruamel.yaml.loader.Loader):
+ Constructor.add_constructor(tag, object_constructor)
+ elif issubclass(Loader, RoundTripLoader):
+ RoundTripConstructor.add_constructor(tag, object_constructor)
+ else:
+ raise NotImplementedError
+
+
+def add_multi_constructor(tag_prefix, multi_constructor, Loader=None, constructor=Constructor):
+ # type: (Any, Any, Any, Any) -> None
+ """
+ Add a multi-constructor for the given tag prefix.
+ Multi-constructor is called for a node if its tag starts with tag_prefix.
+ Multi-constructor accepts a Loader instance, a tag suffix,
+ and a node object and produces the corresponding Python object.
+ """
+ if Loader is None:
+ constructor.add_multi_constructor(tag_prefix, multi_constructor)
+ else:
+ if False and hasattr(Loader, 'add_multi_constructor'):
+ Loader.add_multi_constructor(tag_prefix, multi_constructor)
+ return
+ if issubclass(Loader, BaseLoader):
+ BaseConstructor.add_multi_constructor(tag_prefix, multi_constructor)
+ elif issubclass(Loader, SafeLoader):
+ SafeConstructor.add_multi_constructor(tag_prefix, multi_constructor)
+ elif issubclass(Loader, ruamel.yaml.loader.Loader):
+ Constructor.add_multi_constructor(tag_prefix, multi_constructor)
+ elif issubclass(Loader, RoundTripLoader):
+ RoundTripConstructor.add_multi_constructor(tag_prefix, multi_constructor)
+ else:
+ raise NotImplementedError
+
+
+def add_representer(data_type, object_representer, Dumper=None, representer=Representer):
+ # type: (Any, Any, Any, Any) -> None
+ """
+ Add a representer for the given type.
+ object_representer is a function accepting a Dumper instance
+ and an instance of the given data type
+ and producing the corresponding representation node.
+ """
+ if Dumper is None:
+ representer.add_representer(data_type, object_representer)
+ else:
+ if hasattr(Dumper, 'add_representer'):
+ Dumper.add_representer(data_type, object_representer)
+ return
+ if issubclass(Dumper, BaseDumper):
+ BaseRepresenter.add_representer(data_type, object_representer)
+ elif issubclass(Dumper, SafeDumper):
+ SafeRepresenter.add_representer(data_type, object_representer)
+ elif issubclass(Dumper, ruamel.yaml.dumper.Dumper):
+ Representer.add_representer(data_type, object_representer)
+ elif issubclass(Dumper, RoundTripDumper):
+ RoundTripRepresenter.add_representer(data_type, object_representer)
+ else:
+ raise NotImplementedError
+
+
+# this code currently not tested
+def add_multi_representer(data_type, multi_representer, Dumper=None, representer=Representer):
+ # type: (Any, Any, Any, Any) -> None
+ """
+ Add a multi-representer for the given type.
+ multi_representer is a function accepting a Dumper instance
+ and an instance of the given data type or subtype
+ and producing the corresponding representation node.
+ """
+ if Dumper is None:
+ representer.add_multi_representer(data_type, multi_representer)
+ else:
+ if hasattr(Dumper, 'add_multi_representer'):
+ Dumper.add_multi_representer(data_type, multi_representer)
+ return
+ if issubclass(Dumper, BaseDumper):
+ BaseRepresenter.add_multi_representer(data_type, multi_representer)
+ elif issubclass(Dumper, SafeDumper):
+ SafeRepresenter.add_multi_representer(data_type, multi_representer)
+ elif issubclass(Dumper, ruamel.yaml.dumper.Dumper):
+ Representer.add_multi_representer(data_type, multi_representer)
+ elif issubclass(Dumper, RoundTripDumper):
+ RoundTripRepresenter.add_multi_representer(data_type, multi_representer)
+ else:
+ raise NotImplementedError
+
+
+class YAMLObjectMetaclass(type):
+ """
+ The metaclass for YAMLObject.
+ """
+
+ def __init__(cls, name, bases, kwds):
+ # type: (Any, Any, Any) -> None
+ super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
+ if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
+ cls.yaml_constructor.add_constructor(cls.yaml_tag, cls.from_yaml) # type: ignore
+ cls.yaml_representer.add_representer(cls, cls.to_yaml) # type: ignore
+
+
+class YAMLObject(with_metaclass(YAMLObjectMetaclass)): # type: ignore
+ """
+ An object that can dump itself to a YAML stream
+ and load itself from a YAML stream.
+ """
+
+ __slots__ = () # no direct instantiation, so allow immutable subclasses
+
+ yaml_constructor = Constructor
+ yaml_representer = Representer
+
+ yaml_tag = None # type: Any
+ yaml_flow_style = None # type: Any
+
+ @classmethod
+ def from_yaml(cls, constructor, node):
+ # type: (Any, Any) -> Any
+ """
+ Convert a representation node to a Python object.
+ """
+ return constructor.construct_yaml_object(node, cls)
+
+ @classmethod
+ def to_yaml(cls, representer, data):
+ # type: (Any, Any) -> Any
+ """
+ Convert a Python object to a representation node.
+ """
+ return representer.represent_yaml_object(
+ cls.yaml_tag, data, cls, flow_style=cls.yaml_flow_style
+ )
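+
+# Editor's sketch of YAMLObject (Point is a hypothetical subclass): setting
+# yaml_tag makes the metaclass register from_yaml/to_yaml with the (unsafe)
+# Constructor/Representer pair above:
+#
+#     class Point(YAMLObject):
+#         yaml_tag = u'!Point'
+#         def __init__(self, x, y):
+#             self.x, self.y = x, y
+#
+#     print(dump(Point(1, 2)))   # roughly: !Point {x: 1, y: 2}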
diff --git a/libs/dynaconf/vendor/ruamel/yaml/nodes.py b/libs/dynaconf/vendor/ruamel/yaml/nodes.py
new file mode 100644
index 000000000..da86e9c85
--- /dev/null
+++ b/libs/dynaconf/vendor/ruamel/yaml/nodes.py
@@ -0,0 +1,131 @@
+# coding: utf-8
+
+from __future__ import print_function
+
+import sys
+from .compat import string_types
+
+if False: # MYPY
+ from typing import Dict, Any, Text # NOQA
+
+
+class Node(object):
+ __slots__ = 'tag', 'value', 'start_mark', 'end_mark', 'comment', 'anchor'
+
+ def __init__(self, tag, value, start_mark, end_mark, comment=None, anchor=None):
+ # type: (Any, Any, Any, Any, Any, Any) -> None
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.comment = comment
+ self.anchor = anchor
+
+ def __repr__(self):
+ # type: () -> str
+ value = self.value
+ # if isinstance(value, list):
+ # if len(value) == 0:
+ # value = '<empty>'
+ # elif len(value) == 1:
+ # value = '<1 item>'
+ # else:
+ # value = '<%d items>' % len(value)
+ # else:
+ # if len(value) > 75:
+ # value = repr(value[:70]+u' ... ')
+ # else:
+ # value = repr(value)
+ value = repr(value)
+ return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value)
+
+ def dump(self, indent=0):
+ # type: (int) -> None
+ if isinstance(self.value, string_types):
+ sys.stdout.write(
+ '{}{}(tag={!r}, value={!r})\n'.format(
+ ' ' * indent, self.__class__.__name__, self.tag, self.value
+ )
+ )
+ if self.comment:
+ sys.stdout.write(' {}comment: {})\n'.format(' ' * indent, self.comment))
+ return
+ sys.stdout.write(
+ '{}{}(tag={!r})\n'.format(' ' * indent, self.__class__.__name__, self.tag)
+ )
+ if self.comment:
+ sys.stdout.write(' {}comment: {})\n'.format(' ' * indent, self.comment))
+ for v in self.value:
+ if isinstance(v, tuple):
+ for v1 in v:
+ v1.dump(indent + 1)
+ elif isinstance(v, Node):
+ v.dump(indent + 1)
+ else:
+ sys.stdout.write('Node value type? {}\n'.format(type(v)))
+
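+# Editor's note, a hedged sketch: Node.dump() is a debugging aid; composing
+# a document and dumping its node tree prints tags and values:
+#
+#     from dynaconf.vendor.ruamel.yaml.main import compose
+#     compose('a: [1, 2]').dump()
+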
+
+class ScalarNode(Node):
+ """
+ styles:
+ ? -> set() ? key, no value
+ " -> double quoted
+ ' -> single quoted
+ | -> literal style
+ > -> folding style
+ """
+
+ __slots__ = ('style',)
+ id = 'scalar'
+
+ def __init__(
+ self, tag, value, start_mark=None, end_mark=None, style=None, comment=None, anchor=None
+ ):
+ # type: (Any, Any, Any, Any, Any, Any, Any) -> None
+ Node.__init__(self, tag, value, start_mark, end_mark, comment=comment, anchor=anchor)
+ self.style = style
+
+
+class CollectionNode(Node):
+ __slots__ = ('flow_style',)
+
+ def __init__(
+ self,
+ tag,
+ value,
+ start_mark=None,
+ end_mark=None,
+ flow_style=None,
+ comment=None,
+ anchor=None,
+ ):
+ # type: (Any, Any, Any, Any, Any, Any, Any) -> None
+ Node.__init__(self, tag, value, start_mark, end_mark, comment=comment)
+ self.flow_style = flow_style
+ self.anchor = anchor
+
+
+class SequenceNode(CollectionNode):
+ __slots__ = ()
+ id = 'sequence'
+
+
+class MappingNode(CollectionNode):
+ __slots__ = ('merge',)
+ id = 'mapping'
+
+ def __init__(
+ self,
+ tag,
+ value,
+ start_mark=None,
+ end_mark=None,
+ flow_style=None,
+ comment=None,
+ anchor=None,
+ ):
+ # type: (Any, Any, Any, Any, Any, Any, Any) -> None
+ CollectionNode.__init__(
+ self, tag, value, start_mark, end_mark, flow_style, comment, anchor
+ )
+ self.merge = None
diff --git a/libs/dynaconf/vendor/ruamel/yaml/parser.py b/libs/dynaconf/vendor/ruamel/yaml/parser.py
new file mode 100644
index 000000000..3d67a1c4d
--- /dev/null
+++ b/libs/dynaconf/vendor/ruamel/yaml/parser.py
@@ -0,0 +1,802 @@
+# coding: utf-8
+
+from __future__ import absolute_import
+
+# The following YAML grammar is LL(1) and is parsed by a recursive descent
+# parser.
+#
+# stream ::= STREAM-START implicit_document? explicit_document*
+# STREAM-END
+# implicit_document ::= block_node DOCUMENT-END*
+# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+# block_node_or_indentless_sequence ::=
+# ALIAS
+# | properties (block_content |
+# indentless_block_sequence)?
+# | block_content
+# | indentless_block_sequence
+# block_node ::= ALIAS
+# | properties block_content?
+# | block_content
+# flow_node ::= ALIAS
+# | properties flow_content?
+# | flow_content
+# properties ::= TAG ANCHOR? | ANCHOR TAG?
+# block_content ::= block_collection | flow_collection | SCALAR
+# flow_content ::= flow_collection | SCALAR
+# block_collection ::= block_sequence | block_mapping
+# flow_collection ::= flow_sequence | flow_mapping
+# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)*
+# BLOCK-END
+# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+# block_mapping ::= BLOCK-MAPPING_START
+# ((KEY block_node_or_indentless_sequence?)?
+# (VALUE block_node_or_indentless_sequence?)?)*
+# BLOCK-END
+# flow_sequence ::= FLOW-SEQUENCE-START
+# (flow_sequence_entry FLOW-ENTRY)*
+# flow_sequence_entry?
+# FLOW-SEQUENCE-END
+# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+# flow_mapping ::= FLOW-MAPPING-START
+# (flow_mapping_entry FLOW-ENTRY)*
+# flow_mapping_entry?
+# FLOW-MAPPING-END
+# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+#
+# FIRST sets:
+#
+# stream: { STREAM-START }
+# explicit_document: { DIRECTIVE DOCUMENT-START }
+# implicit_document: FIRST(block_node)
+# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START
+# BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START
+# FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_sequence: { BLOCK-SEQUENCE-START }
+# block_mapping: { BLOCK-MAPPING-START }
+# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR
+# BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START
+# FLOW-MAPPING-START BLOCK-ENTRY }
+# indentless_sequence: { ENTRY }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_sequence: { FLOW-SEQUENCE-START }
+# flow_mapping: { FLOW-MAPPING-START }
+# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START
+# FLOW-MAPPING-START KEY }
+# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START
+# FLOW-MAPPING-START KEY }
+
+# the full import path is needed, as pkg_resources tries to load parser.py in
+# __init__.py only to do nothing with the package afterwards
+# (this also matters for Jython)
+
+
+from .error import MarkedYAMLError
+from .tokens import * # NOQA
+from .events import * # NOQA
+from .scanner import Scanner, RoundTripScanner, ScannerError # NOQA
+from .compat import utf8, nprint, nprintf # NOQA
+
+if False: # MYPY
+ from typing import Any, Dict, Optional, List # NOQA
+
+__all__ = ['Parser', 'RoundTripParser', 'ParserError']
+
+
+class ParserError(MarkedYAMLError):
+ pass
+
+
+class Parser(object):
+    # Since writing a recursive descent parser is a straightforward task, we
+ # do not give many comments here.
+
+ DEFAULT_TAGS = {u'!': u'!', u'!!': u'tag:yaml.org,2002:'}
+
+ def __init__(self, loader):
+ # type: (Any) -> None
+ self.loader = loader
+ if self.loader is not None and getattr(self.loader, '_parser', None) is None:
+ self.loader._parser = self
+ self.reset_parser()
+
+ def reset_parser(self):
+ # type: () -> None
+ # Reset the state attributes (to clear self-references)
+ self.current_event = None
+ self.tag_handles = {} # type: Dict[Any, Any]
+ self.states = [] # type: List[Any]
+ self.marks = [] # type: List[Any]
+ self.state = self.parse_stream_start # type: Any
+
+ def dispose(self):
+ # type: () -> None
+ self.reset_parser()
+
+ @property
+ def scanner(self):
+ # type: () -> Any
+ if hasattr(self.loader, 'typ'):
+ return self.loader.scanner
+ return self.loader._scanner
+
+ @property
+ def resolver(self):
+ # type: () -> Any
+ if hasattr(self.loader, 'typ'):
+ return self.loader.resolver
+ return self.loader._resolver
+
+ def check_event(self, *choices):
+ # type: (Any) -> bool
+ # Check the type of the next event.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ if self.current_event is not None:
+ if not choices:
+ return True
+ for choice in choices:
+ if isinstance(self.current_event, choice):
+ return True
+ return False
+
+ def peek_event(self):
+ # type: () -> Any
+ # Get the next event.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ return self.current_event
+
+ def get_event(self):
+ # type: () -> Any
+ # Get the next event and proceed further.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ value = self.current_event
+ self.current_event = None
+ return value
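+    # Note (illustrative, not upstream): check_event/peek_event/get_event above
+    # implement a one-event lookahead buffer over self.state(); a typical
+    # consumer loop looks like
+    #
+    #     while not parser.check_event(StreamEndEvent):
+    #         event = parser.get_event()
+    #         ...  # handle the event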
+
+ # stream ::= STREAM-START implicit_document? explicit_document*
+ # STREAM-END
+ # implicit_document ::= block_node DOCUMENT-END*
+ # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+
+ def parse_stream_start(self):
+ # type: () -> Any
+ # Parse the stream start.
+ token = self.scanner.get_token()
+ token.move_comment(self.scanner.peek_token())
+ event = StreamStartEvent(token.start_mark, token.end_mark, encoding=token.encoding)
+
+ # Prepare the next state.
+ self.state = self.parse_implicit_document_start
+
+ return event
+
+ def parse_implicit_document_start(self):
+ # type: () -> Any
+ # Parse an implicit document.
+ if not self.scanner.check_token(DirectiveToken, DocumentStartToken, StreamEndToken):
+ self.tag_handles = self.DEFAULT_TAGS
+ token = self.scanner.peek_token()
+ start_mark = end_mark = token.start_mark
+ event = DocumentStartEvent(start_mark, end_mark, explicit=False)
+
+ # Prepare the next state.
+ self.states.append(self.parse_document_end)
+ self.state = self.parse_block_node
+
+ return event
+
+ else:
+ return self.parse_document_start()
+
+ def parse_document_start(self):
+ # type: () -> Any
+ # Parse any extra document end indicators.
+ while self.scanner.check_token(DocumentEndToken):
+ self.scanner.get_token()
+ # Parse an explicit document.
+ if not self.scanner.check_token(StreamEndToken):
+ token = self.scanner.peek_token()
+ start_mark = token.start_mark
+ version, tags = self.process_directives()
+ if not self.scanner.check_token(DocumentStartToken):
+ raise ParserError(
+ None,
+ None,
+ "expected '<document start>', but found %r" % self.scanner.peek_token().id,
+ self.scanner.peek_token().start_mark,
+ )
+ token = self.scanner.get_token()
+ end_mark = token.end_mark
+ # if self.loader is not None and \
+ # end_mark.line != self.scanner.peek_token().start_mark.line:
+ # self.loader.scalar_after_indicator = False
+ event = DocumentStartEvent(
+ start_mark, end_mark, explicit=True, version=version, tags=tags
+ ) # type: Any
+ self.states.append(self.parse_document_end)
+ self.state = self.parse_document_content
+ else:
+ # Parse the end of the stream.
+ token = self.scanner.get_token()
+ event = StreamEndEvent(token.start_mark, token.end_mark, comment=token.comment)
+ assert not self.states
+ assert not self.marks
+ self.state = None
+ return event
+
+ def parse_document_end(self):
+ # type: () -> Any
+ # Parse the document end.
+ token = self.scanner.peek_token()
+ start_mark = end_mark = token.start_mark
+ explicit = False
+ if self.scanner.check_token(DocumentEndToken):
+ token = self.scanner.get_token()
+ end_mark = token.end_mark
+ explicit = True
+ event = DocumentEndEvent(start_mark, end_mark, explicit=explicit)
+
+ # Prepare the next state.
+ if self.resolver.processing_version == (1, 1):
+ self.state = self.parse_document_start
+ else:
+ self.state = self.parse_implicit_document_start
+
+ return event
+
+ def parse_document_content(self):
+ # type: () -> Any
+ if self.scanner.check_token(
+ DirectiveToken, DocumentStartToken, DocumentEndToken, StreamEndToken
+ ):
+ event = self.process_empty_scalar(self.scanner.peek_token().start_mark)
+ self.state = self.states.pop()
+ return event
+ else:
+ return self.parse_block_node()
+
+ def process_directives(self):
+ # type: () -> Any
+ yaml_version = None
+ self.tag_handles = {}
+ while self.scanner.check_token(DirectiveToken):
+ token = self.scanner.get_token()
+ if token.name == u'YAML':
+ if yaml_version is not None:
+ raise ParserError(
+ None, None, 'found duplicate YAML directive', token.start_mark
+ )
+ major, minor = token.value
+ if major != 1:
+ raise ParserError(
+ None,
+ None,
+                    'found incompatible YAML document (version 1.* is required)',
+ token.start_mark,
+ )
+ yaml_version = token.value
+ elif token.name == u'TAG':
+ handle, prefix = token.value
+ if handle in self.tag_handles:
+ raise ParserError(
+ None, None, 'duplicate tag handle %r' % utf8(handle), token.start_mark
+ )
+ self.tag_handles[handle] = prefix
+ if bool(self.tag_handles):
+ value = yaml_version, self.tag_handles.copy() # type: Any
+ else:
+ value = yaml_version, None
+ if self.loader is not None and hasattr(self.loader, 'tags'):
+ self.loader.version = yaml_version
+ if self.loader.tags is None:
+ self.loader.tags = {}
+ for k in self.tag_handles:
+ self.loader.tags[k] = self.tag_handles[k]
+ for key in self.DEFAULT_TAGS:
+ if key not in self.tag_handles:
+ self.tag_handles[key] = self.DEFAULT_TAGS[key]
+ return value
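+    # For example (illustrative): the directives
+    #
+    #     %YAML 1.2
+    #     %TAG !e! tag:example.com,2000:app/
+    #     ---
+    #     !e!foo "bar"
+    #
+    # populate tag_handles so that transform_tag('!e!', 'foo') below resolves
+    # to 'tag:example.com,2000:app/foo'.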
+
+ # block_node_or_indentless_sequence ::= ALIAS
+ # | properties (block_content | indentless_block_sequence)?
+ # | block_content
+ # | indentless_block_sequence
+ # block_node ::= ALIAS
+ # | properties block_content?
+ # | block_content
+ # flow_node ::= ALIAS
+ # | properties flow_content?
+ # | flow_content
+ # properties ::= TAG ANCHOR? | ANCHOR TAG?
+ # block_content ::= block_collection | flow_collection | SCALAR
+ # flow_content ::= flow_collection | SCALAR
+ # block_collection ::= block_sequence | block_mapping
+ # flow_collection ::= flow_sequence | flow_mapping
+
+ def parse_block_node(self):
+ # type: () -> Any
+ return self.parse_node(block=True)
+
+ def parse_flow_node(self):
+ # type: () -> Any
+ return self.parse_node()
+
+ def parse_block_node_or_indentless_sequence(self):
+ # type: () -> Any
+ return self.parse_node(block=True, indentless_sequence=True)
+
+ def transform_tag(self, handle, suffix):
+ # type: (Any, Any) -> Any
+ return self.tag_handles[handle] + suffix
+
+ def parse_node(self, block=False, indentless_sequence=False):
+ # type: (bool, bool) -> Any
+ if self.scanner.check_token(AliasToken):
+ token = self.scanner.get_token()
+ event = AliasEvent(token.value, token.start_mark, token.end_mark) # type: Any
+ self.state = self.states.pop()
+ return event
+
+ anchor = None
+ tag = None
+ start_mark = end_mark = tag_mark = None
+ if self.scanner.check_token(AnchorToken):
+ token = self.scanner.get_token()
+ start_mark = token.start_mark
+ end_mark = token.end_mark
+ anchor = token.value
+ if self.scanner.check_token(TagToken):
+ token = self.scanner.get_token()
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ tag = token.value
+ elif self.scanner.check_token(TagToken):
+ token = self.scanner.get_token()
+ start_mark = tag_mark = token.start_mark
+ end_mark = token.end_mark
+ tag = token.value
+ if self.scanner.check_token(AnchorToken):
+ token = self.scanner.get_token()
+ start_mark = tag_mark = token.start_mark
+ end_mark = token.end_mark
+ anchor = token.value
+ if tag is not None:
+ handle, suffix = tag
+ if handle is not None:
+ if handle not in self.tag_handles:
+ raise ParserError(
+ 'while parsing a node',
+ start_mark,
+ 'found undefined tag handle %r' % utf8(handle),
+ tag_mark,
+ )
+ tag = self.transform_tag(handle, suffix)
+ else:
+ tag = suffix
+ # if tag == u'!':
+ # raise ParserError("while parsing a node", start_mark,
+ # "found non-specific tag '!'", tag_mark,
+ # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag'
+ # and share your opinion.")
+ if start_mark is None:
+ start_mark = end_mark = self.scanner.peek_token().start_mark
+ event = None
+ implicit = tag is None or tag == u'!'
+ if indentless_sequence and self.scanner.check_token(BlockEntryToken):
+ comment = None
+ pt = self.scanner.peek_token()
+ if pt.comment and pt.comment[0]:
+ comment = [pt.comment[0], []]
+ pt.comment[0] = None
+ end_mark = self.scanner.peek_token().end_mark
+ event = SequenceStartEvent(
+ anchor, tag, implicit, start_mark, end_mark, flow_style=False, comment=comment
+ )
+ self.state = self.parse_indentless_sequence_entry
+ return event
+
+ if self.scanner.check_token(ScalarToken):
+ token = self.scanner.get_token()
+ # self.scanner.peek_token_same_line_comment(token)
+ end_mark = token.end_mark
+ if (token.plain and tag is None) or tag == u'!':
+ implicit = (True, False)
+ elif tag is None:
+ implicit = (False, True)
+ else:
+ implicit = (False, False)
+ # nprint('se', token.value, token.comment)
+ event = ScalarEvent(
+ anchor,
+ tag,
+ implicit,
+ token.value,
+ start_mark,
+ end_mark,
+ style=token.style,
+ comment=token.comment,
+ )
+ self.state = self.states.pop()
+ elif self.scanner.check_token(FlowSequenceStartToken):
+ pt = self.scanner.peek_token()
+ end_mark = pt.end_mark
+ event = SequenceStartEvent(
+ anchor,
+ tag,
+ implicit,
+ start_mark,
+ end_mark,
+ flow_style=True,
+ comment=pt.comment,
+ )
+ self.state = self.parse_flow_sequence_first_entry
+ elif self.scanner.check_token(FlowMappingStartToken):
+ pt = self.scanner.peek_token()
+ end_mark = pt.end_mark
+ event = MappingStartEvent(
+ anchor,
+ tag,
+ implicit,
+ start_mark,
+ end_mark,
+ flow_style=True,
+ comment=pt.comment,
+ )
+ self.state = self.parse_flow_mapping_first_key
+ elif block and self.scanner.check_token(BlockSequenceStartToken):
+ end_mark = self.scanner.peek_token().start_mark
+ # should inserting the comment be dependent on the
+ # indentation?
+ pt = self.scanner.peek_token()
+ comment = pt.comment
+ # nprint('pt0', type(pt))
+ if comment is None or comment[1] is None:
+ comment = pt.split_comment()
+ # nprint('pt1', comment)
+ event = SequenceStartEvent(
+ anchor, tag, implicit, start_mark, end_mark, flow_style=False, comment=comment
+ )
+ self.state = self.parse_block_sequence_first_entry
+ elif block and self.scanner.check_token(BlockMappingStartToken):
+ end_mark = self.scanner.peek_token().start_mark
+ comment = self.scanner.peek_token().comment
+ event = MappingStartEvent(
+ anchor, tag, implicit, start_mark, end_mark, flow_style=False, comment=comment
+ )
+ self.state = self.parse_block_mapping_first_key
+ elif anchor is not None or tag is not None:
+ # Empty scalars are allowed even if a tag or an anchor is
+ # specified.
+ event = ScalarEvent(anchor, tag, (implicit, False), "", start_mark, end_mark)
+ self.state = self.states.pop()
+ else:
+ if block:
+ node = 'block'
+ else:
+ node = 'flow'
+ token = self.scanner.peek_token()
+ raise ParserError(
+ 'while parsing a %s node' % node,
+ start_mark,
+ 'expected the node content, but found %r' % token.id,
+ token.start_mark,
+ )
+ return event
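+    # For example (illustrative): for the node '&a !!str 5', parse_node above
+    # consumes the AnchorToken, resolves the TagToken ('!!', 'str') through
+    # transform_tag to 'tag:yaml.org,2002:str', and emits a ScalarEvent with
+    # implicit=(False, False).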
+
+ # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)*
+ # BLOCK-END
+
+ def parse_block_sequence_first_entry(self):
+ # type: () -> Any
+ token = self.scanner.get_token()
+ # move any comment from start token
+ # token.move_comment(self.scanner.peek_token())
+ self.marks.append(token.start_mark)
+ return self.parse_block_sequence_entry()
+
+ def parse_block_sequence_entry(self):
+ # type: () -> Any
+ if self.scanner.check_token(BlockEntryToken):
+ token = self.scanner.get_token()
+ token.move_comment(self.scanner.peek_token())
+ if not self.scanner.check_token(BlockEntryToken, BlockEndToken):
+ self.states.append(self.parse_block_sequence_entry)
+ return self.parse_block_node()
+ else:
+ self.state = self.parse_block_sequence_entry
+ return self.process_empty_scalar(token.end_mark)
+ if not self.scanner.check_token(BlockEndToken):
+ token = self.scanner.peek_token()
+ raise ParserError(
+ 'while parsing a block collection',
+ self.marks[-1],
+ 'expected <block end>, but found %r' % token.id,
+ token.start_mark,
+ )
+ token = self.scanner.get_token() # BlockEndToken
+ event = SequenceEndEvent(token.start_mark, token.end_mark, comment=token.comment)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ # indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+
+    # an indentless sequence is one whose entries are not indented relative
+    # to their mapping key, e.g.:
+    # sequence:
+    # - entry
+    # - nested
+
+ def parse_indentless_sequence_entry(self):
+ # type: () -> Any
+ if self.scanner.check_token(BlockEntryToken):
+ token = self.scanner.get_token()
+ token.move_comment(self.scanner.peek_token())
+ if not self.scanner.check_token(
+ BlockEntryToken, KeyToken, ValueToken, BlockEndToken
+ ):
+ self.states.append(self.parse_indentless_sequence_entry)
+ return self.parse_block_node()
+ else:
+ self.state = self.parse_indentless_sequence_entry
+ return self.process_empty_scalar(token.end_mark)
+ token = self.scanner.peek_token()
+ event = SequenceEndEvent(token.start_mark, token.start_mark, comment=token.comment)
+ self.state = self.states.pop()
+ return event
+
+ # block_mapping ::= BLOCK-MAPPING_START
+ # ((KEY block_node_or_indentless_sequence?)?
+ # (VALUE block_node_or_indentless_sequence?)?)*
+ # BLOCK-END
+
+ def parse_block_mapping_first_key(self):
+ # type: () -> Any
+ token = self.scanner.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_block_mapping_key()
+
+ def parse_block_mapping_key(self):
+ # type: () -> Any
+ if self.scanner.check_token(KeyToken):
+ token = self.scanner.get_token()
+ token.move_comment(self.scanner.peek_token())
+ if not self.scanner.check_token(KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_block_mapping_value)
+ return self.parse_block_node_or_indentless_sequence()
+ else:
+ self.state = self.parse_block_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+ if self.resolver.processing_version > (1, 1) and self.scanner.check_token(ValueToken):
+ self.state = self.parse_block_mapping_value
+ return self.process_empty_scalar(self.scanner.peek_token().start_mark)
+ if not self.scanner.check_token(BlockEndToken):
+ token = self.scanner.peek_token()
+ raise ParserError(
+ 'while parsing a block mapping',
+ self.marks[-1],
+ 'expected <block end>, but found %r' % token.id,
+ token.start_mark,
+ )
+ token = self.scanner.get_token()
+ token.move_comment(self.scanner.peek_token())
+ event = MappingEndEvent(token.start_mark, token.end_mark, comment=token.comment)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_block_mapping_value(self):
+ # type: () -> Any
+ if self.scanner.check_token(ValueToken):
+ token = self.scanner.get_token()
+            # the value token might have a post comment; move it to e.g. the block
+ if self.scanner.check_token(ValueToken):
+ token.move_comment(self.scanner.peek_token())
+ else:
+ if not self.scanner.check_token(KeyToken):
+ token.move_comment(self.scanner.peek_token(), empty=True)
+ # else: empty value for this key cannot move token.comment
+ if not self.scanner.check_token(KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_block_mapping_key)
+ return self.parse_block_node_or_indentless_sequence()
+ else:
+ self.state = self.parse_block_mapping_key
+ comment = token.comment
+ if comment is None:
+ token = self.scanner.peek_token()
+ comment = token.comment
+ if comment:
+ token._comment = [None, comment[1]]
+ comment = [comment[0], None]
+ return self.process_empty_scalar(token.end_mark, comment=comment)
+ else:
+ self.state = self.parse_block_mapping_key
+ token = self.scanner.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ # flow_sequence ::= FLOW-SEQUENCE-START
+ # (flow_sequence_entry FLOW-ENTRY)*
+ # flow_sequence_entry?
+ # FLOW-SEQUENCE-END
+ # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+ #
+ # Note that while production rules for both flow_sequence_entry and
+ # flow_mapping_entry are equal, their interpretations are different.
+ # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
+    # generates an inline mapping (set syntax).
+
+ def parse_flow_sequence_first_entry(self):
+ # type: () -> Any
+ token = self.scanner.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_flow_sequence_entry(first=True)
+
+ def parse_flow_sequence_entry(self, first=False):
+ # type: (bool) -> Any
+ if not self.scanner.check_token(FlowSequenceEndToken):
+ if not first:
+ if self.scanner.check_token(FlowEntryToken):
+ self.scanner.get_token()
+ else:
+ token = self.scanner.peek_token()
+ raise ParserError(
+ 'while parsing a flow sequence',
+ self.marks[-1],
+ "expected ',' or ']', but got %r" % token.id,
+ token.start_mark,
+ )
+
+ if self.scanner.check_token(KeyToken):
+ token = self.scanner.peek_token()
+ event = MappingStartEvent(
+ None, None, True, token.start_mark, token.end_mark, flow_style=True
+ ) # type: Any
+ self.state = self.parse_flow_sequence_entry_mapping_key
+ return event
+ elif not self.scanner.check_token(FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry)
+ return self.parse_flow_node()
+ token = self.scanner.get_token()
+ event = SequenceEndEvent(token.start_mark, token.end_mark, comment=token.comment)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
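+    # For example (illustrative): in '[a: 1, b]' the entry 'a: 1' takes the
+    # KeyToken branch above (an inline single-pair mapping), while 'b' is
+    # parsed as a plain flow node.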
+
+ def parse_flow_sequence_entry_mapping_key(self):
+ # type: () -> Any
+ token = self.scanner.get_token()
+ if not self.scanner.check_token(ValueToken, FlowEntryToken, FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry_mapping_value)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+
+ def parse_flow_sequence_entry_mapping_value(self):
+ # type: () -> Any
+ if self.scanner.check_token(ValueToken):
+ token = self.scanner.get_token()
+ if not self.scanner.check_token(FlowEntryToken, FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry_mapping_end)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_end
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_end
+ token = self.scanner.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ def parse_flow_sequence_entry_mapping_end(self):
+ # type: () -> Any
+ self.state = self.parse_flow_sequence_entry
+ token = self.scanner.peek_token()
+ return MappingEndEvent(token.start_mark, token.start_mark)
+
+ # flow_mapping ::= FLOW-MAPPING-START
+ # (flow_mapping_entry FLOW-ENTRY)*
+ # flow_mapping_entry?
+ # FLOW-MAPPING-END
+ # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+
+ def parse_flow_mapping_first_key(self):
+ # type: () -> Any
+ token = self.scanner.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_flow_mapping_key(first=True)
+
+ def parse_flow_mapping_key(self, first=False):
+        # type: (bool) -> Any
+ if not self.scanner.check_token(FlowMappingEndToken):
+ if not first:
+ if self.scanner.check_token(FlowEntryToken):
+ self.scanner.get_token()
+ else:
+ token = self.scanner.peek_token()
+ raise ParserError(
+ 'while parsing a flow mapping',
+ self.marks[-1],
+ "expected ',' or '}', but got %r" % token.id,
+ token.start_mark,
+ )
+ if self.scanner.check_token(KeyToken):
+ token = self.scanner.get_token()
+ if not self.scanner.check_token(
+ ValueToken, FlowEntryToken, FlowMappingEndToken
+ ):
+ self.states.append(self.parse_flow_mapping_value)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+ elif self.resolver.processing_version > (1, 1) and self.scanner.check_token(
+ ValueToken
+ ):
+ self.state = self.parse_flow_mapping_value
+ return self.process_empty_scalar(self.scanner.peek_token().end_mark)
+ elif not self.scanner.check_token(FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_empty_value)
+ return self.parse_flow_node()
+ token = self.scanner.get_token()
+ event = MappingEndEvent(token.start_mark, token.end_mark, comment=token.comment)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_flow_mapping_value(self):
+ # type: () -> Any
+ if self.scanner.check_token(ValueToken):
+ token = self.scanner.get_token()
+ if not self.scanner.check_token(FlowEntryToken, FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_key)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_mapping_key
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_flow_mapping_key
+ token = self.scanner.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ def parse_flow_mapping_empty_value(self):
+ # type: () -> Any
+ self.state = self.parse_flow_mapping_key
+ return self.process_empty_scalar(self.scanner.peek_token().start_mark)
+
+ def process_empty_scalar(self, mark, comment=None):
+ # type: (Any, Any) -> Any
+ return ScalarEvent(None, None, (True, False), "", mark, mark, comment=comment)
+
+
+class RoundTripParser(Parser):
+ """roundtrip is a safe loader, that wants to see the unmangled tag"""
+
+ def transform_tag(self, handle, suffix):
+ # type: (Any, Any) -> Any
+ # return self.tag_handles[handle]+suffix
+ if handle == '!!' and suffix in (
+ u'null',
+ u'bool',
+ u'int',
+ u'float',
+ u'binary',
+ u'timestamp',
+ u'omap',
+ u'pairs',
+ u'set',
+ u'str',
+ u'seq',
+ u'map',
+ ):
+ return Parser.transform_tag(self, handle, suffix)
+ return handle + suffix
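+
+# Illustrative note (not upstream): RoundTripParser keeps unknown tags verbatim
+# so they survive a load/dump cycle; '!!python/tuple' stays '!!python/tuple',
+# while a core-schema tag such as '!!str' is still resolved through
+# DEFAULT_TAGS to 'tag:yaml.org,2002:str'.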
diff --git a/libs/dynaconf/vendor/ruamel/yaml/py.typed b/libs/dynaconf/vendor/ruamel/yaml/py.typed
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/libs/dynaconf/vendor/ruamel/yaml/py.typed
diff --git a/libs/dynaconf/vendor/ruamel/yaml/reader.py b/libs/dynaconf/vendor/ruamel/yaml/reader.py
new file mode 100644
index 000000000..52ec9a9b5
--- /dev/null
+++ b/libs/dynaconf/vendor/ruamel/yaml/reader.py
@@ -0,0 +1,311 @@
+# coding: utf-8
+
+from __future__ import absolute_import
+
+# This module contains abstractions for the input stream. You don't have to
+# look further, there is no pretty code.
+#
+# We define two classes here.
+#
+# Mark(source, line, column)
+# It's just a record and its only use is producing nice error messages.
+# Parser does not use it for any other purposes.
+#
+# Reader(source, data)
+# Reader determines the encoding of `data` and converts it to unicode.
+# Reader provides the following methods and attributes:
+#     reader.peek(index=0) - return the character `index` positions ahead
+#     reader.prefix(length=1) - return the next `length` characters
+#     reader.forward(length=1) - move the current position `length`
+#      characters forward.
+#     reader.index - the index of the current character.
+#     reader.line, reader.column - the line and the column of the current
+#      character.
+
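+# A hedged usage sketch (not part of the upstream file): Reader normalizes any
+# supported input to a '\0'-terminated unicode buffer, e.g.
+#
+#     >>> r = Reader(b'a: 1\n')
+#     >>> r.encoding
+#     'utf-8'
+#     >>> r.peek(0), r.prefix(4)
+#     ('a', 'a: 1')
+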
+import codecs
+
+from .error import YAMLError, FileMark, StringMark, YAMLStreamError
+from .compat import text_type, binary_type, PY3, UNICODE_SIZE
+from .util import RegExp
+
+if False: # MYPY
+    from typing import Any, Dict, Optional, List, Union, Text, Tuple  # NOQA
+# from .compat import StreamTextType # NOQA
+
+__all__ = ['Reader', 'ReaderError']
+
+
+class ReaderError(YAMLError):
+ def __init__(self, name, position, character, encoding, reason):
+ # type: (Any, Any, Any, Any, Any) -> None
+ self.name = name
+ self.character = character
+ self.position = position
+ self.encoding = encoding
+ self.reason = reason
+
+ def __str__(self):
+ # type: () -> str
+ if isinstance(self.character, binary_type):
+ return "'%s' codec can't decode byte #x%02x: %s\n" ' in "%s", position %d' % (
+ self.encoding,
+ ord(self.character),
+ self.reason,
+ self.name,
+ self.position,
+ )
+ else:
+ return 'unacceptable character #x%04x: %s\n' ' in "%s", position %d' % (
+ self.character,
+ self.reason,
+ self.name,
+ self.position,
+ )
+
+
+class Reader(object):
+ # Reader:
+ # - determines the data encoding and converts it to a unicode string,
+ # - checks if characters are in allowed range,
+ # - adds '\0' to the end.
+
+ # Reader accepts
+ # - a `str` object (PY2) / a `bytes` object (PY3),
+ # - a `unicode` object (PY2) / a `str` object (PY3),
+ # - a file-like object with its `read` method returning `str`,
+ # - a file-like object with its `read` method returning `unicode`.
+
+ # Yeah, it's ugly and slow.
+
+ def __init__(self, stream, loader=None):
+ # type: (Any, Any) -> None
+ self.loader = loader
+ if self.loader is not None and getattr(self.loader, '_reader', None) is None:
+ self.loader._reader = self
+ self.reset_reader()
+ self.stream = stream # type: Any # as .read is called
+
+ def reset_reader(self):
+ # type: () -> None
+ self.name = None # type: Any
+ self.stream_pointer = 0
+ self.eof = True
+ self.buffer = ""
+ self.pointer = 0
+ self.raw_buffer = None # type: Any
+ self.raw_decode = None
+ self.encoding = None # type: Optional[Text]
+ self.index = 0
+ self.line = 0
+ self.column = 0
+
+ @property
+ def stream(self):
+ # type: () -> Any
+ try:
+ return self._stream
+ except AttributeError:
+            raise YAMLStreamError('input stream needs to be specified')
+
+ @stream.setter
+ def stream(self, val):
+ # type: (Any) -> None
+ if val is None:
+ return
+ self._stream = None
+ if isinstance(val, text_type):
+ self.name = '<unicode string>'
+ self.check_printable(val)
+ self.buffer = val + u'\0' # type: ignore
+ elif isinstance(val, binary_type):
+ self.name = '<byte string>'
+ self.raw_buffer = val
+ self.determine_encoding()
+ else:
+ if not hasattr(val, 'read'):
+ raise YAMLStreamError('stream argument needs to have a read() method')
+ self._stream = val
+ self.name = getattr(self.stream, 'name', '<file>')
+ self.eof = False
+ self.raw_buffer = None
+ self.determine_encoding()
+
+ def peek(self, index=0):
+ # type: (int) -> Text
+ try:
+ return self.buffer[self.pointer + index]
+ except IndexError:
+ self.update(index + 1)
+ return self.buffer[self.pointer + index]
+
+ def prefix(self, length=1):
+ # type: (int) -> Any
+ if self.pointer + length >= len(self.buffer):
+ self.update(length)
+ return self.buffer[self.pointer : self.pointer + length]
+
+ def forward_1_1(self, length=1):
+ # type: (int) -> None
+ if self.pointer + length + 1 >= len(self.buffer):
+ self.update(length + 1)
+ while length != 0:
+ ch = self.buffer[self.pointer]
+ self.pointer += 1
+ self.index += 1
+ if ch in u'\n\x85\u2028\u2029' or (
+ ch == u'\r' and self.buffer[self.pointer] != u'\n'
+ ):
+ self.line += 1
+ self.column = 0
+ elif ch != u'\uFEFF':
+ self.column += 1
+ length -= 1
+
+ def forward(self, length=1):
+ # type: (int) -> None
+ if self.pointer + length + 1 >= len(self.buffer):
+ self.update(length + 1)
+ while length != 0:
+ ch = self.buffer[self.pointer]
+ self.pointer += 1
+ self.index += 1
+ if ch == u'\n' or (ch == u'\r' and self.buffer[self.pointer] != u'\n'):
+ self.line += 1
+ self.column = 0
+ elif ch != u'\uFEFF':
+ self.column += 1
+ length -= 1
+
+ def get_mark(self):
+ # type: () -> Any
+ if self.stream is None:
+ return StringMark(
+ self.name, self.index, self.line, self.column, self.buffer, self.pointer
+ )
+ else:
+ return FileMark(self.name, self.index, self.line, self.column)
+
+ def determine_encoding(self):
+ # type: () -> None
+ while not self.eof and (self.raw_buffer is None or len(self.raw_buffer) < 2):
+ self.update_raw()
+ if isinstance(self.raw_buffer, binary_type):
+ if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
+ self.raw_decode = codecs.utf_16_le_decode # type: ignore
+ self.encoding = 'utf-16-le'
+ elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
+ self.raw_decode = codecs.utf_16_be_decode # type: ignore
+ self.encoding = 'utf-16-be'
+ else:
+ self.raw_decode = codecs.utf_8_decode # type: ignore
+ self.encoding = 'utf-8'
+ self.update(1)
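+    # For example (illustrative): a UTF-16-LE BOM selects the matching
+    # incremental decoder:
+    #
+    #     >>> Reader(codecs.BOM_UTF16_LE + u'a'.encode('utf-16-le')).encoding
+    #     'utf-16-le'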
+
+ if UNICODE_SIZE == 2:
+ NON_PRINTABLE = RegExp(
+ u'[^\x09\x0A\x0D\x20-\x7E\x85' u'\xA0-\uD7FF' u'\uE000-\uFFFD' u']'
+ )
+ else:
+ NON_PRINTABLE = RegExp(
+ u'[^\x09\x0A\x0D\x20-\x7E\x85'
+ u'\xA0-\uD7FF'
+ u'\uE000-\uFFFD'
+ u'\U00010000-\U0010FFFF'
+ u']'
+ )
+
+ _printable_ascii = ('\x09\x0A\x0D' + "".join(map(chr, range(0x20, 0x7F)))).encode('ascii')
+
+ @classmethod
+ def _get_non_printable_ascii(cls, data): # type: ignore
+ # type: (Text, bytes) -> Optional[Tuple[int, Text]]
+ ascii_bytes = data.encode('ascii')
+ non_printables = ascii_bytes.translate(None, cls._printable_ascii) # type: ignore
+ if not non_printables:
+ return None
+ non_printable = non_printables[:1]
+ return ascii_bytes.index(non_printable), non_printable.decode('ascii')
+
+ @classmethod
+ def _get_non_printable_regex(cls, data):
+ # type: (Text) -> Optional[Tuple[int, Text]]
+ match = cls.NON_PRINTABLE.search(data)
+ if not bool(match):
+ return None
+ return match.start(), match.group()
+
+ @classmethod
+ def _get_non_printable(cls, data):
+ # type: (Text) -> Optional[Tuple[int, Text]]
+ try:
+ return cls._get_non_printable_ascii(data) # type: ignore
+ except UnicodeEncodeError:
+ return cls._get_non_printable_regex(data)
+
+ def check_printable(self, data):
+ # type: (Any) -> None
+ non_printable_match = self._get_non_printable(data)
+ if non_printable_match is not None:
+ start, character = non_printable_match
+ position = self.index + (len(self.buffer) - self.pointer) + start
+ raise ReaderError(
+ self.name,
+ position,
+ ord(character),
+ 'unicode',
+ 'special characters are not allowed',
+ )
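+    # For example (illustrative): non-printable input is rejected up front:
+    #
+    #     >>> Reader(u'a\x00b')
+    #     Traceback (most recent call last):
+    #         ...
+    #     ReaderError: unacceptable character #x0000: special characters are not allowed
+    #       in "<unicode string>", position 1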
+
+ def update(self, length):
+ # type: (int) -> None
+ if self.raw_buffer is None:
+ return
+ self.buffer = self.buffer[self.pointer :]
+ self.pointer = 0
+ while len(self.buffer) < length:
+ if not self.eof:
+ self.update_raw()
+ if self.raw_decode is not None:
+ try:
+ data, converted = self.raw_decode(self.raw_buffer, 'strict', self.eof)
+ except UnicodeDecodeError as exc:
+ if PY3:
+ character = self.raw_buffer[exc.start]
+ else:
+ character = exc.object[exc.start]
+                    if self.stream is not None:
+                        position = self.stream_pointer - len(self.raw_buffer) + exc.start
+                    else:
+                        position = exc.start
+ raise ReaderError(self.name, position, character, exc.encoding, exc.reason)
+ else:
+ data = self.raw_buffer
+ converted = len(data)
+ self.check_printable(data)
+ self.buffer += data
+ self.raw_buffer = self.raw_buffer[converted:]
+ if self.eof:
+ self.buffer += '\0'
+ self.raw_buffer = None
+ break
+
+ def update_raw(self, size=None):
+ # type: (Optional[int]) -> None
+ if size is None:
+ size = 4096 if PY3 else 1024
+ data = self.stream.read(size)
+ if self.raw_buffer is None:
+ self.raw_buffer = data
+ else:
+ self.raw_buffer += data
+ self.stream_pointer += len(data)
+ if not data:
+ self.eof = True
+
+
+# try:
+# import psyco
+# psyco.bind(Reader)
+# except ImportError:
+# pass
diff --git a/libs/dynaconf/vendor/ruamel/yaml/representer.py b/libs/dynaconf/vendor/ruamel/yaml/representer.py
new file mode 100644
index 000000000..985c9b24f
--- /dev/null
+++ b/libs/dynaconf/vendor/ruamel/yaml/representer.py
@@ -0,0 +1,1283 @@
+# coding: utf-8
+
+from __future__ import print_function, absolute_import, division
+
+
+from .error import * # NOQA
+from .nodes import * # NOQA
+from .compat import text_type, binary_type, to_unicode, PY2, PY3
+from .compat import ordereddict # type: ignore
+from .compat import nprint, nprintf # NOQA
+from .scalarstring import (
+ LiteralScalarString,
+ FoldedScalarString,
+ SingleQuotedScalarString,
+ DoubleQuotedScalarString,
+ PlainScalarString,
+)
+from .scalarint import ScalarInt, BinaryInt, OctalInt, HexInt, HexCapsInt
+from .scalarfloat import ScalarFloat
+from .scalarbool import ScalarBoolean
+from .timestamp import TimeStamp
+
+import datetime
+import sys
+import types
+
+if PY3:
+ import copyreg
+ import base64
+else:
+ import copy_reg as copyreg # type: ignore
+
+if False: # MYPY
+ from typing import Dict, List, Any, Union, Text, Optional # NOQA
+
+# fmt: off
+__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
+ 'RepresenterError', 'RoundTripRepresenter']
+# fmt: on
+
+
+class RepresenterError(YAMLError):
+ pass
+
+
+if PY2:
+
+ def get_classobj_bases(cls):
+ # type: (Any) -> Any
+ bases = [cls]
+ for base in cls.__bases__:
+ bases.extend(get_classobj_bases(base))
+ return bases
+
+
+class BaseRepresenter(object):
+
+ yaml_representers = {} # type: Dict[Any, Any]
+ yaml_multi_representers = {} # type: Dict[Any, Any]
+
+ def __init__(self, default_style=None, default_flow_style=None, dumper=None):
+        # type: (Any, Any, Any) -> None
+ self.dumper = dumper
+ if self.dumper is not None:
+ self.dumper._representer = self
+ self.default_style = default_style
+ self.default_flow_style = default_flow_style
+ self.represented_objects = {} # type: Dict[Any, Any]
+ self.object_keeper = [] # type: List[Any]
+ self.alias_key = None # type: Optional[int]
+ self.sort_base_mapping_type_on_output = True
+
+ @property
+ def serializer(self):
+ # type: () -> Any
+ try:
+ if hasattr(self.dumper, 'typ'):
+ return self.dumper.serializer
+ return self.dumper._serializer
+ except AttributeError:
+ return self # cyaml
+
+ def represent(self, data):
+ # type: (Any) -> None
+ node = self.represent_data(data)
+ self.serializer.serialize(node)
+ self.represented_objects = {}
+ self.object_keeper = []
+ self.alias_key = None
+
+ def represent_data(self, data):
+ # type: (Any) -> Any
+ if self.ignore_aliases(data):
+ self.alias_key = None
+ else:
+ self.alias_key = id(data)
+ if self.alias_key is not None:
+ if self.alias_key in self.represented_objects:
+ node = self.represented_objects[self.alias_key]
+ # if node is None:
+ # raise RepresenterError(
+ # "recursive objects are not allowed: %r" % data)
+ return node
+ # self.represented_objects[alias_key] = None
+ self.object_keeper.append(data)
+ data_types = type(data).__mro__
+ if PY2:
+ # if type(data) is types.InstanceType:
+ if isinstance(data, types.InstanceType):
+ data_types = get_classobj_bases(data.__class__) + list(data_types)
+ if data_types[0] in self.yaml_representers:
+ node = self.yaml_representers[data_types[0]](self, data)
+ else:
+ for data_type in data_types:
+ if data_type in self.yaml_multi_representers:
+ node = self.yaml_multi_representers[data_type](self, data)
+ break
+ else:
+ if None in self.yaml_multi_representers:
+ node = self.yaml_multi_representers[None](self, data)
+ elif None in self.yaml_representers:
+ node = self.yaml_representers[None](self, data)
+ else:
+ node = ScalarNode(None, text_type(data))
+ # if alias_key is not None:
+ # self.represented_objects[alias_key] = node
+ return node
+
+ def represent_key(self, data):
+ # type: (Any) -> Any
+ """
+ David Fraser: Extract a method to represent keys in mappings, so that
+        a subclass can choose not to quote them (for example);
+        used in represent_mapping
+ https://bitbucket.org/davidfraser/pyyaml/commits/d81df6eb95f20cac4a79eed95ae553b5c6f77b8c
+ """
+ return self.represent_data(data)
+
+ @classmethod
+ def add_representer(cls, data_type, representer):
+ # type: (Any, Any) -> None
+ if 'yaml_representers' not in cls.__dict__:
+ cls.yaml_representers = cls.yaml_representers.copy()
+ cls.yaml_representers[data_type] = representer
+
+ @classmethod
+ def add_multi_representer(cls, data_type, representer):
+ # type: (Any, Any) -> None
+ if 'yaml_multi_representers' not in cls.__dict__:
+ cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
+ cls.yaml_multi_representers[data_type] = representer
+
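+    # A hedged registration sketch (not part of the upstream file); 'Point' and
+    # 'represent_point' are hypothetical:
+    #
+    #     def represent_point(representer, point):
+    #         return representer.represent_mapping(
+    #             u'!Point', {'x': point.x, 'y': point.y})
+    #
+    #     SafeRepresenter.add_representer(Point, represent_point)
+    #
+    # Exact-type entries from add_representer take precedence; otherwise
+    # represent_data walks the MRO through yaml_multi_representers.
+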
+ def represent_scalar(self, tag, value, style=None, anchor=None):
+ # type: (Any, Any, Any, Any) -> Any
+ if style is None:
+ style = self.default_style
+ comment = None
+ if style and style[0] in '|>':
+ comment = getattr(value, 'comment', None)
+ if comment:
+ comment = [None, [comment]]
+ node = ScalarNode(tag, value, style=style, comment=comment, anchor=anchor)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ return node
+
+ def represent_sequence(self, tag, sequence, flow_style=None):
+ # type: (Any, Any, Any) -> Any
+ value = [] # type: List[Any]
+ node = SequenceNode(tag, value, flow_style=flow_style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+ for item in sequence:
+ node_item = self.represent_data(item)
+ if not (isinstance(node_item, ScalarNode) and not node_item.style):
+ best_style = False
+ value.append(node_item)
+ if flow_style is None:
+ if self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
+
+ def represent_omap(self, tag, omap, flow_style=None):
+ # type: (Any, Any, Any) -> Any
+ value = [] # type: List[Any]
+ node = SequenceNode(tag, value, flow_style=flow_style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+ for item_key in omap:
+ item_val = omap[item_key]
+ node_item = self.represent_data({item_key: item_val})
+ # if not (isinstance(node_item, ScalarNode) \
+ # and not node_item.style):
+ # best_style = False
+ value.append(node_item)
+ if flow_style is None:
+ if self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
+
+ def represent_mapping(self, tag, mapping, flow_style=None):
+ # type: (Any, Any, Any) -> Any
+ value = [] # type: List[Any]
+ node = MappingNode(tag, value, flow_style=flow_style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+ if hasattr(mapping, 'items'):
+ mapping = list(mapping.items())
+ if self.sort_base_mapping_type_on_output:
+ try:
+ mapping = sorted(mapping)
+ except TypeError:
+ pass
+ for item_key, item_value in mapping:
+ node_key = self.represent_key(item_key)
+ node_value = self.represent_data(item_value)
+ if not (isinstance(node_key, ScalarNode) and not node_key.style):
+ best_style = False
+ if not (isinstance(node_value, ScalarNode) and not node_value.style):
+ best_style = False
+ value.append((node_key, node_value))
+ if flow_style is None:
+ if self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
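+    # For example (illustrative): representing {'a': 1} produces only plain
+    # ScalarNodes, so best_style stays True and, with default_flow_style left
+    # at None, the mapping node is marked flow style ('{a: 1}' on output).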
+
+ def ignore_aliases(self, data):
+ # type: (Any) -> bool
+ return False
+
+
+class SafeRepresenter(BaseRepresenter):
+ def ignore_aliases(self, data):
+ # type: (Any) -> bool
+ # https://docs.python.org/3/reference/expressions.html#parenthesized-forms :
+ # "i.e. two occurrences of the empty tuple may or may not yield the same object"
+ # so "data is ()" should not be used
+ if data is None or (isinstance(data, tuple) and data == ()):
+ return True
+ if isinstance(data, (binary_type, text_type, bool, int, float)):
+ return True
+ return False
+
+ def represent_none(self, data):
+ # type: (Any) -> Any
+ return self.represent_scalar(u'tag:yaml.org,2002:null', u'null')
+
+ if PY3:
+
+ def represent_str(self, data):
+ # type: (Any) -> Any
+ return self.represent_scalar(u'tag:yaml.org,2002:str', data)
+
+ def represent_binary(self, data):
+ # type: (Any) -> Any
+ if hasattr(base64, 'encodebytes'):
+ data = base64.encodebytes(data).decode('ascii')
+ else:
+ data = base64.encodestring(data).decode('ascii')
+ return self.represent_scalar(u'tag:yaml.org,2002:binary', data, style='|')
+
+ else:
+
+ def represent_str(self, data):
+ # type: (Any) -> Any
+ tag = None
+ style = None
+ try:
+ data = unicode(data, 'ascii')
+ tag = u'tag:yaml.org,2002:str'
+ except UnicodeDecodeError:
+ try:
+ data = unicode(data, 'utf-8')
+ tag = u'tag:yaml.org,2002:str'
+ except UnicodeDecodeError:
+ data = data.encode('base64')
+ tag = u'tag:yaml.org,2002:binary'
+ style = '|'
+ return self.represent_scalar(tag, data, style=style)
+
+ def represent_unicode(self, data):
+ # type: (Any) -> Any
+ return self.represent_scalar(u'tag:yaml.org,2002:str', data)
+
+ def represent_bool(self, data, anchor=None):
+ # type: (Any, Optional[Any]) -> Any
+ try:
+ value = self.dumper.boolean_representation[bool(data)]
+ except AttributeError:
+ if data:
+ value = u'true'
+ else:
+ value = u'false'
+ return self.represent_scalar(u'tag:yaml.org,2002:bool', value, anchor=anchor)
+
+ def represent_int(self, data):
+ # type: (Any) -> Any
+ return self.represent_scalar(u'tag:yaml.org,2002:int', text_type(data))
+
+ if PY2:
+
+ def represent_long(self, data):
+ # type: (Any) -> Any
+ return self.represent_scalar(u'tag:yaml.org,2002:int', text_type(data))
+
+ inf_value = 1e300
+ while repr(inf_value) != repr(inf_value * inf_value):
+ inf_value *= inf_value
+
+ def represent_float(self, data):
+ # type: (Any) -> Any
+ if data != data or (data == 0.0 and data == 1.0):
+ value = u'.nan'
+ elif data == self.inf_value:
+ value = u'.inf'
+ elif data == -self.inf_value:
+ value = u'-.inf'
+ else:
+ value = to_unicode(repr(data)).lower()
+ if getattr(self.serializer, 'use_version', None) == (1, 1):
+ if u'.' not in value and u'e' in value:
+ # Note that in some cases `repr(data)` represents a float number
+ # without the decimal parts. For instance:
+ # >>> repr(1e17)
+ # '1e17'
+ # Unfortunately, this is not a valid float representation according
+ # to the definition of the `!!float` tag in YAML 1.1. We fix
+ # this by adding '.0' before the 'e' symbol.
+ value = value.replace(u'e', u'.0e', 1)
+ return self.represent_scalar(u'tag:yaml.org,2002:float', value)
+
+ def represent_list(self, data):
+ # type: (Any) -> Any
+ # pairs = (len(data) > 0 and isinstance(data, list))
+ # if pairs:
+ # for item in data:
+ # if not isinstance(item, tuple) or len(item) != 2:
+ # pairs = False
+ # break
+ # if not pairs:
+ return self.represent_sequence(u'tag:yaml.org,2002:seq', data)
+
+ # value = []
+ # for item_key, item_value in data:
+ # value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
+ # [(item_key, item_value)]))
+ # return SequenceNode(u'tag:yaml.org,2002:pairs', value)
+
+ def represent_dict(self, data):
+ # type: (Any) -> Any
+ return self.represent_mapping(u'tag:yaml.org,2002:map', data)
+
+ def represent_ordereddict(self, data):
+ # type: (Any) -> Any
+ return self.represent_omap(u'tag:yaml.org,2002:omap', data)
+
+ def represent_set(self, data):
+ # type: (Any) -> Any
+ value = {} # type: Dict[Any, None]
+ for key in data:
+ value[key] = None
+ return self.represent_mapping(u'tag:yaml.org,2002:set', value)
+
+ def represent_date(self, data):
+ # type: (Any) -> Any
+ value = to_unicode(data.isoformat())
+ return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
+
+ def represent_datetime(self, data):
+ # type: (Any) -> Any
+ value = to_unicode(data.isoformat(' '))
+ return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
+
+ def represent_yaml_object(self, tag, data, cls, flow_style=None):
+ # type: (Any, Any, Any, Any) -> Any
+ if hasattr(data, '__getstate__'):
+ state = data.__getstate__()
+ else:
+ state = data.__dict__.copy()
+ return self.represent_mapping(tag, state, flow_style=flow_style)
+
+ def represent_undefined(self, data):
+ # type: (Any) -> None
+ raise RepresenterError('cannot represent an object: %s' % (data,))
+
+
+SafeRepresenter.add_representer(type(None), SafeRepresenter.represent_none)
+
+SafeRepresenter.add_representer(str, SafeRepresenter.represent_str)
+
+if PY2:
+ SafeRepresenter.add_representer(unicode, SafeRepresenter.represent_unicode)
+else:
+ SafeRepresenter.add_representer(bytes, SafeRepresenter.represent_binary)
+
+SafeRepresenter.add_representer(bool, SafeRepresenter.represent_bool)
+
+SafeRepresenter.add_representer(int, SafeRepresenter.represent_int)
+
+if PY2:
+ SafeRepresenter.add_representer(long, SafeRepresenter.represent_long)
+
+SafeRepresenter.add_representer(float, SafeRepresenter.represent_float)
+
+SafeRepresenter.add_representer(list, SafeRepresenter.represent_list)
+
+SafeRepresenter.add_representer(tuple, SafeRepresenter.represent_list)
+
+SafeRepresenter.add_representer(dict, SafeRepresenter.represent_dict)
+
+SafeRepresenter.add_representer(set, SafeRepresenter.represent_set)
+
+SafeRepresenter.add_representer(ordereddict, SafeRepresenter.represent_ordereddict)
+
+if sys.version_info >= (2, 7):
+ import collections
+
+ SafeRepresenter.add_representer(
+ collections.OrderedDict, SafeRepresenter.represent_ordereddict
+ )
+
+SafeRepresenter.add_representer(datetime.date, SafeRepresenter.represent_date)
+
+SafeRepresenter.add_representer(datetime.datetime, SafeRepresenter.represent_datetime)
+
+SafeRepresenter.add_representer(None, SafeRepresenter.represent_undefined)
+
+
+class Representer(SafeRepresenter):
+ if PY2:
+
+ def represent_str(self, data):
+ # type: (Any) -> Any
+ tag = None
+ style = None
+ try:
+ data = unicode(data, 'ascii')
+ tag = u'tag:yaml.org,2002:str'
+ except UnicodeDecodeError:
+ try:
+ data = unicode(data, 'utf-8')
+ tag = u'tag:yaml.org,2002:python/str'
+ except UnicodeDecodeError:
+ data = data.encode('base64')
+ tag = u'tag:yaml.org,2002:binary'
+ style = '|'
+ return self.represent_scalar(tag, data, style=style)
+
+ def represent_unicode(self, data):
+ # type: (Any) -> Any
+ tag = None
+ try:
+ data.encode('ascii')
+ tag = u'tag:yaml.org,2002:python/unicode'
+ except UnicodeEncodeError:
+ tag = u'tag:yaml.org,2002:str'
+ return self.represent_scalar(tag, data)
+
+ def represent_long(self, data):
+ # type: (Any) -> Any
+ tag = u'tag:yaml.org,2002:int'
+ if int(data) is not data:
+ tag = u'tag:yaml.org,2002:python/long'
+ return self.represent_scalar(tag, to_unicode(data))
+
+ def represent_complex(self, data):
+ # type: (Any) -> Any
+ if data.imag == 0.0:
+ data = u'%r' % data.real
+ elif data.real == 0.0:
+ data = u'%rj' % data.imag
+ elif data.imag > 0:
+ data = u'%r+%rj' % (data.real, data.imag)
+ else:
+ data = u'%r%rj' % (data.real, data.imag)
+ return self.represent_scalar(u'tag:yaml.org,2002:python/complex', data)
+
+ def represent_tuple(self, data):
+ # type: (Any) -> Any
+ return self.represent_sequence(u'tag:yaml.org,2002:python/tuple', data)
+
+ def represent_name(self, data):
+ # type: (Any) -> Any
+ try:
+ name = u'%s.%s' % (data.__module__, data.__qualname__)
+ except AttributeError:
+ # probably PY2
+ name = u'%s.%s' % (data.__module__, data.__name__)
+ return self.represent_scalar(u'tag:yaml.org,2002:python/name:' + name, "")
+
+ def represent_module(self, data):
+ # type: (Any) -> Any
+ return self.represent_scalar(u'tag:yaml.org,2002:python/module:' + data.__name__, "")
+
+ if PY2:
+
+ def represent_instance(self, data):
+ # type: (Any) -> Any
+ # For instances of classic classes, we use __getinitargs__ and
+ # __getstate__ to serialize the data.
+
+ # If data.__getinitargs__ exists, the object must be reconstructed
+ # by calling cls(**args), where args is a tuple returned by
+ # __getinitargs__. Otherwise, the cls.__init__ method should never
+ # be called and the class instance is created by instantiating a
+ # trivial class and assigning to the instance's __class__ variable.
+
+ # If data.__getstate__ exists, it returns the state of the object.
+ # Otherwise, the state of the object is data.__dict__.
+
+ # We produce either a !!python/object or !!python/object/new node.
+ # If data.__getinitargs__ does not exist and state is a dictionary,
+        # we produce a !!python/object node. Otherwise we produce a
+ # !!python/object/new node.
+
+ cls = data.__class__
+ class_name = u'%s.%s' % (cls.__module__, cls.__name__)
+ args = None
+ state = None
+ if hasattr(data, '__getinitargs__'):
+ args = list(data.__getinitargs__())
+ if hasattr(data, '__getstate__'):
+ state = data.__getstate__()
+ else:
+ state = data.__dict__
+ if args is None and isinstance(state, dict):
+ return self.represent_mapping(
+ u'tag:yaml.org,2002:python/object:' + class_name, state
+ )
+ if isinstance(state, dict) and not state:
+ return self.represent_sequence(
+ u'tag:yaml.org,2002:python/object/new:' + class_name, args
+ )
+ value = {}
+ if bool(args):
+ value['args'] = args
+ value['state'] = state # type: ignore
+ return self.represent_mapping(
+ u'tag:yaml.org,2002:python/object/new:' + class_name, value
+ )
+
+ def represent_object(self, data):
+ # type: (Any) -> Any
+ # We use __reduce__ API to save the data. data.__reduce__ returns
+ # a tuple of length 2-5:
+ # (function, args, state, listitems, dictitems)
+
+        # For reconstructing, we call function(*args), then set its state,
+ # listitems, and dictitems if they are not None.
+
+ # A special case is when function.__name__ == '__newobj__'. In this
+ # case we create the object with args[0].__new__(*args).
+
+ # Another special case is when __reduce__ returns a string - we don't
+ # support it.
+
+ # We produce a !!python/object, !!python/object/new or
+ # !!python/object/apply node.
+
+ cls = type(data)
+ if cls in copyreg.dispatch_table:
+ reduce = copyreg.dispatch_table[cls](data)
+ elif hasattr(data, '__reduce_ex__'):
+ reduce = data.__reduce_ex__(2)
+ elif hasattr(data, '__reduce__'):
+ reduce = data.__reduce__()
+ else:
+ raise RepresenterError('cannot represent object: %r' % (data,))
+ reduce = (list(reduce) + [None] * 5)[:5]
+ function, args, state, listitems, dictitems = reduce
+ args = list(args)
+ if state is None:
+ state = {}
+ if listitems is not None:
+ listitems = list(listitems)
+ if dictitems is not None:
+ dictitems = dict(dictitems)
+ if function.__name__ == '__newobj__':
+ function = args[0]
+ args = args[1:]
+ tag = u'tag:yaml.org,2002:python/object/new:'
+ newobj = True
+ else:
+ tag = u'tag:yaml.org,2002:python/object/apply:'
+ newobj = False
+ try:
+ function_name = u'%s.%s' % (function.__module__, function.__qualname__)
+ except AttributeError:
+ # probably PY2
+ function_name = u'%s.%s' % (function.__module__, function.__name__)
+ if not args and not listitems and not dictitems and isinstance(state, dict) and newobj:
+ return self.represent_mapping(
+ u'tag:yaml.org,2002:python/object:' + function_name, state
+ )
+ if not listitems and not dictitems and isinstance(state, dict) and not state:
+ return self.represent_sequence(tag + function_name, args)
+ value = {}
+ if args:
+ value['args'] = args
+ if state or not isinstance(state, dict):
+ value['state'] = state
+ if listitems:
+ value['listitems'] = listitems
+ if dictitems:
+ value['dictitems'] = dictitems
+ return self.represent_mapping(tag + function_name, value)
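+    # For example (illustrative): a new-style instance whose __reduce_ex__(2)
+    # returns (copyreg.__newobj__, (C,), state_dict, None, None) is emitted as
+    # a '!!python/object:module.C' mapping via the newobj branch above; objects
+    # that need constructor arguments fall through to '!!python/object/new:'
+    # or '!!python/object/apply:'.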
+
+
+if PY2:
+ Representer.add_representer(str, Representer.represent_str)
+
+ Representer.add_representer(unicode, Representer.represent_unicode)
+
+ Representer.add_representer(long, Representer.represent_long)
+
+Representer.add_representer(complex, Representer.represent_complex)
+
+Representer.add_representer(tuple, Representer.represent_tuple)
+
+Representer.add_representer(type, Representer.represent_name)
+
+if PY2:
+ Representer.add_representer(types.ClassType, Representer.represent_name)
+
+Representer.add_representer(types.FunctionType, Representer.represent_name)
+
+Representer.add_representer(types.BuiltinFunctionType, Representer.represent_name)
+
+Representer.add_representer(types.ModuleType, Representer.represent_module)
+
+if PY2:
+ Representer.add_multi_representer(types.InstanceType, Representer.represent_instance)
+
+Representer.add_multi_representer(object, Representer.represent_object)
+
+Representer.add_multi_representer(type, Representer.represent_name)
+
+from .comments import (
+ CommentedMap,
+ CommentedOrderedMap,
+ CommentedSeq,
+ CommentedKeySeq,
+ CommentedKeyMap,
+ CommentedSet,
+ comment_attrib,
+ merge_attrib,
+ TaggedScalar,
+) # NOQA
+
+
+class RoundTripRepresenter(SafeRepresenter):
+ # need to add type here and write out the .comment
+ # in serializer and emitter
+
+ def __init__(self, default_style=None, default_flow_style=None, dumper=None):
+ # type: (Any, Any, Any) -> None
+ if not hasattr(dumper, 'typ') and default_flow_style is None:
+ default_flow_style = False
+ SafeRepresenter.__init__(
+ self,
+ default_style=default_style,
+ default_flow_style=default_flow_style,
+ dumper=dumper,
+ )
+
+ def ignore_aliases(self, data):
+ # type: (Any) -> bool
+ try:
+ if data.anchor is not None and data.anchor.value is not None:
+ return False
+ except AttributeError:
+ pass
+ return SafeRepresenter.ignore_aliases(self, data)
+
+ def represent_none(self, data):
+ # type: (Any) -> Any
+ if len(self.represented_objects) == 0 and not self.serializer.use_explicit_start:
+ # this will be open ended (although it is not yet)
+ return self.represent_scalar(u'tag:yaml.org,2002:null', u'null')
+ return self.represent_scalar(u'tag:yaml.org,2002:null', "")
+
+ def represent_literal_scalarstring(self, data):
+ # type: (Any) -> Any
+ tag = None
+ style = '|'
+ anchor = data.yaml_anchor(any=True)
+ if PY2 and not isinstance(data, unicode):
+ data = unicode(data, 'ascii')
+ tag = u'tag:yaml.org,2002:str'
+ return self.represent_scalar(tag, data, style=style, anchor=anchor)
+
+ represent_preserved_scalarstring = represent_literal_scalarstring
+
+ def represent_folded_scalarstring(self, data):
+ # type: (Any) -> Any
+ tag = None
+ style = '>'
+ anchor = data.yaml_anchor(any=True)
+ for fold_pos in reversed(getattr(data, 'fold_pos', [])):
+ if (
+ data[fold_pos] == ' '
+ and (fold_pos > 0 and not data[fold_pos - 1].isspace())
+ and (fold_pos < len(data) and not data[fold_pos + 1].isspace())
+ ):
+ data = data[:fold_pos] + '\a' + data[fold_pos:]
+ if PY2 and not isinstance(data, unicode):
+ data = unicode(data, 'ascii')
+ tag = u'tag:yaml.org,2002:str'
+ return self.represent_scalar(tag, data, style=style, anchor=anchor)
+
+ def represent_single_quoted_scalarstring(self, data):
+ # type: (Any) -> Any
+ tag = None
+ style = "'"
+ anchor = data.yaml_anchor(any=True)
+ if PY2 and not isinstance(data, unicode):
+ data = unicode(data, 'ascii')
+ tag = u'tag:yaml.org,2002:str'
+ return self.represent_scalar(tag, data, style=style, anchor=anchor)
+
+ def represent_double_quoted_scalarstring(self, data):
+ # type: (Any) -> Any
+ tag = None
+ style = '"'
+ anchor = data.yaml_anchor(any=True)
+ if PY2 and not isinstance(data, unicode):
+ data = unicode(data, 'ascii')
+ tag = u'tag:yaml.org,2002:str'
+ return self.represent_scalar(tag, data, style=style, anchor=anchor)
+
+ def represent_plain_scalarstring(self, data):
+ # type: (Any) -> Any
+ tag = None
+ style = ''
+ anchor = data.yaml_anchor(any=True)
+ if PY2 and not isinstance(data, unicode):
+ data = unicode(data, 'ascii')
+ tag = u'tag:yaml.org,2002:str'
+ return self.represent_scalar(tag, data, style=style, anchor=anchor)
+
+ def insert_underscore(self, prefix, s, underscore, anchor=None):
+ # type: (Any, Any, Any, Any) -> Any
+ if underscore is None:
+ return self.represent_scalar(u'tag:yaml.org,2002:int', prefix + s, anchor=anchor)
+ if underscore[0]:
+ sl = list(s)
+ pos = len(s) - underscore[0]
+ while pos > 0:
+ sl.insert(pos, '_')
+ pos -= underscore[0]
+ s = "".join(sl)
+ if underscore[1]:
+ s = '_' + s
+ if underscore[2]:
+ s += '_'
+ return self.represent_scalar(u'tag:yaml.org,2002:int', prefix + s, anchor=anchor)
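+    # For example (illustrative): insert_underscore('', '1000000', (3, False, False))
+    # re-inserts '_' every three digits from the right, representing the int
+    # as '1_000_000'.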
+
+ def represent_scalar_int(self, data):
+ # type: (Any) -> Any
+ if data._width is not None:
+ s = '{:0{}d}'.format(data, data._width)
+ else:
+ s = format(data, 'd')
+ anchor = data.yaml_anchor(any=True)
+ return self.insert_underscore("", s, data._underscore, anchor=anchor)
+
+ def represent_binary_int(self, data):
+ # type: (Any) -> Any
+ if data._width is not None:
+ # cannot use '{:#0{}b}', that strips the zeros
+ s = '{:0{}b}'.format(data, data._width)
+ else:
+ s = format(data, 'b')
+ anchor = data.yaml_anchor(any=True)
+ return self.insert_underscore('0b', s, data._underscore, anchor=anchor)
+
+ def represent_octal_int(self, data):
+ # type: (Any) -> Any
+ if data._width is not None:
+ # cannot use '{:#0{}o}', that strips the zeros
+ s = '{:0{}o}'.format(data, data._width)
+ else:
+ s = format(data, 'o')
+ anchor = data.yaml_anchor(any=True)
+ return self.insert_underscore('0o', s, data._underscore, anchor=anchor)
+
+ def represent_hex_int(self, data):
+ # type: (Any) -> Any
+ if data._width is not None:
+ # cannot use '{:#0{}x}', that strips the zeros
+ s = '{:0{}x}'.format(data, data._width)
+ else:
+ s = format(data, 'x')
+ anchor = data.yaml_anchor(any=True)
+ return self.insert_underscore('0x', s, data._underscore, anchor=anchor)
+
+ def represent_hex_caps_int(self, data):
+ # type: (Any) -> Any
+ if data._width is not None:
+ # cannot use '{:#0{}X}', that strips the zeros
+ s = '{:0{}X}'.format(data, data._width)
+ else:
+ s = format(data, 'X')
+ anchor = data.yaml_anchor(any=True)
+ return self.insert_underscore('0x', s, data._underscore, anchor=anchor)
+
+ def represent_scalar_float(self, data):
+ # type: (Any) -> Any
+ """ this is way more complicated """
+ value = None
+ anchor = data.yaml_anchor(any=True)
+ if data != data or (data == 0.0 and data == 1.0):
+ value = u'.nan'
+ elif data == self.inf_value:
+ value = u'.inf'
+ elif data == -self.inf_value:
+ value = u'-.inf'
+ if value:
+ return self.represent_scalar(u'tag:yaml.org,2002:float', value, anchor=anchor)
+ if data._exp is None and data._prec > 0 and data._prec == data._width - 1:
+ # no exponent, but trailing dot
+ value = u'{}{:d}.'.format(data._m_sign if data._m_sign else "", abs(int(data)))
+ elif data._exp is None:
+ # no exponent, "normal" dot
+ prec = data._prec
+ ms = data._m_sign if data._m_sign else ""
+ # -1 for the dot
+ value = u'{}{:0{}.{}f}'.format(
+ ms, abs(data), data._width - len(ms), data._width - prec - 1
+ )
+ if prec == 0 or (prec == 1 and ms != ""):
+ value = value.replace(u'0.', u'.')
+ while len(value) < data._width:
+ value += u'0'
+ else:
+ # exponent
+ m, es = u'{:{}.{}e}'.format(
+ # data, data._width, data._width - data._prec + (1 if data._m_sign else 0)
+ data,
+ data._width,
+ data._width + (1 if data._m_sign else 0),
+ ).split('e')
+ w = data._width if data._prec > 0 else (data._width + 1)
+ if data < 0:
+ w += 1
+ m = m[:w]
+ e = int(es)
+ m1, m2 = m.split('.') # always second?
+ while len(m1) + len(m2) < data._width - (1 if data._prec >= 0 else 0):
+ m2 += u'0'
+ if data._m_sign and data > 0:
+ m1 = '+' + m1
+ esgn = u'+' if data._e_sign else ""
+ if data._prec < 0: # mantissa without dot
+ if m2 != u'0':
+ e -= len(m2)
+ else:
+ m2 = ""
+ while (len(m1) + len(m2) - (1 if data._m_sign else 0)) < data._width:
+ m2 += u'0'
+ e -= 1
+ value = m1 + m2 + data._exp + u'{:{}0{}d}'.format(e, esgn, data._e_width)
+ elif data._prec == 0: # mantissa with trailing dot
+ e -= len(m2)
+ value = (
+ m1 + m2 + u'.' + data._exp + u'{:{}0{}d}'.format(e, esgn, data._e_width)
+ )
+ else:
+ if data._m_lead0 > 0:
+ m2 = u'0' * (data._m_lead0 - 1) + m1 + m2
+ m1 = u'0'
+ m2 = m2[: -data._m_lead0] # these should be zeros
+ e += data._m_lead0
+ while len(m1) < data._prec:
+ m1 += m2[0]
+ m2 = m2[1:]
+ e -= 1
+ value = (
+ m1 + u'.' + m2 + data._exp + u'{:{}0{}d}'.format(e, esgn, data._e_width)
+ )
+
+ if value is None:
+ value = to_unicode(repr(data)).lower()
+ return self.represent_scalar(u'tag:yaml.org,2002:float', value, anchor=anchor)
+
+ def represent_sequence(self, tag, sequence, flow_style=None):
+ # type: (Any, Any, Any) -> Any
+ value = [] # type: List[Any]
+        # if flow_style is None, the flow style explicitly attached to the
+        # object is used; if that is None as well, the default flow style
+        # applies
+ try:
+ flow_style = sequence.fa.flow_style(flow_style)
+ except AttributeError:
+ flow_style = flow_style
+ try:
+ anchor = sequence.yaml_anchor()
+ except AttributeError:
+ anchor = None
+ node = SequenceNode(tag, value, flow_style=flow_style, anchor=anchor)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+ try:
+ comment = getattr(sequence, comment_attrib)
+ node.comment = comment.comment
+            # reset any 'comment already printed' information
+ if node.comment and node.comment[1]:
+ for ct in node.comment[1]:
+ ct.reset()
+ item_comments = comment.items
+ for v in item_comments.values():
+ if v and v[1]:
+ for ct in v[1]:
+ ct.reset()
+ item_comments = comment.items
+ node.comment = comment.comment
+ try:
+ node.comment.append(comment.end)
+ except AttributeError:
+ pass
+ except AttributeError:
+ item_comments = {}
+ for idx, item in enumerate(sequence):
+ node_item = self.represent_data(item)
+ self.merge_comments(node_item, item_comments.get(idx))
+ if not (isinstance(node_item, ScalarNode) and not node_item.style):
+ best_style = False
+ value.append(node_item)
+ if flow_style is None:
+ if len(sequence) != 0 and self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
+
+ def merge_comments(self, node, comments):
+ # type: (Any, Any) -> Any
+ if comments is None:
+ assert hasattr(node, 'comment')
+ return node
+ if getattr(node, 'comment', None) is not None:
+ for idx, val in enumerate(comments):
+ if idx >= len(node.comment):
+ continue
+ nc = node.comment[idx]
+ if nc is not None:
+ assert val is None or val == nc
+ comments[idx] = nc
+ node.comment = comments
+ return node
+
+ def represent_key(self, data):
+ # type: (Any) -> Any
+ if isinstance(data, CommentedKeySeq):
+ self.alias_key = None
+ return self.represent_sequence(u'tag:yaml.org,2002:seq', data, flow_style=True)
+ if isinstance(data, CommentedKeyMap):
+ self.alias_key = None
+ return self.represent_mapping(u'tag:yaml.org,2002:map', data, flow_style=True)
+ return SafeRepresenter.represent_key(self, data)
+
+ def represent_mapping(self, tag, mapping, flow_style=None):
+ # type: (Any, Any, Any) -> Any
+ value = [] # type: List[Any]
+ try:
+ flow_style = mapping.fa.flow_style(flow_style)
+ except AttributeError:
+ flow_style = flow_style
+ try:
+ anchor = mapping.yaml_anchor()
+ except AttributeError:
+ anchor = None
+ node = MappingNode(tag, value, flow_style=flow_style, anchor=anchor)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+        # no sorting!!
+ try:
+ comment = getattr(mapping, comment_attrib)
+ node.comment = comment.comment
+ if node.comment and node.comment[1]:
+ for ct in node.comment[1]:
+ ct.reset()
+ item_comments = comment.items
+ for v in item_comments.values():
+ if v and v[1]:
+ for ct in v[1]:
+ ct.reset()
+ try:
+ node.comment.append(comment.end)
+ except AttributeError:
+ pass
+ except AttributeError:
+ item_comments = {}
+ merge_list = [m[1] for m in getattr(mapping, merge_attrib, [])]
+ try:
+ merge_pos = getattr(mapping, merge_attrib, [[0]])[0][0]
+ except IndexError:
+ merge_pos = 0
+ item_count = 0
+ if bool(merge_list):
+ items = mapping.non_merged_items()
+ else:
+ items = mapping.items()
+ for item_key, item_value in items:
+ item_count += 1
+ node_key = self.represent_key(item_key)
+ node_value = self.represent_data(item_value)
+ item_comment = item_comments.get(item_key)
+ if item_comment:
+ assert getattr(node_key, 'comment', None) is None
+ node_key.comment = item_comment[:2]
+ nvc = getattr(node_value, 'comment', None)
+ if nvc is not None: # end comment already there
+ nvc[0] = item_comment[2]
+ nvc[1] = item_comment[3]
+ else:
+ node_value.comment = item_comment[2:]
+ if not (isinstance(node_key, ScalarNode) and not node_key.style):
+ best_style = False
+ if not (isinstance(node_value, ScalarNode) and not node_value.style):
+ best_style = False
+ value.append((node_key, node_value))
+ if flow_style is None:
+ if ((item_count != 0) or bool(merge_list)) and self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ if bool(merge_list):
+ # because of the call to represent_data here, the anchors
+ # are marked as being used and thereby created
+ if len(merge_list) == 1:
+ arg = self.represent_data(merge_list[0])
+ else:
+ arg = self.represent_data(merge_list)
+ arg.flow_style = True
+ value.insert(merge_pos, (ScalarNode(u'tag:yaml.org,2002:merge', '<<'), arg))
+ return node
+
+ def represent_omap(self, tag, omap, flow_style=None):
+ # type: (Any, Any, Any) -> Any
+ value = [] # type: List[Any]
+ try:
+ flow_style = omap.fa.flow_style(flow_style)
+ except AttributeError:
+ flow_style = flow_style
+ try:
+ anchor = omap.yaml_anchor()
+ except AttributeError:
+ anchor = None
+ node = SequenceNode(tag, value, flow_style=flow_style, anchor=anchor)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+ try:
+ comment = getattr(omap, comment_attrib)
+ node.comment = comment.comment
+ if node.comment and node.comment[1]:
+ for ct in node.comment[1]:
+ ct.reset()
+ item_comments = comment.items
+ for v in item_comments.values():
+ if v and v[1]:
+ for ct in v[1]:
+ ct.reset()
+ try:
+ node.comment.append(comment.end)
+ except AttributeError:
+ pass
+ except AttributeError:
+ item_comments = {}
+ for item_key in omap:
+ item_val = omap[item_key]
+ node_item = self.represent_data({item_key: item_val})
+ # node_item.flow_style = False
+ # node item has two scalars in value: node_key and node_value
+ item_comment = item_comments.get(item_key)
+ if item_comment:
+ if item_comment[1]:
+ node_item.comment = [None, item_comment[1]]
+ assert getattr(node_item.value[0][0], 'comment', None) is None
+ node_item.value[0][0].comment = [item_comment[0], None]
+ nvc = getattr(node_item.value[0][1], 'comment', None)
+ if nvc is not None: # end comment already there
+ nvc[0] = item_comment[2]
+ nvc[1] = item_comment[3]
+ else:
+ node_item.value[0][1].comment = item_comment[2:]
+ # if not (isinstance(node_item, ScalarNode) \
+ # and not node_item.style):
+ # best_style = False
+ value.append(node_item)
+ if flow_style is None:
+ if self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
+
+ def represent_set(self, setting):
+ # type: (Any) -> Any
+ flow_style = False
+ tag = u'tag:yaml.org,2002:set'
+ # return self.represent_mapping(tag, value)
+ value = [] # type: List[Any]
+ flow_style = setting.fa.flow_style(flow_style)
+ try:
+ anchor = setting.yaml_anchor()
+ except AttributeError:
+ anchor = None
+ node = MappingNode(tag, value, flow_style=flow_style, anchor=anchor)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+        # no sorting!!
+ try:
+ comment = getattr(setting, comment_attrib)
+ node.comment = comment.comment
+ if node.comment and node.comment[1]:
+ for ct in node.comment[1]:
+ ct.reset()
+ item_comments = comment.items
+ for v in item_comments.values():
+ if v and v[1]:
+ for ct in v[1]:
+ ct.reset()
+ try:
+ node.comment.append(comment.end)
+ except AttributeError:
+ pass
+ except AttributeError:
+ item_comments = {}
+ for item_key in setting.odict:
+ node_key = self.represent_key(item_key)
+ node_value = self.represent_data(None)
+ item_comment = item_comments.get(item_key)
+ if item_comment:
+ assert getattr(node_key, 'comment', None) is None
+ node_key.comment = item_comment[:2]
+ node_key.style = node_value.style = '?'
+ if not (isinstance(node_key, ScalarNode) and not node_key.style):
+ best_style = False
+ if not (isinstance(node_value, ScalarNode) and not node_value.style):
+ best_style = False
+ value.append((node_key, node_value))
+ best_style = best_style
+ return node
+
+ def represent_dict(self, data):
+ # type: (Any) -> Any
+ """write out tag if saved on loading"""
+ try:
+ t = data.tag.value
+ except AttributeError:
+ t = None
+ if t:
+ if t.startswith('!!'):
+ tag = 'tag:yaml.org,2002:' + t[2:]
+ else:
+ tag = t
+ else:
+ tag = u'tag:yaml.org,2002:map'
+ return self.represent_mapping(tag, data)
+
+ def represent_list(self, data):
+ # type: (Any) -> Any
+ try:
+ t = data.tag.value
+ except AttributeError:
+ t = None
+ if t:
+ if t.startswith('!!'):
+ tag = 'tag:yaml.org,2002:' + t[2:]
+ else:
+ tag = t
+ else:
+ tag = u'tag:yaml.org,2002:seq'
+ return self.represent_sequence(tag, data)
+
+ def represent_datetime(self, data):
+ # type: (Any) -> Any
+ inter = 'T' if data._yaml['t'] else ' '
+ _yaml = data._yaml
+ if _yaml['delta']:
+ data += _yaml['delta']
+ value = data.isoformat(inter)
+ else:
+ value = data.isoformat(inter)
+ if _yaml['tz']:
+ value += _yaml['tz']
+ return self.represent_scalar(u'tag:yaml.org,2002:timestamp', to_unicode(value))
+
+ def represent_tagged_scalar(self, data):
+ # type: (Any) -> Any
+ try:
+ tag = data.tag.value
+ except AttributeError:
+ tag = None
+ try:
+ anchor = data.yaml_anchor()
+ except AttributeError:
+ anchor = None
+ return self.represent_scalar(tag, data.value, style=data.style, anchor=anchor)
+
+ def represent_scalar_bool(self, data):
+ # type: (Any) -> Any
+ try:
+ anchor = data.yaml_anchor()
+ except AttributeError:
+ anchor = None
+ return SafeRepresenter.represent_bool(self, data, anchor=anchor)
+
+
+RoundTripRepresenter.add_representer(type(None), RoundTripRepresenter.represent_none)
+
+RoundTripRepresenter.add_representer(
+ LiteralScalarString, RoundTripRepresenter.represent_literal_scalarstring
+)
+
+RoundTripRepresenter.add_representer(
+ FoldedScalarString, RoundTripRepresenter.represent_folded_scalarstring
+)
+
+RoundTripRepresenter.add_representer(
+ SingleQuotedScalarString, RoundTripRepresenter.represent_single_quoted_scalarstring
+)
+
+RoundTripRepresenter.add_representer(
+ DoubleQuotedScalarString, RoundTripRepresenter.represent_double_quoted_scalarstring
+)
+
+RoundTripRepresenter.add_representer(
+ PlainScalarString, RoundTripRepresenter.represent_plain_scalarstring
+)
+
+# RoundTripRepresenter.add_representer(tuple, Representer.represent_tuple)
+
+RoundTripRepresenter.add_representer(ScalarInt, RoundTripRepresenter.represent_scalar_int)
+
+RoundTripRepresenter.add_representer(BinaryInt, RoundTripRepresenter.represent_binary_int)
+
+RoundTripRepresenter.add_representer(OctalInt, RoundTripRepresenter.represent_octal_int)
+
+RoundTripRepresenter.add_representer(HexInt, RoundTripRepresenter.represent_hex_int)
+
+RoundTripRepresenter.add_representer(HexCapsInt, RoundTripRepresenter.represent_hex_caps_int)
+
+RoundTripRepresenter.add_representer(ScalarFloat, RoundTripRepresenter.represent_scalar_float)
+
+RoundTripRepresenter.add_representer(ScalarBoolean, RoundTripRepresenter.represent_scalar_bool)
+
+RoundTripRepresenter.add_representer(CommentedSeq, RoundTripRepresenter.represent_list)
+
+RoundTripRepresenter.add_representer(CommentedMap, RoundTripRepresenter.represent_dict)
+
+RoundTripRepresenter.add_representer(
+ CommentedOrderedMap, RoundTripRepresenter.represent_ordereddict
+)
+
+if sys.version_info >= (2, 7):
+ import collections
+
+ RoundTripRepresenter.add_representer(
+ collections.OrderedDict, RoundTripRepresenter.represent_ordereddict
+ )
+
+RoundTripRepresenter.add_representer(CommentedSet, RoundTripRepresenter.represent_set)
+
+RoundTripRepresenter.add_representer(
+ TaggedScalar, RoundTripRepresenter.represent_tagged_scalar
+)
+
+RoundTripRepresenter.add_representer(TimeStamp, RoundTripRepresenter.represent_datetime)
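
For orientation, a minimal round trip through this representer. The `YAML`
entry point and the `dynaconf.vendor.ruamel.yaml` import path are assumptions
based on upstream ruamel.yaml and on the vendored import path used elsewhere
in these files:

    # sketch only: round-trip load/modify/dump with format preservation
    import sys
    from dynaconf.vendor.ruamel.yaml import YAML

    yaml = YAML()  # round-trip mode is the default

    doc = (
        'retries: 0x1f  # hex notation survives the round trip\n'
        'message: |\n'
        '  literal block scalars keep their style\n'
    )

    data = yaml.load(doc)
    data['retries'] += 1          # ScalarInt.__iadd__ keeps the hex form
    yaml.dump(data, sys.stdout)   # retries: 0x20, comment and | block intact
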
diff --git a/libs/dynaconf/vendor/ruamel/yaml/resolver.py b/libs/dynaconf/vendor/ruamel/yaml/resolver.py
new file mode 100644
index 000000000..d771d8069
--- /dev/null
+++ b/libs/dynaconf/vendor/ruamel/yaml/resolver.py
@@ -0,0 +1,399 @@
+# coding: utf-8
+
+from __future__ import absolute_import
+
+import re
+
+if False: # MYPY
+ from typing import Any, Dict, List, Union, Text, Optional # NOQA
+ from .compat import VersionType # NOQA
+
+from .compat import string_types, _DEFAULT_YAML_VERSION # NOQA
+from .error import * # NOQA
+from .nodes import MappingNode, ScalarNode, SequenceNode # NOQA
+from .util import RegExp # NOQA
+
+__all__ = ['BaseResolver', 'Resolver', 'VersionedResolver']
+
+
+# fmt: off
+# resolvers consist of
+# - a list of applicable versions
+# - a tag
+# - a regexp
+# - a list of first characters to match
+implicit_resolvers = [
+ ([(1, 2)],
+ u'tag:yaml.org,2002:bool',
+ RegExp(u'''^(?:true|True|TRUE|false|False|FALSE)$''', re.X),
+ list(u'tTfF')),
+ ([(1, 1)],
+ u'tag:yaml.org,2002:bool',
+ RegExp(u'''^(?:y|Y|yes|Yes|YES|n|N|no|No|NO
+ |true|True|TRUE|false|False|FALSE
+ |on|On|ON|off|Off|OFF)$''', re.X),
+ list(u'yYnNtTfFoO')),
+ ([(1, 2)],
+ u'tag:yaml.org,2002:float',
+ RegExp(u'''^(?:
+ [-+]?(?:[0-9][0-9_]*)\\.[0-9_]*(?:[eE][-+]?[0-9]+)?
+ |[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+)
+ |[-+]?\\.[0-9_]+(?:[eE][-+][0-9]+)?
+ |[-+]?\\.(?:inf|Inf|INF)
+ |\\.(?:nan|NaN|NAN))$''', re.X),
+ list(u'-+0123456789.')),
+ ([(1, 1)],
+ u'tag:yaml.org,2002:float',
+ RegExp(u'''^(?:
+ [-+]?(?:[0-9][0-9_]*)\\.[0-9_]*(?:[eE][-+]?[0-9]+)?
+ |[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+)
+ |\\.[0-9_]+(?:[eE][-+][0-9]+)?
+ |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\\.[0-9_]* # sexagesimal float
+ |[-+]?\\.(?:inf|Inf|INF)
+ |\\.(?:nan|NaN|NAN))$''', re.X),
+ list(u'-+0123456789.')),
+ ([(1, 2)],
+ u'tag:yaml.org,2002:int',
+ RegExp(u'''^(?:[-+]?0b[0-1_]+
+ |[-+]?0o?[0-7_]+
+ |[-+]?[0-9_]+
+ |[-+]?0x[0-9a-fA-F_]+)$''', re.X),
+ list(u'-+0123456789')),
+ ([(1, 1)],
+ u'tag:yaml.org,2002:int',
+ RegExp(u'''^(?:[-+]?0b[0-1_]+
+ |[-+]?0?[0-7_]+
+ |[-+]?(?:0|[1-9][0-9_]*)
+ |[-+]?0x[0-9a-fA-F_]+
+ |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X), # sexagesimal int
+ list(u'-+0123456789')),
+ ([(1, 2), (1, 1)],
+ u'tag:yaml.org,2002:merge',
+ RegExp(u'^(?:<<)$'),
+ [u'<']),
+ ([(1, 2), (1, 1)],
+ u'tag:yaml.org,2002:null',
+ RegExp(u'''^(?: ~
+ |null|Null|NULL
+ | )$''', re.X),
+ [u'~', u'n', u'N', u'']),
+ ([(1, 2), (1, 1)],
+ u'tag:yaml.org,2002:timestamp',
+ RegExp(u'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
+ |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
+ (?:[Tt]|[ \\t]+)[0-9][0-9]?
+ :[0-9][0-9] :[0-9][0-9] (?:\\.[0-9]*)?
+ (?:[ \\t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
+ list(u'0123456789')),
+ ([(1, 2), (1, 1)],
+ u'tag:yaml.org,2002:value',
+ RegExp(u'^(?:=)$'),
+ [u'=']),
+ # The following resolver is only for documentation purposes. It cannot work
+ # because plain scalars cannot start with '!', '&', or '*'.
+ ([(1, 2), (1, 1)],
+ u'tag:yaml.org,2002:yaml',
+ RegExp(u'^(?:!|&|\\*)$'),
+ list(u'!&*')),
+]
+# fmt: on
+
+
+class ResolverError(YAMLError):
+ pass
+
+
+class BaseResolver(object):
+
+ DEFAULT_SCALAR_TAG = u'tag:yaml.org,2002:str'
+ DEFAULT_SEQUENCE_TAG = u'tag:yaml.org,2002:seq'
+ DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map'
+
+ yaml_implicit_resolvers = {} # type: Dict[Any, Any]
+ yaml_path_resolvers = {} # type: Dict[Any, Any]
+
+ def __init__(self, loadumper=None):
+ # type: (Any, Any) -> None
+ self.loadumper = loadumper
+ if self.loadumper is not None and getattr(self.loadumper, '_resolver', None) is None:
+ self.loadumper._resolver = self.loadumper
+ self._loader_version = None # type: Any
+ self.resolver_exact_paths = [] # type: List[Any]
+ self.resolver_prefix_paths = [] # type: List[Any]
+
+ @property
+ def parser(self):
+ # type: () -> Any
+ if self.loadumper is not None:
+ if hasattr(self.loadumper, 'typ'):
+ return self.loadumper.parser
+ return self.loadumper._parser
+ return None
+
+ @classmethod
+ def add_implicit_resolver_base(cls, tag, regexp, first):
+ # type: (Any, Any, Any) -> None
+ if 'yaml_implicit_resolvers' not in cls.__dict__:
+ # deepcopy doesn't work here
+ cls.yaml_implicit_resolvers = dict(
+ (k, cls.yaml_implicit_resolvers[k][:]) for k in cls.yaml_implicit_resolvers
+ )
+ if first is None:
+ first = [None]
+ for ch in first:
+ cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
+
+ @classmethod
+ def add_implicit_resolver(cls, tag, regexp, first):
+ # type: (Any, Any, Any) -> None
+ if 'yaml_implicit_resolvers' not in cls.__dict__:
+ # deepcopy doesn't work here
+ cls.yaml_implicit_resolvers = dict(
+ (k, cls.yaml_implicit_resolvers[k][:]) for k in cls.yaml_implicit_resolvers
+ )
+ if first is None:
+ first = [None]
+ for ch in first:
+ cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
+ implicit_resolvers.append(([(1, 2), (1, 1)], tag, regexp, first))
+
+ # @classmethod
+ # def add_implicit_resolver(cls, tag, regexp, first):
+
+ @classmethod
+ def add_path_resolver(cls, tag, path, kind=None):
+ # type: (Any, Any, Any) -> None
+ # Note: `add_path_resolver` is experimental. The API could be changed.
+ # `new_path` is a pattern that is matched against the path from the
+        # root to the node that is being considered. `new_path` elements are
+ # tuples `(node_check, index_check)`. `node_check` is a node class:
+ # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None`
+ # matches any kind of a node. `index_check` could be `None`, a boolean
+ # value, a string value, or a number. `None` and `False` match against
+ # any _value_ of sequence and mapping nodes. `True` matches against
+ # any _key_ of a mapping node. A string `index_check` matches against
+        # a mapping value that corresponds to a scalar key whose content is
+ # equal to the `index_check` value. An integer `index_check` matches
+ # against a sequence value with the index equal to `index_check`.
+ if 'yaml_path_resolvers' not in cls.__dict__:
+ cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
+ new_path = [] # type: List[Any]
+ for element in path:
+ if isinstance(element, (list, tuple)):
+ if len(element) == 2:
+ node_check, index_check = element
+ elif len(element) == 1:
+ node_check = element[0]
+ index_check = True
+ else:
+ raise ResolverError('Invalid path element: %s' % (element,))
+ else:
+ node_check = None
+ index_check = element
+ if node_check is str:
+ node_check = ScalarNode
+ elif node_check is list:
+ node_check = SequenceNode
+ elif node_check is dict:
+ node_check = MappingNode
+ elif (
+ node_check not in [ScalarNode, SequenceNode, MappingNode]
+ and not isinstance(node_check, string_types)
+ and node_check is not None
+ ):
+ raise ResolverError('Invalid node checker: %s' % (node_check,))
+ if not isinstance(index_check, (string_types, int)) and index_check is not None:
+ raise ResolverError('Invalid index checker: %s' % (index_check,))
+ new_path.append((node_check, index_check))
+ if kind is str:
+ kind = ScalarNode
+ elif kind is list:
+ kind = SequenceNode
+ elif kind is dict:
+ kind = MappingNode
+ elif kind not in [ScalarNode, SequenceNode, MappingNode] and kind is not None:
+ raise ResolverError('Invalid node kind: %s' % (kind,))
+ cls.yaml_path_resolvers[tuple(new_path), kind] = tag
+
+ def descend_resolver(self, current_node, current_index):
+ # type: (Any, Any) -> None
+ if not self.yaml_path_resolvers:
+ return
+ exact_paths = {}
+ prefix_paths = []
+ if current_node:
+ depth = len(self.resolver_prefix_paths)
+ for path, kind in self.resolver_prefix_paths[-1]:
+ if self.check_resolver_prefix(depth, path, kind, current_node, current_index):
+ if len(path) > depth:
+ prefix_paths.append((path, kind))
+ else:
+ exact_paths[kind] = self.yaml_path_resolvers[path, kind]
+ else:
+ for path, kind in self.yaml_path_resolvers:
+ if not path:
+ exact_paths[kind] = self.yaml_path_resolvers[path, kind]
+ else:
+ prefix_paths.append((path, kind))
+ self.resolver_exact_paths.append(exact_paths)
+ self.resolver_prefix_paths.append(prefix_paths)
+
+ def ascend_resolver(self):
+ # type: () -> None
+ if not self.yaml_path_resolvers:
+ return
+ self.resolver_exact_paths.pop()
+ self.resolver_prefix_paths.pop()
+
+ def check_resolver_prefix(self, depth, path, kind, current_node, current_index):
+ # type: (int, Text, Any, Any, Any) -> bool
+ node_check, index_check = path[depth - 1]
+ if isinstance(node_check, string_types):
+ if current_node.tag != node_check:
+ return False
+ elif node_check is not None:
+ if not isinstance(current_node, node_check):
+ return False
+ if index_check is True and current_index is not None:
+ return False
+ if (index_check is False or index_check is None) and current_index is None:
+ return False
+ if isinstance(index_check, string_types):
+ if not (
+ isinstance(current_index, ScalarNode) and index_check == current_index.value
+ ):
+ return False
+ elif isinstance(index_check, int) and not isinstance(index_check, bool):
+ if index_check != current_index:
+ return False
+ return True
+
+ def resolve(self, kind, value, implicit):
+ # type: (Any, Any, Any) -> Any
+ if kind is ScalarNode and implicit[0]:
+ if value == "":
+ resolvers = self.yaml_implicit_resolvers.get("", [])
+ else:
+ resolvers = self.yaml_implicit_resolvers.get(value[0], [])
+            # concatenate instead of += so the list stored in the dict is not
+            # mutated (and grown) on every resolve call
+            resolvers = resolvers + self.yaml_implicit_resolvers.get(None, [])
+ for tag, regexp in resolvers:
+ if regexp.match(value):
+ return tag
+ implicit = implicit[1]
+ if bool(self.yaml_path_resolvers):
+ exact_paths = self.resolver_exact_paths[-1]
+ if kind in exact_paths:
+ return exact_paths[kind]
+ if None in exact_paths:
+ return exact_paths[None]
+ if kind is ScalarNode:
+ return self.DEFAULT_SCALAR_TAG
+ elif kind is SequenceNode:
+ return self.DEFAULT_SEQUENCE_TAG
+ elif kind is MappingNode:
+ return self.DEFAULT_MAPPING_TAG
+
+ @property
+ def processing_version(self):
+ # type: () -> Any
+ return None
+
+
+class Resolver(BaseResolver):
+ pass
+
+
+for ir in implicit_resolvers:
+ if (1, 2) in ir[0]:
+ Resolver.add_implicit_resolver_base(*ir[1:])
+
+
+class VersionedResolver(BaseResolver):
+ """
+ contrary to the "normal" resolver, the smart resolver delays loading
+ the pattern matching rules. That way it can decide to load 1.1 rules
+ or the (default) 1.2 rules, that no longer support octal without 0o, sexagesimals
+ and Yes/No/On/Off booleans.
+ """
+
+ def __init__(self, version=None, loader=None, loadumper=None):
+ # type: (Optional[VersionType], Any, Any) -> None
+ if loader is None and loadumper is not None:
+ loader = loadumper
+ BaseResolver.__init__(self, loader)
+ self._loader_version = self.get_loader_version(version)
+ self._version_implicit_resolver = {} # type: Dict[Any, Any]
+
+ def add_version_implicit_resolver(self, version, tag, regexp, first):
+ # type: (VersionType, Any, Any, Any) -> None
+ if first is None:
+ first = [None]
+ impl_resolver = self._version_implicit_resolver.setdefault(version, {})
+ for ch in first:
+ impl_resolver.setdefault(ch, []).append((tag, regexp))
+
+ def get_loader_version(self, version):
+ # type: (Optional[VersionType]) -> Any
+ if version is None or isinstance(version, tuple):
+ return version
+ if isinstance(version, list):
+ return tuple(version)
+ # assume string
+ return tuple(map(int, version.split(u'.')))
+
+ @property
+ def versioned_resolver(self):
+ # type: () -> Any
+ """
+ select the resolver based on the version we are parsing
+ """
+ version = self.processing_version
+ if version not in self._version_implicit_resolver:
+ for x in implicit_resolvers:
+ if version in x[0]:
+ self.add_version_implicit_resolver(version, x[1], x[2], x[3])
+ return self._version_implicit_resolver[version]
+
+ def resolve(self, kind, value, implicit):
+ # type: (Any, Any, Any) -> Any
+ if kind is ScalarNode and implicit[0]:
+ if value == "":
+ resolvers = self.versioned_resolver.get("", [])
+ else:
+ resolvers = self.versioned_resolver.get(value[0], [])
+            # concatenate instead of += so the cached per-version list is not
+            # mutated on every resolve call
+            resolvers = resolvers + self.versioned_resolver.get(None, [])
+ for tag, regexp in resolvers:
+ if regexp.match(value):
+ return tag
+ implicit = implicit[1]
+ if bool(self.yaml_path_resolvers):
+ exact_paths = self.resolver_exact_paths[-1]
+ if kind in exact_paths:
+ return exact_paths[kind]
+ if None in exact_paths:
+ return exact_paths[None]
+ if kind is ScalarNode:
+ return self.DEFAULT_SCALAR_TAG
+ elif kind is SequenceNode:
+ return self.DEFAULT_SEQUENCE_TAG
+ elif kind is MappingNode:
+ return self.DEFAULT_MAPPING_TAG
+
+ @property
+ def processing_version(self):
+ # type: () -> Any
+ try:
+ version = self.loadumper._scanner.yaml_version
+ except AttributeError:
+ try:
+ if hasattr(self.loadumper, 'typ'):
+ version = self.loadumper.version
+ else:
+ version = self.loadumper._serializer.use_version # dumping
+ except AttributeError:
+ version = None
+ if version is None:
+ version = self._loader_version
+ if version is None:
+ version = _DEFAULT_YAML_VERSION
+ return version
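
A short sketch of what the versioned resolution buys; the `YAML` entry point
and its `version` attribute are assumed from upstream ruamel.yaml:

    from dynaconf.vendor.ruamel.yaml import YAML

    y12 = YAML()                           # YAML 1.2 rules by default
    print(y12.load('flag: yes')['flag'])   # 'yes' stays a string under 1.2

    y11 = YAML()
    y11.version = (1, 1)                   # VersionedResolver loads the 1.1 rules
    print(y11.load('flag: yes')['flag'])   # True: 1.1 resolves yes/no/on/off as bool
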
diff --git a/libs/dynaconf/vendor/ruamel/yaml/scalarbool.py b/libs/dynaconf/vendor/ruamel/yaml/scalarbool.py
new file mode 100644
index 000000000..e3ea2f245
--- /dev/null
+++ b/libs/dynaconf/vendor/ruamel/yaml/scalarbool.py
@@ -0,0 +1,51 @@
+# coding: utf-8
+
+from __future__ import print_function, absolute_import, division, unicode_literals
+
+"""
+You cannot subclass bool, yet subclassing is necessary for round-tripping
+anchored bool values (and also if you want to preserve the original way of
+writing).
+
+bool.__bases__ is type 'int', so int is what is used as the basis for
+ScalarBoolean as well.
+
+You can use these in an if statement, but not when testing for equivalence.
+"""
+
+from .anchor import Anchor
+
+if False: # MYPY
+ from typing import Text, Any, Dict, List # NOQA
+
+__all__ = ['ScalarBoolean']
+
+# no need for no_limit_int -> int
+
+
+class ScalarBoolean(int):
+ def __new__(cls, *args, **kw):
+ # type: (Any, Any, Any) -> Any
+ anchor = kw.pop('anchor', None) # type: ignore
+ b = int.__new__(cls, *args, **kw) # type: ignore
+ if anchor is not None:
+ b.yaml_set_anchor(anchor, always_dump=True)
+ return b
+
+ @property
+ def anchor(self):
+ # type: () -> Any
+ if not hasattr(self, Anchor.attrib):
+ setattr(self, Anchor.attrib, Anchor())
+ return getattr(self, Anchor.attrib)
+
+ def yaml_anchor(self, any=False):
+ # type: (bool) -> Any
+ if not hasattr(self, Anchor.attrib):
+ return None
+ if any or self.anchor.always_dump:
+ return self.anchor
+ return None
+
+ def yaml_set_anchor(self, value, always_dump=False):
+ # type: (Any, bool) -> None
+ self.anchor.value = value
+ self.anchor.always_dump = always_dump
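
A minimal illustration of the semantics described in the docstring above,
using the vendored import path:

    from dynaconf.vendor.ruamel.yaml.scalarbool import ScalarBoolean

    b = ScalarBoolean(True, anchor='flag')
    assert b                      # truthiness works: it is the int 1
    assert b == 1                 # it is an int, not a bool ...
    assert b is not True          # ... so identity tests against True fail
    print(b.yaml_anchor().value)  # 'flag' (dumped because always_dump is set)
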
diff --git a/libs/dynaconf/vendor/ruamel/yaml/scalarfloat.py b/libs/dynaconf/vendor/ruamel/yaml/scalarfloat.py
new file mode 100644
index 000000000..9553cd55f
--- /dev/null
+++ b/libs/dynaconf/vendor/ruamel/yaml/scalarfloat.py
@@ -0,0 +1,127 @@
+# coding: utf-8
+
+from __future__ import print_function, absolute_import, division, unicode_literals
+
+import sys
+from .compat import no_limit_int # NOQA
+from .anchor import Anchor
+
+if False: # MYPY
+ from typing import Text, Any, Dict, List # NOQA
+
+__all__ = ['ScalarFloat', 'ExponentialFloat', 'ExponentialCapsFloat']
+
+
+class ScalarFloat(float):
+ def __new__(cls, *args, **kw):
+ # type: (Any, Any, Any) -> Any
+ width = kw.pop('width', None) # type: ignore
+ prec = kw.pop('prec', None) # type: ignore
+ m_sign = kw.pop('m_sign', None) # type: ignore
+ m_lead0 = kw.pop('m_lead0', 0) # type: ignore
+ exp = kw.pop('exp', None) # type: ignore
+ e_width = kw.pop('e_width', None) # type: ignore
+ e_sign = kw.pop('e_sign', None) # type: ignore
+ underscore = kw.pop('underscore', None) # type: ignore
+ anchor = kw.pop('anchor', None) # type: ignore
+ v = float.__new__(cls, *args, **kw) # type: ignore
+ v._width = width
+ v._prec = prec
+ v._m_sign = m_sign
+ v._m_lead0 = m_lead0
+ v._exp = exp
+ v._e_width = e_width
+ v._e_sign = e_sign
+ v._underscore = underscore
+ if anchor is not None:
+ v.yaml_set_anchor(anchor, always_dump=True)
+ return v
+
+ def __iadd__(self, a): # type: ignore
+ # type: (Any) -> Any
+ return float(self) + a
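+        # NOTE: the unconditional return above short-circuits this method, so
+        # the width/underscore-preserving code below (and the same pattern in
+        # the other in-place operators of this class) is unreachable dead code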
+ x = type(self)(self + a)
+ x._width = self._width
+ x._underscore = self._underscore[:] if self._underscore is not None else None # NOQA
+ return x
+
+ def __ifloordiv__(self, a): # type: ignore
+ # type: (Any) -> Any
+ return float(self) // a
+ x = type(self)(self // a)
+ x._width = self._width
+ x._underscore = self._underscore[:] if self._underscore is not None else None # NOQA
+ return x
+
+ def __imul__(self, a): # type: ignore
+ # type: (Any) -> Any
+ return float(self) * a
+ x = type(self)(self * a)
+ x._width = self._width
+ x._underscore = self._underscore[:] if self._underscore is not None else None # NOQA
+ x._prec = self._prec # check for others
+ return x
+
+ def __ipow__(self, a): # type: ignore
+ # type: (Any) -> Any
+ return float(self) ** a
+ x = type(self)(self ** a)
+ x._width = self._width
+ x._underscore = self._underscore[:] if self._underscore is not None else None # NOQA
+ return x
+
+ def __isub__(self, a): # type: ignore
+ # type: (Any) -> Any
+ return float(self) - a
+ x = type(self)(self - a)
+ x._width = self._width
+ x._underscore = self._underscore[:] if self._underscore is not None else None # NOQA
+ return x
+
+ @property
+ def anchor(self):
+ # type: () -> Any
+ if not hasattr(self, Anchor.attrib):
+ setattr(self, Anchor.attrib, Anchor())
+ return getattr(self, Anchor.attrib)
+
+ def yaml_anchor(self, any=False):
+ # type: (bool) -> Any
+ if not hasattr(self, Anchor.attrib):
+ return None
+ if any or self.anchor.always_dump:
+ return self.anchor
+ return None
+
+ def yaml_set_anchor(self, value, always_dump=False):
+ # type: (Any, bool) -> None
+ self.anchor.value = value
+ self.anchor.always_dump = always_dump
+
+ def dump(self, out=sys.stdout):
+ # type: (Any) -> Any
+ out.write(
+ 'ScalarFloat({}| w:{}, p:{}, s:{}, lz:{}, _:{}|{}, w:{}, s:{})\n'.format(
+ self,
+ self._width, # type: ignore
+ self._prec, # type: ignore
+ self._m_sign, # type: ignore
+ self._m_lead0, # type: ignore
+ self._underscore, # type: ignore
+ self._exp, # type: ignore
+ self._e_width, # type: ignore
+ self._e_sign, # type: ignore
+ )
+ )
+
+
+class ExponentialFloat(ScalarFloat):
+ def __new__(cls, value, width=None, underscore=None):
+ # type: (Any, Any, Any) -> Any
+ return ScalarFloat.__new__(cls, value, width=width, underscore=underscore)
+
+
+class ExponentialCapsFloat(ScalarFloat):
+ def __new__(cls, value, width=None, underscore=None):
+ # type: (Any, Any, Any) -> Any
+ return ScalarFloat.__new__(cls, value, width=width, underscore=underscore)
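
A sketch of the bookkeeping a ScalarFloat carries so that
represent_scalar_float can re-emit the original text (vendored import path
assumed):

    from dynaconf.vendor.ruamel.yaml.scalarfloat import ScalarFloat

    # as if "3.140" had been read: total width 5, the dot at index 1
    f = ScalarFloat(3.14, width=5, prec=1)
    assert float(f) == 3.14   # behaves as a normal float in arithmetic
    f.dump()                  # prints width/precision/sign/exponent details
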
diff --git a/libs/dynaconf/vendor/ruamel/yaml/scalarint.py b/libs/dynaconf/vendor/ruamel/yaml/scalarint.py
new file mode 100644
index 000000000..305af257e
--- /dev/null
+++ b/libs/dynaconf/vendor/ruamel/yaml/scalarint.py
@@ -0,0 +1,130 @@
+# coding: utf-8
+
+from __future__ import print_function, absolute_import, division, unicode_literals
+
+from .compat import no_limit_int # NOQA
+from .anchor import Anchor
+
+if False: # MYPY
+ from typing import Text, Any, Dict, List # NOQA
+
+__all__ = ['ScalarInt', 'BinaryInt', 'OctalInt', 'HexInt', 'HexCapsInt', 'DecimalInt']
+
+
+class ScalarInt(no_limit_int):
+ def __new__(cls, *args, **kw):
+ # type: (Any, Any, Any) -> Any
+ width = kw.pop('width', None) # type: ignore
+ underscore = kw.pop('underscore', None) # type: ignore
+ anchor = kw.pop('anchor', None) # type: ignore
+ v = no_limit_int.__new__(cls, *args, **kw) # type: ignore
+ v._width = width
+ v._underscore = underscore
+ if anchor is not None:
+ v.yaml_set_anchor(anchor, always_dump=True)
+ return v
+
+ def __iadd__(self, a): # type: ignore
+ # type: (Any) -> Any
+ x = type(self)(self + a)
+ x._width = self._width # type: ignore
+ x._underscore = ( # type: ignore
+ self._underscore[:] if self._underscore is not None else None # type: ignore
+ ) # NOQA
+ return x
+
+ def __ifloordiv__(self, a): # type: ignore
+ # type: (Any) -> Any
+ x = type(self)(self // a)
+ x._width = self._width # type: ignore
+ x._underscore = ( # type: ignore
+ self._underscore[:] if self._underscore is not None else None # type: ignore
+ ) # NOQA
+ return x
+
+ def __imul__(self, a): # type: ignore
+ # type: (Any) -> Any
+ x = type(self)(self * a)
+ x._width = self._width # type: ignore
+ x._underscore = ( # type: ignore
+ self._underscore[:] if self._underscore is not None else None # type: ignore
+ ) # NOQA
+ return x
+
+ def __ipow__(self, a): # type: ignore
+ # type: (Any) -> Any
+ x = type(self)(self ** a)
+ x._width = self._width # type: ignore
+ x._underscore = ( # type: ignore
+ self._underscore[:] if self._underscore is not None else None # type: ignore
+ ) # NOQA
+ return x
+
+ def __isub__(self, a): # type: ignore
+ # type: (Any) -> Any
+ x = type(self)(self - a)
+ x._width = self._width # type: ignore
+ x._underscore = ( # type: ignore
+ self._underscore[:] if self._underscore is not None else None # type: ignore
+ ) # NOQA
+ return x
+
+ @property
+ def anchor(self):
+ # type: () -> Any
+ if not hasattr(self, Anchor.attrib):
+ setattr(self, Anchor.attrib, Anchor())
+ return getattr(self, Anchor.attrib)
+
+ def yaml_anchor(self, any=False):
+ # type: (bool) -> Any
+ if not hasattr(self, Anchor.attrib):
+ return None
+ if any or self.anchor.always_dump:
+ return self.anchor
+ return None
+
+ def yaml_set_anchor(self, value, always_dump=False):
+ # type: (Any, bool) -> None
+ self.anchor.value = value
+ self.anchor.always_dump = always_dump
+
+
+class BinaryInt(ScalarInt):
+ def __new__(cls, value, width=None, underscore=None, anchor=None):
+ # type: (Any, Any, Any, Any) -> Any
+ return ScalarInt.__new__(cls, value, width=width, underscore=underscore, anchor=anchor)
+
+
+class OctalInt(ScalarInt):
+ def __new__(cls, value, width=None, underscore=None, anchor=None):
+ # type: (Any, Any, Any, Any) -> Any
+ return ScalarInt.__new__(cls, value, width=width, underscore=underscore, anchor=anchor)
+
+
+# mixed casing of A-F is not supported; when loading, the first non-digit
+# character determines the case
+
+
+class HexInt(ScalarInt):
+ """uses lower case (a-f)"""
+
+ def __new__(cls, value, width=None, underscore=None, anchor=None):
+ # type: (Any, Any, Any, Any) -> Any
+ return ScalarInt.__new__(cls, value, width=width, underscore=underscore, anchor=anchor)
+
+
+class HexCapsInt(ScalarInt):
+ """uses upper case (A-F)"""
+
+ def __new__(cls, value, width=None, underscore=None, anchor=None):
+ # type: (Any, Any, Any, Any) -> Any
+ return ScalarInt.__new__(cls, value, width=width, underscore=underscore, anchor=anchor)
+
+
+class DecimalInt(ScalarInt):
+ """needed if anchor"""
+
+ def __new__(cls, value, width=None, underscore=None, anchor=None):
+ # type: (Any, Any, Any, Any) -> Any
+ return ScalarInt.__new__(cls, value, width=width, underscore=underscore, anchor=anchor)
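
A sketch of format-preserving integer arithmetic with these subclasses
(vendored import path assumed):

    from dynaconf.vendor.ruamel.yaml.scalarint import HexInt, OctalInt

    h = HexInt(0x1F, width=4)   # as if read from '0x001f'
    h += 1                      # ScalarInt.__iadd__ copies the width across
    print('0x{:0{}x}'.format(int(h), h._width))   # 0x0020

    o = OctalInt(0o755, width=3)
    assert int(o) == 493        # still an ordinary int underneath
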
diff --git a/libs/dynaconf/vendor/ruamel/yaml/scalarstring.py b/libs/dynaconf/vendor/ruamel/yaml/scalarstring.py
new file mode 100644
index 000000000..2ec438386
--- /dev/null
+++ b/libs/dynaconf/vendor/ruamel/yaml/scalarstring.py
@@ -0,0 +1,156 @@
+# coding: utf-8
+
+from __future__ import print_function, absolute_import, division, unicode_literals
+
+from .compat import text_type
+from .anchor import Anchor
+
+if False: # MYPY
+ from typing import Text, Any, Dict, List # NOQA
+
+__all__ = [
+ 'ScalarString',
+ 'LiteralScalarString',
+ 'FoldedScalarString',
+ 'SingleQuotedScalarString',
+ 'DoubleQuotedScalarString',
+ 'PlainScalarString',
+ # PreservedScalarString is the old name, as it was the first to be preserved on rt,
+ # use LiteralScalarString instead
+ 'PreservedScalarString',
+]
+
+
+class ScalarString(text_type):
+ __slots__ = Anchor.attrib
+
+ def __new__(cls, *args, **kw):
+ # type: (Any, Any) -> Any
+ anchor = kw.pop('anchor', None) # type: ignore
+ ret_val = text_type.__new__(cls, *args, **kw) # type: ignore
+ if anchor is not None:
+ ret_val.yaml_set_anchor(anchor, always_dump=True)
+ return ret_val
+
+ def replace(self, old, new, maxreplace=-1):
+ # type: (Any, Any, int) -> Any
+ return type(self)((text_type.replace(self, old, new, maxreplace)))
+
+ @property
+ def anchor(self):
+ # type: () -> Any
+ if not hasattr(self, Anchor.attrib):
+ setattr(self, Anchor.attrib, Anchor())
+ return getattr(self, Anchor.attrib)
+
+ def yaml_anchor(self, any=False):
+ # type: (bool) -> Any
+ if not hasattr(self, Anchor.attrib):
+ return None
+ if any or self.anchor.always_dump:
+ return self.anchor
+ return None
+
+ def yaml_set_anchor(self, value, always_dump=False):
+ # type: (Any, bool) -> None
+ self.anchor.value = value
+ self.anchor.always_dump = always_dump
+
+
+class LiteralScalarString(ScalarString):
+ __slots__ = 'comment' # the comment after the | on the first line
+
+ style = '|'
+
+ def __new__(cls, value, anchor=None):
+ # type: (Text, Any) -> Any
+ return ScalarString.__new__(cls, value, anchor=anchor)
+
+
+PreservedScalarString = LiteralScalarString
+
+
+class FoldedScalarString(ScalarString):
+ __slots__ = ('fold_pos', 'comment') # the comment after the > on the first line
+
+ style = '>'
+
+ def __new__(cls, value, anchor=None):
+ # type: (Text, Any) -> Any
+ return ScalarString.__new__(cls, value, anchor=anchor)
+
+
+class SingleQuotedScalarString(ScalarString):
+ __slots__ = ()
+
+ style = "'"
+
+ def __new__(cls, value, anchor=None):
+ # type: (Text, Any) -> Any
+ return ScalarString.__new__(cls, value, anchor=anchor)
+
+
+class DoubleQuotedScalarString(ScalarString):
+ __slots__ = ()
+
+ style = '"'
+
+ def __new__(cls, value, anchor=None):
+ # type: (Text, Any) -> Any
+ return ScalarString.__new__(cls, value, anchor=anchor)
+
+
+class PlainScalarString(ScalarString):
+ __slots__ = ()
+
+ style = ''
+
+ def __new__(cls, value, anchor=None):
+ # type: (Text, Any) -> Any
+ return ScalarString.__new__(cls, value, anchor=anchor)
+
+
+def preserve_literal(s):
+ # type: (Text) -> Text
+ return LiteralScalarString(s.replace('\r\n', '\n').replace('\r', '\n'))
+
+
+def walk_tree(base, map=None):
+ # type: (Any, Any) -> None
+ """
+    the routine here walks over a simple yaml tree (recursing into
+    dict values and list items) and converts strings that
+    have multiple lines to literal scalars
+
+    You can also provide an explicit (ordered) mapping for multiple transforms
+    (the first matching one is applied):
+ map = ruamel.yaml.compat.ordereddict
+ map['\n'] = preserve_literal
+ map[':'] = SingleQuotedScalarString
+ walk_tree(data, map=map)
+ """
+ from dynaconf.vendor.ruamel.yaml.compat import string_types
+ from dynaconf.vendor.ruamel.yaml.compat import MutableMapping, MutableSequence # type: ignore
+
+ if map is None:
+ map = {'\n': preserve_literal}
+
+ if isinstance(base, MutableMapping):
+ for k in base:
+ v = base[k] # type: Text
+ if isinstance(v, string_types):
+ for ch in map:
+ if ch in v:
+ base[k] = map[ch](v)
+ break
+ else:
+                walk_tree(v, map=map)  # pass the map on, so custom transforms recurse
+ elif isinstance(base, MutableSequence):
+ for idx, elem in enumerate(base):
+ if isinstance(elem, string_types):
+ for ch in map:
+ if ch in elem: # type: ignore
+ base[idx] = map[ch](elem)
+ break
+ else:
+                walk_tree(elem, map=map)  # pass the map on, so custom transforms recurse
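
A sketch of walk_tree in combination with a round-trip dump; the `YAML` entry
point is assumed from upstream ruamel.yaml:

    import sys
    from dynaconf.vendor.ruamel.yaml import YAML
    from dynaconf.vendor.ruamel.yaml.scalarstring import walk_tree

    data = {'motd': 'first line\nsecond line\n', 'plain': 'one line'}
    walk_tree(data)   # multi-line value becomes a LiteralScalarString in place

    YAML().dump(data, sys.stdout)
    # motd: |
    #   first line
    #   second line
    # plain: one line
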
diff --git a/libs/dynaconf/vendor/ruamel/yaml/scanner.py b/libs/dynaconf/vendor/ruamel/yaml/scanner.py
new file mode 100644
index 000000000..7872a4cd6
--- /dev/null
+++ b/libs/dynaconf/vendor/ruamel/yaml/scanner.py
@@ -0,0 +1,1980 @@
+# coding: utf-8
+
+from __future__ import print_function, absolute_import, division, unicode_literals
+
+# Scanner produces tokens of the following types:
+# STREAM-START
+# STREAM-END
+# DIRECTIVE(name, value)
+# DOCUMENT-START
+# DOCUMENT-END
+# BLOCK-SEQUENCE-START
+# BLOCK-MAPPING-START
+# BLOCK-END
+# FLOW-SEQUENCE-START
+# FLOW-MAPPING-START
+# FLOW-SEQUENCE-END
+# FLOW-MAPPING-END
+# BLOCK-ENTRY
+# FLOW-ENTRY
+# KEY
+# VALUE
+# ALIAS(value)
+# ANCHOR(value)
+# TAG(value)
+# SCALAR(value, plain, style)
+#
+# RoundTripScanner
+# COMMENT(value)
+#
+# Read comments in the Scanner code for more details.
+#
+
+from .error import MarkedYAMLError
+from .tokens import * # NOQA
+from .compat import utf8, unichr, PY3, check_anchorname_char, nprint # NOQA
+
+if False: # MYPY
+ from typing import Any, Dict, Optional, List, Union, Text # NOQA
+ from .compat import VersionType # NOQA
+
+__all__ = ['Scanner', 'RoundTripScanner', 'ScannerError']
+
+
+_THE_END = '\n\0\r\x85\u2028\u2029'
+_THE_END_SPACE_TAB = ' \n\0\t\r\x85\u2028\u2029'
+_SPACE_TAB = ' \t'
+
+
+class ScannerError(MarkedYAMLError):
+ pass
+
+
+class SimpleKey(object):
+    # See the simple keys treatment below.
+
+ def __init__(self, token_number, required, index, line, column, mark):
+ # type: (Any, Any, int, int, int, Any) -> None
+ self.token_number = token_number
+ self.required = required
+ self.index = index
+ self.line = line
+ self.column = column
+ self.mark = mark
+
+
+class Scanner(object):
+ def __init__(self, loader=None):
+ # type: (Any) -> None
+ """Initialize the scanner."""
+ # It is assumed that Scanner and Reader will have a common descendant.
+        # Reader does the dirty work of checking for BOM and converting the
+ # input data to Unicode. It also adds NUL to the end.
+ #
+ # Reader supports the following methods
+ # self.peek(i=0) # peek the next i-th character
+ # self.prefix(l=1) # peek the next l characters
+ # self.forward(l=1) # read the next l characters and move the pointer
+
+ self.loader = loader
+ if self.loader is not None and getattr(self.loader, '_scanner', None) is None:
+ self.loader._scanner = self
+ self.reset_scanner()
+ self.first_time = False
+ self.yaml_version = None # type: Any
+
+ @property
+ def flow_level(self):
+ # type: () -> int
+ return len(self.flow_context)
+
+ def reset_scanner(self):
+ # type: () -> None
+ # Had we reached the end of the stream?
+ self.done = False
+
+ # flow_context is an expanding/shrinking list consisting of '{' and '['
+ # for each unclosed flow context. If empty list that means block context
+ self.flow_context = [] # type: List[Text]
+
+ # List of processed tokens that are not yet emitted.
+ self.tokens = [] # type: List[Any]
+
+ # Add the STREAM-START token.
+ self.fetch_stream_start()
+
+ # Number of tokens that were emitted through the `get_token` method.
+ self.tokens_taken = 0
+
+ # The current indentation level.
+ self.indent = -1
+
+ # Past indentation levels.
+ self.indents = [] # type: List[int]
+
+ # Variables related to simple keys treatment.
+
+ # A simple key is a key that is not denoted by the '?' indicator.
+ # Example of simple keys:
+ # ---
+ # block simple key: value
+ # ? not a simple key:
+ # : { flow simple key: value }
+ # We emit the KEY token before all keys, so when we find a potential
+ # simple key, we try to locate the corresponding ':' indicator.
+ # Simple keys should be limited to a single line and 1024 characters.
+
+ # Can a simple key start at the current position? A simple key may
+ # start:
+ # - at the beginning of the line, not counting indentation spaces
+ # (in block context),
+ # - after '{', '[', ',' (in the flow context),
+ # - after '?', ':', '-' (in the block context).
+ # In the block context, this flag also signifies if a block collection
+ # may start at the current position.
+ self.allow_simple_key = True
+
+ # Keep track of possible simple keys. This is a dictionary. The key
+        # is `flow_level`; there can be no more than one possible simple key
+ # for each level. The value is a SimpleKey record:
+ # (token_number, required, index, line, column, mark)
+ # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
+ # '[', or '{' tokens.
+ self.possible_simple_keys = {} # type: Dict[Any, Any]
+
+ @property
+ def reader(self):
+ # type: () -> Any
+ try:
+ return self._scanner_reader # type: ignore
+ except AttributeError:
+ if hasattr(self.loader, 'typ'):
+ self._scanner_reader = self.loader.reader
+ else:
+ self._scanner_reader = self.loader._reader
+ return self._scanner_reader
+
+ @property
+ def scanner_processing_version(self): # prefix until un-composited
+ # type: () -> Any
+ if hasattr(self.loader, 'typ'):
+ return self.loader.resolver.processing_version
+ return self.loader.processing_version
+
+ # Public methods.
+
+ def check_token(self, *choices):
+ # type: (Any) -> bool
+ # Check if the next token is one of the given types.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if bool(self.tokens):
+ if not choices:
+ return True
+ for choice in choices:
+ if isinstance(self.tokens[0], choice):
+ return True
+ return False
+
+ def peek_token(self):
+ # type: () -> Any
+        # Return the next token, but do not delete it from the queue.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if bool(self.tokens):
+ return self.tokens[0]
+
+ def get_token(self):
+ # type: () -> Any
+ # Return the next token.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if bool(self.tokens):
+ self.tokens_taken += 1
+ return self.tokens.pop(0)
+
+ # Private methods.
+
+ def need_more_tokens(self):
+ # type: () -> bool
+ if self.done:
+ return False
+ if not self.tokens:
+ return True
+ # The current token may be a potential simple key, so we
+ # need to look further.
+ self.stale_possible_simple_keys()
+ if self.next_possible_simple_key() == self.tokens_taken:
+ return True
+ return False
+
+ def fetch_comment(self, comment):
+ # type: (Any) -> None
+ raise NotImplementedError
+
+ def fetch_more_tokens(self):
+ # type: () -> Any
+ # Eat whitespaces and comments until we reach the next token.
+ comment = self.scan_to_next_token()
+ if comment is not None: # never happens for base scanner
+ return self.fetch_comment(comment)
+ # Remove obsolete possible simple keys.
+ self.stale_possible_simple_keys()
+
+ # Compare the current indentation and column. It may add some tokens
+ # and decrease the current indentation level.
+ self.unwind_indent(self.reader.column)
+
+ # Peek the next character.
+ ch = self.reader.peek()
+
+ # Is it the end of stream?
+ if ch == '\0':
+ return self.fetch_stream_end()
+
+ # Is it a directive?
+ if ch == '%' and self.check_directive():
+ return self.fetch_directive()
+
+ # Is it the document start?
+ if ch == '-' and self.check_document_start():
+ return self.fetch_document_start()
+
+ # Is it the document end?
+ if ch == '.' and self.check_document_end():
+ return self.fetch_document_end()
+
+ # TODO: support for BOM within a stream.
+ # if ch == u'\uFEFF':
+ # return self.fetch_bom() <-- issue BOMToken
+
+ # Note: the order of the following checks is NOT significant.
+
+ # Is it the flow sequence start indicator?
+ if ch == '[':
+ return self.fetch_flow_sequence_start()
+
+ # Is it the flow mapping start indicator?
+ if ch == '{':
+ return self.fetch_flow_mapping_start()
+
+ # Is it the flow sequence end indicator?
+ if ch == ']':
+ return self.fetch_flow_sequence_end()
+
+ # Is it the flow mapping end indicator?
+ if ch == '}':
+ return self.fetch_flow_mapping_end()
+
+ # Is it the flow entry indicator?
+ if ch == ',':
+ return self.fetch_flow_entry()
+
+ # Is it the block entry indicator?
+ if ch == '-' and self.check_block_entry():
+ return self.fetch_block_entry()
+
+ # Is it the key indicator?
+ if ch == '?' and self.check_key():
+ return self.fetch_key()
+
+ # Is it the value indicator?
+ if ch == ':' and self.check_value():
+ return self.fetch_value()
+
+ # Is it an alias?
+ if ch == '*':
+ return self.fetch_alias()
+
+ # Is it an anchor?
+ if ch == '&':
+ return self.fetch_anchor()
+
+ # Is it a tag?
+ if ch == '!':
+ return self.fetch_tag()
+
+ # Is it a literal scalar?
+ if ch == '|' and not self.flow_level:
+ return self.fetch_literal()
+
+ # Is it a folded scalar?
+ if ch == '>' and not self.flow_level:
+ return self.fetch_folded()
+
+ # Is it a single quoted scalar?
+ if ch == "'":
+ return self.fetch_single()
+
+ # Is it a double quoted scalar?
+ if ch == '"':
+ return self.fetch_double()
+
+ # It must be a plain scalar then.
+ if self.check_plain():
+ return self.fetch_plain()
+
+ # No? It's an error. Let's produce a nice error message.
+ raise ScannerError(
+ 'while scanning for the next token',
+ None,
+ 'found character %r that cannot start any token' % utf8(ch),
+ self.reader.get_mark(),
+ )
+
+ # Simple keys treatment.
+
+ def next_possible_simple_key(self):
+ # type: () -> Any
+ # Return the number of the nearest possible simple key. Actually we
+ # don't need to loop through the whole dictionary. We may replace it
+ # with the following code:
+ # if not self.possible_simple_keys:
+ # return None
+ # return self.possible_simple_keys[
+ # min(self.possible_simple_keys.keys())].token_number
+ min_token_number = None
+ for level in self.possible_simple_keys:
+ key = self.possible_simple_keys[level]
+ if min_token_number is None or key.token_number < min_token_number:
+ min_token_number = key.token_number
+ return min_token_number
+
+ def stale_possible_simple_keys(self):
+ # type: () -> None
+ # Remove entries that are no longer possible simple keys. According to
+ # the YAML specification, simple keys
+ # - should be limited to a single line,
+ # - should be no longer than 1024 characters.
+ # Disabling this procedure will allow simple keys of any length and
+ # height (may cause problems if indentation is broken though).
+ for level in list(self.possible_simple_keys):
+ key = self.possible_simple_keys[level]
+ if key.line != self.reader.line or self.reader.index - key.index > 1024:
+ if key.required:
+ raise ScannerError(
+ 'while scanning a simple key',
+ key.mark,
+ "could not find expected ':'",
+ self.reader.get_mark(),
+ )
+ del self.possible_simple_keys[level]
+
+ def save_possible_simple_key(self):
+ # type: () -> None
+ # The next token may start a simple key. We check if it's possible
+ # and save its position. This function is called for
+ # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'.
+
+ # Check if a simple key is required at the current position.
+ required = not self.flow_level and self.indent == self.reader.column
+
+        # The next token might be a simple key. Let's save its number and
+ # position.
+ if self.allow_simple_key:
+ self.remove_possible_simple_key()
+ token_number = self.tokens_taken + len(self.tokens)
+ key = SimpleKey(
+ token_number,
+ required,
+ self.reader.index,
+ self.reader.line,
+ self.reader.column,
+ self.reader.get_mark(),
+ )
+ self.possible_simple_keys[self.flow_level] = key
+
+ def remove_possible_simple_key(self):
+ # type: () -> None
+ # Remove the saved possible key position at the current flow level.
+ if self.flow_level in self.possible_simple_keys:
+ key = self.possible_simple_keys[self.flow_level]
+
+ if key.required:
+ raise ScannerError(
+ 'while scanning a simple key',
+ key.mark,
+ "could not find expected ':'",
+ self.reader.get_mark(),
+ )
+
+ del self.possible_simple_keys[self.flow_level]
+
+ # Indentation functions.
+
+ def unwind_indent(self, column):
+ # type: (Any) -> None
+ # In flow context, tokens should respect indentation.
+ # Actually the condition should be `self.indent >= column` according to
+ # the spec. But this condition will prohibit intuitively correct
+ # constructions such as
+ # key : {
+ # }
+ # ####
+ # if self.flow_level and self.indent > column:
+ # raise ScannerError(None, None,
+ # "invalid intendation or unclosed '[' or '{'",
+ # self.reader.get_mark())
+
+ # In the flow context, indentation is ignored. We make the scanner less
+        # restrictive than the specification requires.
+ if bool(self.flow_level):
+ return
+
+ # In block context, we may need to issue the BLOCK-END tokens.
+ while self.indent > column:
+ mark = self.reader.get_mark()
+ self.indent = self.indents.pop()
+ self.tokens.append(BlockEndToken(mark, mark))
+
+ def add_indent(self, column):
+ # type: (int) -> bool
+ # Check if we need to increase indentation.
+ if self.indent < column:
+ self.indents.append(self.indent)
+ self.indent = column
+ return True
+ return False
+
+ # Fetchers.
+
+ def fetch_stream_start(self):
+ # type: () -> None
+ # We always add STREAM-START as the first token and STREAM-END as the
+ # last token.
+ # Read the token.
+ mark = self.reader.get_mark()
+ # Add STREAM-START.
+ self.tokens.append(StreamStartToken(mark, mark, encoding=self.reader.encoding))
+
+ def fetch_stream_end(self):
+ # type: () -> None
+        # Set the current indentation to -1.
+ self.unwind_indent(-1)
+ # Reset simple keys.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+ self.possible_simple_keys = {}
+ # Read the token.
+ mark = self.reader.get_mark()
+ # Add STREAM-END.
+ self.tokens.append(StreamEndToken(mark, mark))
+        # The stream is finished.
+ self.done = True
+
+ def fetch_directive(self):
+ # type: () -> None
+        # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+
+ # Scan and add DIRECTIVE.
+ self.tokens.append(self.scan_directive())
+
+ def fetch_document_start(self):
+ # type: () -> None
+ self.fetch_document_indicator(DocumentStartToken)
+
+ def fetch_document_end(self):
+ # type: () -> None
+ self.fetch_document_indicator(DocumentEndToken)
+
+ def fetch_document_indicator(self, TokenClass):
+ # type: (Any) -> None
+        # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys. Note that there could not be a block collection
+ # after '---'.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+
+ # Add DOCUMENT-START or DOCUMENT-END.
+ start_mark = self.reader.get_mark()
+ self.reader.forward(3)
+ end_mark = self.reader.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_sequence_start(self):
+ # type: () -> None
+ self.fetch_flow_collection_start(FlowSequenceStartToken, to_push='[')
+
+ def fetch_flow_mapping_start(self):
+ # type: () -> None
+ self.fetch_flow_collection_start(FlowMappingStartToken, to_push='{')
+
+ def fetch_flow_collection_start(self, TokenClass, to_push):
+ # type: (Any, Text) -> None
+ # '[' and '{' may start a simple key.
+ self.save_possible_simple_key()
+ # Increase the flow level.
+ self.flow_context.append(to_push)
+ # Simple keys are allowed after '[' and '{'.
+ self.allow_simple_key = True
+ # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
+ start_mark = self.reader.get_mark()
+ self.reader.forward()
+ end_mark = self.reader.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_sequence_end(self):
+ # type: () -> None
+ self.fetch_flow_collection_end(FlowSequenceEndToken)
+
+ def fetch_flow_mapping_end(self):
+ # type: () -> None
+ self.fetch_flow_collection_end(FlowMappingEndToken)
+
+ def fetch_flow_collection_end(self, TokenClass):
+ # type: (Any) -> None
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+ # Decrease the flow level.
+ try:
+ popped = self.flow_context.pop() # NOQA
+ except IndexError:
+ # We must not be in a list or object.
+ # Defer error handling to the parser.
+ pass
+ # No simple keys after ']' or '}'.
+ self.allow_simple_key = False
+ # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
+ start_mark = self.reader.get_mark()
+ self.reader.forward()
+ end_mark = self.reader.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_entry(self):
+ # type: () -> None
+ # Simple keys are allowed after ','.
+ self.allow_simple_key = True
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+ # Add FLOW-ENTRY.
+ start_mark = self.reader.get_mark()
+ self.reader.forward()
+ end_mark = self.reader.get_mark()
+ self.tokens.append(FlowEntryToken(start_mark, end_mark))
+
+ def fetch_block_entry(self):
+ # type: () -> None
+ # Block context needs additional checks.
+ if not self.flow_level:
+ # Are we allowed to start a new entry?
+ if not self.allow_simple_key:
+ raise ScannerError(
+ None, None, 'sequence entries are not allowed here', self.reader.get_mark()
+ )
+ # We may need to add BLOCK-SEQUENCE-START.
+ if self.add_indent(self.reader.column):
+ mark = self.reader.get_mark()
+ self.tokens.append(BlockSequenceStartToken(mark, mark))
+        # It's an error for the block entry to occur in the flow context,
+        # but we let the parser detect this.
+ # Simple keys are allowed after '-'.
+ self.allow_simple_key = True
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add BLOCK-ENTRY.
+ start_mark = self.reader.get_mark()
+ self.reader.forward()
+ end_mark = self.reader.get_mark()
+ self.tokens.append(BlockEntryToken(start_mark, end_mark))
+
+ def fetch_key(self):
+ # type: () -> None
+ # Block context needs additional checks.
+ if not self.flow_level:
+
+            # Are we allowed to start a key (not necessarily a simple one)?
+ if not self.allow_simple_key:
+ raise ScannerError(
+ None, None, 'mapping keys are not allowed here', self.reader.get_mark()
+ )
+
+ # We may need to add BLOCK-MAPPING-START.
+ if self.add_indent(self.reader.column):
+ mark = self.reader.get_mark()
+ self.tokens.append(BlockMappingStartToken(mark, mark))
+
+ # Simple keys are allowed after '?' in the block context.
+ self.allow_simple_key = not self.flow_level
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add KEY.
+ start_mark = self.reader.get_mark()
+ self.reader.forward()
+ end_mark = self.reader.get_mark()
+ self.tokens.append(KeyToken(start_mark, end_mark))
+
+ def fetch_value(self):
+ # type: () -> None
+ # Do we determine a simple key?
+ if self.flow_level in self.possible_simple_keys:
+ # Add KEY.
+ key = self.possible_simple_keys[self.flow_level]
+ del self.possible_simple_keys[self.flow_level]
+ self.tokens.insert(
+ key.token_number - self.tokens_taken, KeyToken(key.mark, key.mark)
+ )
+
+ # If this key starts a new block mapping, we need to add
+ # BLOCK-MAPPING-START.
+ if not self.flow_level:
+ if self.add_indent(key.column):
+ self.tokens.insert(
+ key.token_number - self.tokens_taken,
+ BlockMappingStartToken(key.mark, key.mark),
+ )
+
+ # There cannot be two simple keys one after another.
+ self.allow_simple_key = False
+
+ # It must be a part of a complex key.
+ else:
+
+ # Block context needs additional checks.
+ # (Do we really need them? They will be caught by the parser
+ # anyway.)
+ if not self.flow_level:
+
+ # We are allowed to start a complex value if and only if
+ # we can start a simple key.
+ if not self.allow_simple_key:
+ raise ScannerError(
+ None,
+ None,
+ 'mapping values are not allowed here',
+ self.reader.get_mark(),
+ )
+
+ # If this value starts a new block mapping, we need to add
+ # BLOCK-MAPPING-START. It will be detected as an error later by
+ # the parser.
+ if not self.flow_level:
+ if self.add_indent(self.reader.column):
+ mark = self.reader.get_mark()
+ self.tokens.append(BlockMappingStartToken(mark, mark))
+
+ # Simple keys are allowed after ':' in the block context.
+ self.allow_simple_key = not self.flow_level
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add VALUE.
+ start_mark = self.reader.get_mark()
+ self.reader.forward()
+ end_mark = self.reader.get_mark()
+ self.tokens.append(ValueToken(start_mark, end_mark))
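+    # Note: "a: 1" takes the first branch above (the saved simple key is
+    # turned into a KEY token retroactively); a ':' with no pending simple
+    # key, e.g. in a complex mapping introduced with '?', takes the second
+    # branch.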
+
+ def fetch_alias(self):
+ # type: () -> None
+ # ALIAS could be a simple key.
+ self.save_possible_simple_key()
+ # No simple keys after ALIAS.
+ self.allow_simple_key = False
+ # Scan and add ALIAS.
+ self.tokens.append(self.scan_anchor(AliasToken))
+
+ def fetch_anchor(self):
+ # type: () -> None
+ # ANCHOR could start a simple key.
+ self.save_possible_simple_key()
+ # No simple keys after ANCHOR.
+ self.allow_simple_key = False
+ # Scan and add ANCHOR.
+ self.tokens.append(self.scan_anchor(AnchorToken))
+
+ def fetch_tag(self):
+ # type: () -> None
+ # TAG could start a simple key.
+ self.save_possible_simple_key()
+ # No simple keys after TAG.
+ self.allow_simple_key = False
+ # Scan and add TAG.
+ self.tokens.append(self.scan_tag())
+
+ def fetch_literal(self):
+ # type: () -> None
+ self.fetch_block_scalar(style='|')
+
+ def fetch_folded(self):
+ # type: () -> None
+ self.fetch_block_scalar(style='>')
+
+ def fetch_block_scalar(self, style):
+ # type: (Any) -> None
+ # A simple key may follow a block scalar.
+ self.allow_simple_key = True
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+ # Scan and add SCALAR.
+ self.tokens.append(self.scan_block_scalar(style))
+
+ def fetch_single(self):
+ # type: () -> None
+ self.fetch_flow_scalar(style="'")
+
+ def fetch_double(self):
+ # type: () -> None
+ self.fetch_flow_scalar(style='"')
+
+ def fetch_flow_scalar(self, style):
+ # type: (Any) -> None
+ # A flow scalar could be a simple key.
+ self.save_possible_simple_key()
+ # No simple keys after flow scalars.
+ self.allow_simple_key = False
+ # Scan and add SCALAR.
+ self.tokens.append(self.scan_flow_scalar(style))
+
+ def fetch_plain(self):
+ # type: () -> None
+ # A plain scalar could be a simple key.
+ self.save_possible_simple_key()
+ # No simple keys after plain scalars. But note that `scan_plain` will
+ # change this flag if the scan is finished at the beginning of the
+ # line.
+ self.allow_simple_key = False
+ # Scan and add SCALAR. May change `allow_simple_key`.
+ self.tokens.append(self.scan_plain())
+
+ # Checkers.
+
+ def check_directive(self):
+ # type: () -> Any
+ # DIRECTIVE: ^ '%' ...
+ # The '%' indicator is already checked.
+ if self.reader.column == 0:
+ return True
+ return None
+
+ def check_document_start(self):
+ # type: () -> Any
+ # DOCUMENT-START: ^ '---' (' '|'\n')
+ if self.reader.column == 0:
+ if self.reader.prefix(3) == '---' and self.reader.peek(3) in _THE_END_SPACE_TAB:
+ return True
+ return None
+
+ def check_document_end(self):
+ # type: () -> Any
+ # DOCUMENT-END: ^ '...' (' '|'\n')
+ if self.reader.column == 0:
+ if self.reader.prefix(3) == '...' and self.reader.peek(3) in _THE_END_SPACE_TAB:
+ return True
+ return None
+
+ def check_block_entry(self):
+ # type: () -> Any
+ # BLOCK-ENTRY: '-' (' '|'\n')
+ return self.reader.peek(1) in _THE_END_SPACE_TAB
+
+ def check_key(self):
+ # type: () -> Any
+ # KEY(flow context): '?'
+ if bool(self.flow_level):
+ return True
+ # KEY(block context): '?' (' '|'\n')
+ return self.reader.peek(1) in _THE_END_SPACE_TAB
+
+ def check_value(self):
+ # type: () -> Any
+ # VALUE(flow context): ':'
+ if self.scanner_processing_version == (1, 1):
+ if bool(self.flow_level):
+ return True
+ else:
+ if bool(self.flow_level):
+ if self.flow_context[-1] == '[':
+ if self.reader.peek(1) not in _THE_END_SPACE_TAB:
+ return False
+ elif self.tokens and isinstance(self.tokens[-1], ValueToken):
+ # mapping flow context scanning a value token
+ if self.reader.peek(1) not in _THE_END_SPACE_TAB:
+ return False
+ return True
+ # VALUE(block context): ':' (' '|'\n')
+ return self.reader.peek(1) in _THE_END_SPACE_TAB
+
+ def check_plain(self):
+ # type: () -> Any
+ # A plain scalar may start with any non-space character except:
+ # '-', '?', ':', ',', '[', ']', '{', '}',
+ # '#', '&', '*', '!', '|', '>', '\'', '\"',
+ # '%', '@', '`'.
+ #
+ # It may also start with
+ # '-', '?', ':'
+ # if it is followed by a non-space character.
+ #
+ # Note that we limit the last rule to the block context (except the
+ # '-' character) because we want the flow context to be space
+ # independent.
+ srp = self.reader.peek
+ ch = srp()
+ if self.scanner_processing_version == (1, 1):
+ return ch not in '\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'"%@`' or (
+ srp(1) not in _THE_END_SPACE_TAB
+ and (ch == '-' or (not self.flow_level and ch in '?:'))
+ )
+ # YAML 1.2
+ if ch not in '\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'"%@`':
+ return True
+ ch1 = srp(1)
+ if ch == '-' and ch1 not in _THE_END_SPACE_TAB:
+ return True
+ if ch == ':' and bool(self.flow_level) and ch1 not in _SPACE_TAB:
+ return True
+
+ return srp(1) not in _THE_END_SPACE_TAB and (
+ ch == '-' or (not self.flow_level and ch in '?:')
+ )
+
+ # Scanners.
+
+ def scan_to_next_token(self):
+ # type: () -> Any
+ # We ignore spaces, line breaks and comments.
+ # If we find a line break in the block context, we set the flag
+ # `allow_simple_key` on.
+ # The byte order mark is stripped if it's the first character in the
+ # stream. We do not yet support BOM inside the stream as the
+ # specification requires. Any such mark will be considered as a part
+ # of the document.
+ #
+ # TODO: We need to make tab handling rules more sane. A good rule is
+ # Tabs cannot precede tokens
+ # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
+ # KEY(block), VALUE(block), BLOCK-ENTRY
+ # So the checking code is
+ # if <TAB>:
+ # self.allow_simple_keys = False
+ # We also need to add the check for `allow_simple_keys == True` to
+ # `unwind_indent` before issuing BLOCK-END.
+ # Scanners for block, flow, and plain scalars need to be modified.
+ srp = self.reader.peek
+ srf = self.reader.forward
+ if self.reader.index == 0 and srp() == '\uFEFF':
+ srf()
+ found = False
+ _the_end = _THE_END
+ while not found:
+ while srp() == ' ':
+ srf()
+ if srp() == '#':
+ while srp() not in _the_end:
+ srf()
+ if self.scan_line_break():
+ if not self.flow_level:
+ self.allow_simple_key = True
+ else:
+ found = True
+ return None
+
+ def scan_directive(self):
+ # type: () -> Any
+ # See the specification for details.
+ srp = self.reader.peek
+ srf = self.reader.forward
+ start_mark = self.reader.get_mark()
+ srf()
+ name = self.scan_directive_name(start_mark)
+ value = None
+ if name == 'YAML':
+ value = self.scan_yaml_directive_value(start_mark)
+ end_mark = self.reader.get_mark()
+ elif name == 'TAG':
+ value = self.scan_tag_directive_value(start_mark)
+ end_mark = self.reader.get_mark()
+ else:
+ end_mark = self.reader.get_mark()
+ while srp() not in _THE_END:
+ srf()
+ self.scan_directive_ignored_line(start_mark)
+ return DirectiveToken(name, value, start_mark, end_mark)
+
+ def scan_directive_name(self, start_mark):
+ # type: (Any) -> Any
+ # See the specification for details.
+ length = 0
+ srp = self.reader.peek
+ ch = srp(length)
+ while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' or ch in '-_:.':
+ length += 1
+ ch = srp(length)
+ if not length:
+ raise ScannerError(
+ 'while scanning a directive',
+ start_mark,
+ 'expected alphabetic or numeric character, but found %r' % utf8(ch),
+ self.reader.get_mark(),
+ )
+ value = self.reader.prefix(length)
+ self.reader.forward(length)
+ ch = srp()
+ if ch not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError(
+ 'while scanning a directive',
+ start_mark,
+ 'expected alphabetic or numeric character, but found %r' % utf8(ch),
+ self.reader.get_mark(),
+ )
+ return value
+
+ def scan_yaml_directive_value(self, start_mark):
+ # type: (Any) -> Any
+ # See the specification for details.
+ srp = self.reader.peek
+ srf = self.reader.forward
+ while srp() == ' ':
+ srf()
+ major = self.scan_yaml_directive_number(start_mark)
+ if srp() != '.':
+ raise ScannerError(
+ 'while scanning a directive',
+ start_mark,
+ "expected a digit or '.', but found %r" % utf8(srp()),
+ self.reader.get_mark(),
+ )
+ srf()
+ minor = self.scan_yaml_directive_number(start_mark)
+ if srp() not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError(
+ 'while scanning a directive',
+ start_mark,
+ "expected a digit or ' ', but found %r" % utf8(srp()),
+ self.reader.get_mark(),
+ )
+ self.yaml_version = (major, minor)
+ return self.yaml_version
+
+ def scan_yaml_directive_number(self, start_mark):
+ # type: (Any) -> Any
+ # See the specification for details.
+ srp = self.reader.peek
+ srf = self.reader.forward
+ ch = srp()
+ if not ('0' <= ch <= '9'):
+ raise ScannerError(
+ 'while scanning a directive',
+ start_mark,
+ 'expected a digit, but found %r' % utf8(ch),
+ self.reader.get_mark(),
+ )
+ length = 0
+ while '0' <= srp(length) <= '9':
+ length += 1
+ value = int(self.reader.prefix(length))
+ srf(length)
+ return value
+
+ def scan_tag_directive_value(self, start_mark):
+ # type: (Any) -> Any
+ # See the specification for details.
+ srp = self.reader.peek
+ srf = self.reader.forward
+ while srp() == ' ':
+ srf()
+ handle = self.scan_tag_directive_handle(start_mark)
+ while srp() == ' ':
+ srf()
+ prefix = self.scan_tag_directive_prefix(start_mark)
+ return (handle, prefix)
+
+ def scan_tag_directive_handle(self, start_mark):
+ # type: (Any) -> Any
+ # See the specification for details.
+ value = self.scan_tag_handle('directive', start_mark)
+ ch = self.reader.peek()
+ if ch != ' ':
+ raise ScannerError(
+ 'while scanning a directive',
+ start_mark,
+ "expected ' ', but found %r" % utf8(ch),
+ self.reader.get_mark(),
+ )
+ return value
+
+ def scan_tag_directive_prefix(self, start_mark):
+ # type: (Any) -> Any
+ # See the specification for details.
+ value = self.scan_tag_uri('directive', start_mark)
+ ch = self.reader.peek()
+ if ch not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError(
+ 'while scanning a directive',
+ start_mark,
+ "expected ' ', but found %r" % utf8(ch),
+ self.reader.get_mark(),
+ )
+ return value
+
+ def scan_directive_ignored_line(self, start_mark):
+ # type: (Any) -> None
+ # See the specification for details.
+ srp = self.reader.peek
+ srf = self.reader.forward
+ while srp() == ' ':
+ srf()
+ if srp() == '#':
+ while srp() not in _THE_END:
+ srf()
+ ch = srp()
+ if ch not in _THE_END:
+ raise ScannerError(
+ 'while scanning a directive',
+ start_mark,
+ 'expected a comment or a line break, but found %r' % utf8(ch),
+ self.reader.get_mark(),
+ )
+ self.scan_line_break()
+
+ def scan_anchor(self, TokenClass):
+ # type: (Any) -> Any
+ # The specification does not restrict characters for anchors and
+ # aliases. This may lead to problems, for instance, the document:
+ # [ *alias, value ]
+        # can be interpreted in two ways, as
+ # [ "value" ]
+ # and
+ # [ *alias , "value" ]
+ # Therefore we restrict aliases to numbers and ASCII letters.
+ srp = self.reader.peek
+ start_mark = self.reader.get_mark()
+ indicator = srp()
+ if indicator == '*':
+ name = 'alias'
+ else:
+ name = 'anchor'
+ self.reader.forward()
+ length = 0
+ ch = srp(length)
+ # while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ # or ch in u'-_':
+ while check_anchorname_char(ch):
+ length += 1
+ ch = srp(length)
+ if not length:
+ raise ScannerError(
+ 'while scanning an %s' % (name,),
+ start_mark,
+ 'expected alphabetic or numeric character, but found %r' % utf8(ch),
+ self.reader.get_mark(),
+ )
+ value = self.reader.prefix(length)
+ self.reader.forward(length)
+ # ch1 = ch
+ # ch = srp() # no need to peek, ch is already set
+ # assert ch1 == ch
+ if ch not in '\0 \t\r\n\x85\u2028\u2029?:,[]{}%@`':
+ raise ScannerError(
+ 'while scanning an %s' % (name,),
+ start_mark,
+ 'expected alphabetic or numeric character, but found %r' % utf8(ch),
+ self.reader.get_mark(),
+ )
+ end_mark = self.reader.get_mark()
+ return TokenClass(value, start_mark, end_mark)
+
+ def scan_tag(self):
+ # type: () -> Any
+ # See the specification for details.
+ srp = self.reader.peek
+ start_mark = self.reader.get_mark()
+ ch = srp(1)
+ if ch == '<':
+ handle = None
+ self.reader.forward(2)
+ suffix = self.scan_tag_uri('tag', start_mark)
+ if srp() != '>':
+ raise ScannerError(
+ 'while parsing a tag',
+ start_mark,
+ "expected '>', but found %r" % utf8(srp()),
+ self.reader.get_mark(),
+ )
+ self.reader.forward()
+ elif ch in _THE_END_SPACE_TAB:
+ handle = None
+ suffix = '!'
+ self.reader.forward()
+ else:
+ length = 1
+ use_handle = False
+ while ch not in '\0 \r\n\x85\u2028\u2029':
+ if ch == '!':
+ use_handle = True
+ break
+ length += 1
+ ch = srp(length)
+ handle = '!'
+ if use_handle:
+ handle = self.scan_tag_handle('tag', start_mark)
+ else:
+ handle = '!'
+ self.reader.forward()
+ suffix = self.scan_tag_uri('tag', start_mark)
+ ch = srp()
+ if ch not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError(
+ 'while scanning a tag',
+ start_mark,
+ "expected ' ', but found %r" % utf8(ch),
+ self.reader.get_mark(),
+ )
+ value = (handle, suffix)
+ end_mark = self.reader.get_mark()
+ return TagToken(value, start_mark, end_mark)
+
+ def scan_block_scalar(self, style, rt=False):
+ # type: (Any, Optional[bool]) -> Any
+ # See the specification for details.
+ srp = self.reader.peek
+ if style == '>':
+ folded = True
+ else:
+ folded = False
+
+ chunks = [] # type: List[Any]
+ start_mark = self.reader.get_mark()
+
+ # Scan the header.
+ self.reader.forward()
+ chomping, increment = self.scan_block_scalar_indicators(start_mark)
+        # block scalar comment, e.g.: "|+ # comment text"
+ block_scalar_comment = self.scan_block_scalar_ignored_line(start_mark)
+
+ # Determine the indentation level and go to the first non-empty line.
+ min_indent = self.indent + 1
+ if increment is None:
+ # no increment and top level, min_indent could be 0
+ if min_indent < 1 and (
+ style not in '|>'
+ or (self.scanner_processing_version == (1, 1))
+ and getattr(
+ self.loader, 'top_level_block_style_scalar_no_indent_error_1_1', False
+ )
+ ):
+ min_indent = 1
+ breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
+ indent = max(min_indent, max_indent)
+ else:
+ if min_indent < 1:
+ min_indent = 1
+ indent = min_indent + increment - 1
+ breaks, end_mark = self.scan_block_scalar_breaks(indent)
+ line_break = ""
+
+ # Scan the inner part of the block scalar.
+ while self.reader.column == indent and srp() != '\0':
+ chunks.extend(breaks)
+ leading_non_space = srp() not in ' \t'
+ length = 0
+ while srp(length) not in _THE_END:
+ length += 1
+ chunks.append(self.reader.prefix(length))
+ self.reader.forward(length)
+ line_break = self.scan_line_break()
+ breaks, end_mark = self.scan_block_scalar_breaks(indent)
+ if style in '|>' and min_indent == 0:
+                # at the beginning of a line, if in block style, check for
+                # the end of the document or the start of a new one
+ if self.check_document_start() or self.check_document_end():
+ break
+ if self.reader.column == indent and srp() != '\0':
+
+ # Unfortunately, folding rules are ambiguous.
+ #
+ # This is the folding according to the specification:
+
+ if rt and folded and line_break == '\n':
+ chunks.append('\a')
+ if folded and line_break == '\n' and leading_non_space and srp() not in ' \t':
+ if not breaks:
+ chunks.append(' ')
+ else:
+ chunks.append(line_break)
+
+ # This is Clark Evans's interpretation (also in the spec
+ # examples):
+ #
+ # if folded and line_break == u'\n':
+ # if not breaks:
+ # if srp() not in ' \t':
+ # chunks.append(u' ')
+ # else:
+ # chunks.append(line_break)
+ # else:
+ # chunks.append(line_break)
+ else:
+ break
+
+ # Process trailing line breaks. The 'chomping' setting determines
+ # whether they are included in the value.
+ trailing = [] # type: List[Any]
+ if chomping in [None, True]:
+ chunks.append(line_break)
+ if chomping is True:
+ chunks.extend(breaks)
+ elif chomping in [None, False]:
+ trailing.extend(breaks)
+
+ # We are done.
+ token = ScalarToken("".join(chunks), False, start_mark, end_mark, style)
+ if block_scalar_comment is not None:
+ token.add_pre_comments([block_scalar_comment])
+ if len(trailing) > 0:
+ # nprint('trailing 1', trailing) # XXXXX
+ # Eat whitespaces and comments until we reach the next token.
+ comment = self.scan_to_next_token()
+ while comment:
+ trailing.append(' ' * comment[1].column + comment[0])
+ comment = self.scan_to_next_token()
+
+ # Keep track of the trailing whitespace and following comments
+            # as a comment token, if it isn't all included in the actual value.
+ comment_end_mark = self.reader.get_mark()
+ comment = CommentToken("".join(trailing), end_mark, comment_end_mark)
+ token.add_post_comment(comment)
+ return token
+
+ def scan_block_scalar_indicators(self, start_mark):
+ # type: (Any) -> Any
+ # See the specification for details.
+ srp = self.reader.peek
+ chomping = None
+ increment = None
+ ch = srp()
+ if ch in '+-':
+ if ch == '+':
+ chomping = True
+ else:
+ chomping = False
+ self.reader.forward()
+ ch = srp()
+ if ch in '0123456789':
+ increment = int(ch)
+ if increment == 0:
+ raise ScannerError(
+ 'while scanning a block scalar',
+ start_mark,
+                        'expected indentation indicator in the range 1-9, but found 0',
+ self.reader.get_mark(),
+ )
+ self.reader.forward()
+ elif ch in '0123456789':
+ increment = int(ch)
+ if increment == 0:
+ raise ScannerError(
+ 'while scanning a block scalar',
+ start_mark,
+                    'expected indentation indicator in the range 1-9, but found 0',
+ self.reader.get_mark(),
+ )
+ self.reader.forward()
+ ch = srp()
+ if ch in '+-':
+ if ch == '+':
+ chomping = True
+ else:
+ chomping = False
+ self.reader.forward()
+ ch = srp()
+ if ch not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError(
+ 'while scanning a block scalar',
+ start_mark,
+ 'expected chomping or indentation indicators, but found %r' % utf8(ch),
+ self.reader.get_mark(),
+ )
+ return chomping, increment
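+    # For example, the block scalar header "|2+" (or equivalently "|+2")
+    # yields (chomping=True, increment=2); a bare "|" yields (None, None).
+    # An explicit indentation indicator of 0 is rejected above.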
+
+ def scan_block_scalar_ignored_line(self, start_mark):
+ # type: (Any) -> Any
+ # See the specification for details.
+ srp = self.reader.peek
+ srf = self.reader.forward
+ prefix = ''
+ comment = None
+ while srp() == ' ':
+ prefix += srp()
+ srf()
+ if srp() == '#':
+ comment = prefix
+ while srp() not in _THE_END:
+ comment += srp()
+ srf()
+ ch = srp()
+ if ch not in _THE_END:
+ raise ScannerError(
+ 'while scanning a block scalar',
+ start_mark,
+ 'expected a comment or a line break, but found %r' % utf8(ch),
+ self.reader.get_mark(),
+ )
+ self.scan_line_break()
+ return comment
+
+ def scan_block_scalar_indentation(self):
+ # type: () -> Any
+ # See the specification for details.
+ srp = self.reader.peek
+ srf = self.reader.forward
+ chunks = []
+ max_indent = 0
+ end_mark = self.reader.get_mark()
+ while srp() in ' \r\n\x85\u2028\u2029':
+ if srp() != ' ':
+ chunks.append(self.scan_line_break())
+ end_mark = self.reader.get_mark()
+ else:
+ srf()
+ if self.reader.column > max_indent:
+ max_indent = self.reader.column
+ return chunks, max_indent, end_mark
+
+ def scan_block_scalar_breaks(self, indent):
+ # type: (int) -> Any
+ # See the specification for details.
+ chunks = []
+ srp = self.reader.peek
+ srf = self.reader.forward
+ end_mark = self.reader.get_mark()
+ while self.reader.column < indent and srp() == ' ':
+ srf()
+ while srp() in '\r\n\x85\u2028\u2029':
+ chunks.append(self.scan_line_break())
+ end_mark = self.reader.get_mark()
+ while self.reader.column < indent and srp() == ' ':
+ srf()
+ return chunks, end_mark
+
+ def scan_flow_scalar(self, style):
+ # type: (Any) -> Any
+ # See the specification for details.
+        # Note that we lose indentation rules for quoted scalars. Quoted
+        # scalars don't need to adhere to indentation because " and ' clearly
+        # mark their beginning and end. Therefore we are less restrictive
+        # than the specification requires. We only need to check that
+        # document separators are not included in scalars.
+ if style == '"':
+ double = True
+ else:
+ double = False
+ srp = self.reader.peek
+ chunks = [] # type: List[Any]
+ start_mark = self.reader.get_mark()
+ quote = srp()
+ self.reader.forward()
+ chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+ while srp() != quote:
+ chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
+ chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+ self.reader.forward()
+ end_mark = self.reader.get_mark()
+ return ScalarToken("".join(chunks), False, start_mark, end_mark, style)
+
+ ESCAPE_REPLACEMENTS = {
+ '0': '\0',
+ 'a': '\x07',
+ 'b': '\x08',
+ 't': '\x09',
+ '\t': '\x09',
+ 'n': '\x0A',
+ 'v': '\x0B',
+ 'f': '\x0C',
+ 'r': '\x0D',
+ 'e': '\x1B',
+ ' ': '\x20',
+ '"': '"',
+ '/': '/', # as per http://www.json.org/
+ '\\': '\\',
+ 'N': '\x85',
+ '_': '\xA0',
+ 'L': '\u2028',
+ 'P': '\u2029',
+ }
+
+ ESCAPE_CODES = {'x': 2, 'u': 4, 'U': 8}
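+    # For example, in a double-quoted scalar '\x41' decodes to 'A' (two hex
+    # digits) and '\u263A' to U+263A (four digits), while '\n' maps to a
+    # line feed via ESCAPE_REPLACEMENTS above.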
+
+ def scan_flow_scalar_non_spaces(self, double, start_mark):
+ # type: (Any, Any) -> Any
+ # See the specification for details.
+ chunks = [] # type: List[Any]
+ srp = self.reader.peek
+ srf = self.reader.forward
+ while True:
+ length = 0
+ while srp(length) not in ' \n\'"\\\0\t\r\x85\u2028\u2029':
+ length += 1
+ if length != 0:
+ chunks.append(self.reader.prefix(length))
+ srf(length)
+ ch = srp()
+ if not double and ch == "'" and srp(1) == "'":
+ chunks.append("'")
+ srf(2)
+ elif (double and ch == "'") or (not double and ch in '"\\'):
+ chunks.append(ch)
+ srf()
+ elif double and ch == '\\':
+ srf()
+ ch = srp()
+ if ch in self.ESCAPE_REPLACEMENTS:
+ chunks.append(self.ESCAPE_REPLACEMENTS[ch])
+ srf()
+ elif ch in self.ESCAPE_CODES:
+ length = self.ESCAPE_CODES[ch]
+ srf()
+ for k in range(length):
+ if srp(k) not in '0123456789ABCDEFabcdef':
+ raise ScannerError(
+ 'while scanning a double-quoted scalar',
+ start_mark,
+                                'expected escape sequence of %d hexadecimal '
+ 'numbers, but found %r' % (length, utf8(srp(k))),
+ self.reader.get_mark(),
+ )
+ code = int(self.reader.prefix(length), 16)
+ chunks.append(unichr(code))
+ srf(length)
+ elif ch in '\n\r\x85\u2028\u2029':
+ self.scan_line_break()
+ chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
+ else:
+ raise ScannerError(
+ 'while scanning a double-quoted scalar',
+ start_mark,
+ 'found unknown escape character %r' % utf8(ch),
+ self.reader.get_mark(),
+ )
+ else:
+ return chunks
+
+ def scan_flow_scalar_spaces(self, double, start_mark):
+ # type: (Any, Any) -> Any
+ # See the specification for details.
+ srp = self.reader.peek
+ chunks = []
+ length = 0
+ while srp(length) in ' \t':
+ length += 1
+ whitespaces = self.reader.prefix(length)
+ self.reader.forward(length)
+ ch = srp()
+ if ch == '\0':
+ raise ScannerError(
+ 'while scanning a quoted scalar',
+ start_mark,
+ 'found unexpected end of stream',
+ self.reader.get_mark(),
+ )
+ elif ch in '\r\n\x85\u2028\u2029':
+ line_break = self.scan_line_break()
+ breaks = self.scan_flow_scalar_breaks(double, start_mark)
+ if line_break != '\n':
+ chunks.append(line_break)
+ elif not breaks:
+ chunks.append(' ')
+ chunks.extend(breaks)
+ else:
+ chunks.append(whitespaces)
+ return chunks
+
+ def scan_flow_scalar_breaks(self, double, start_mark):
+ # type: (Any, Any) -> Any
+ # See the specification for details.
+ chunks = [] # type: List[Any]
+ srp = self.reader.peek
+ srf = self.reader.forward
+ while True:
+ # Instead of checking indentation, we check for document
+ # separators.
+ prefix = self.reader.prefix(3)
+ if (prefix == '---' or prefix == '...') and srp(3) in _THE_END_SPACE_TAB:
+ raise ScannerError(
+ 'while scanning a quoted scalar',
+ start_mark,
+ 'found unexpected document separator',
+ self.reader.get_mark(),
+ )
+ while srp() in ' \t':
+ srf()
+ if srp() in '\r\n\x85\u2028\u2029':
+ chunks.append(self.scan_line_break())
+ else:
+ return chunks
+
+ def scan_plain(self):
+ # type: () -> Any
+ # See the specification for details.
+ # We add an additional restriction for the flow context:
+        # plain scalars in the flow context cannot contain ',', ': ', or '?'.
+        # We also keep track of the `allow_simple_key` flag here.
+        # Indentation rules are loosened for the flow context.
+ srp = self.reader.peek
+ srf = self.reader.forward
+ chunks = [] # type: List[Any]
+ start_mark = self.reader.get_mark()
+ end_mark = start_mark
+ indent = self.indent + 1
+ # We allow zero indentation for scalars, but then we need to check for
+ # document separators at the beginning of the line.
+ # if indent == 0:
+ # indent = 1
+ spaces = [] # type: List[Any]
+ while True:
+ length = 0
+ if srp() == '#':
+ break
+ while True:
+ ch = srp(length)
+ if ch == ':' and srp(length + 1) not in _THE_END_SPACE_TAB:
+ pass
+ elif ch == '?' and self.scanner_processing_version != (1, 1):
+ pass
+ elif (
+ ch in _THE_END_SPACE_TAB
+ or (
+ not self.flow_level
+ and ch == ':'
+ and srp(length + 1) in _THE_END_SPACE_TAB
+ )
+ or (self.flow_level and ch in ',:?[]{}')
+ ):
+ break
+ length += 1
+ # It's not clear what we should do with ':' in the flow context.
+ if (
+ self.flow_level
+ and ch == ':'
+ and srp(length + 1) not in '\0 \t\r\n\x85\u2028\u2029,[]{}'
+ ):
+ srf(length)
+ raise ScannerError(
+ 'while scanning a plain scalar',
+ start_mark,
+ "found unexpected ':'",
+ self.reader.get_mark(),
+ 'Please check '
+ 'http://pyyaml.org/wiki/YAMLColonInFlowContext '
+ 'for details.',
+ )
+ if length == 0:
+ break
+ self.allow_simple_key = False
+ chunks.extend(spaces)
+ chunks.append(self.reader.prefix(length))
+ srf(length)
+ end_mark = self.reader.get_mark()
+ spaces = self.scan_plain_spaces(indent, start_mark)
+ if (
+ not spaces
+ or srp() == '#'
+ or (not self.flow_level and self.reader.column < indent)
+ ):
+ break
+
+ token = ScalarToken("".join(chunks), True, start_mark, end_mark)
+ if spaces and spaces[0] == '\n':
+ # Create a comment token to preserve the trailing line breaks.
+ comment = CommentToken("".join(spaces) + '\n', start_mark, end_mark)
+ token.add_post_comment(comment)
+ return token
+
+ def scan_plain_spaces(self, indent, start_mark):
+ # type: (Any, Any) -> Any
+ # See the specification for details.
+ # The specification is really confusing about tabs in plain scalars.
+ # We just forbid them completely. Do not use tabs in YAML!
+ srp = self.reader.peek
+ srf = self.reader.forward
+ chunks = []
+ length = 0
+ while srp(length) in ' ':
+ length += 1
+ whitespaces = self.reader.prefix(length)
+ self.reader.forward(length)
+ ch = srp()
+ if ch in '\r\n\x85\u2028\u2029':
+ line_break = self.scan_line_break()
+ self.allow_simple_key = True
+ prefix = self.reader.prefix(3)
+ if (prefix == '---' or prefix == '...') and srp(3) in _THE_END_SPACE_TAB:
+ return
+ breaks = []
+ while srp() in ' \r\n\x85\u2028\u2029':
+ if srp() == ' ':
+ srf()
+ else:
+ breaks.append(self.scan_line_break())
+ prefix = self.reader.prefix(3)
+ if (prefix == '---' or prefix == '...') and srp(3) in _THE_END_SPACE_TAB:
+ return
+ if line_break != '\n':
+ chunks.append(line_break)
+ elif not breaks:
+ chunks.append(' ')
+ chunks.extend(breaks)
+ elif whitespaces:
+ chunks.append(whitespaces)
+ return chunks
+
+ def scan_tag_handle(self, name, start_mark):
+ # type: (Any, Any) -> Any
+ # See the specification for details.
+        # For some strange reason, the specification does not allow '_' in
+ # tag handles. I have allowed it anyway.
+ srp = self.reader.peek
+ ch = srp()
+ if ch != '!':
+ raise ScannerError(
+ 'while scanning a %s' % (name,),
+ start_mark,
+ "expected '!', but found %r" % utf8(ch),
+ self.reader.get_mark(),
+ )
+ length = 1
+ ch = srp(length)
+ if ch != ' ':
+ while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' or ch in '-_':
+ length += 1
+ ch = srp(length)
+ if ch != '!':
+ self.reader.forward(length)
+ raise ScannerError(
+ 'while scanning a %s' % (name,),
+ start_mark,
+ "expected '!', but found %r" % utf8(ch),
+ self.reader.get_mark(),
+ )
+ length += 1
+ value = self.reader.prefix(length)
+ self.reader.forward(length)
+ return value
+
+ def scan_tag_uri(self, name, start_mark):
+ # type: (Any, Any) -> Any
+ # See the specification for details.
+ # Note: we do not check if URI is well-formed.
+ srp = self.reader.peek
+ chunks = []
+ length = 0
+ ch = srp(length)
+ while (
+ '0' <= ch <= '9'
+ or 'A' <= ch <= 'Z'
+ or 'a' <= ch <= 'z'
+ or ch in "-;/?:@&=+$,_.!~*'()[]%"
+ or ((self.scanner_processing_version > (1, 1)) and ch == '#')
+ ):
+ if ch == '%':
+ chunks.append(self.reader.prefix(length))
+ self.reader.forward(length)
+ length = 0
+ chunks.append(self.scan_uri_escapes(name, start_mark))
+ else:
+ length += 1
+ ch = srp(length)
+ if length != 0:
+ chunks.append(self.reader.prefix(length))
+ self.reader.forward(length)
+ length = 0
+ if not chunks:
+ raise ScannerError(
+ 'while parsing a %s' % (name,),
+ start_mark,
+ 'expected URI, but found %r' % utf8(ch),
+ self.reader.get_mark(),
+ )
+ return "".join(chunks)
+
+ def scan_uri_escapes(self, name, start_mark):
+ # type: (Any, Any) -> Any
+ # See the specification for details.
+ srp = self.reader.peek
+ srf = self.reader.forward
+ code_bytes = [] # type: List[Any]
+ mark = self.reader.get_mark()
+ while srp() == '%':
+ srf()
+ for k in range(2):
+ if srp(k) not in '0123456789ABCDEFabcdef':
+ raise ScannerError(
+ 'while scanning a %s' % (name,),
+ start_mark,
+                        'expected URI escape sequence of 2 hexadecimal numbers,'
+ ' but found %r' % utf8(srp(k)),
+ self.reader.get_mark(),
+ )
+ if PY3:
+ code_bytes.append(int(self.reader.prefix(2), 16))
+ else:
+ code_bytes.append(chr(int(self.reader.prefix(2), 16)))
+ srf(2)
+ try:
+ if PY3:
+ value = bytes(code_bytes).decode('utf-8')
+ else:
+ value = unicode(b"".join(code_bytes), 'utf-8')
+ except UnicodeDecodeError as exc:
+ raise ScannerError('while scanning a %s' % (name,), start_mark, str(exc), mark)
+ return value
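+    # For example, '%21' in a tag URI decodes to '!': each '%' consumes two
+    # hex digits, and the collected bytes are decoded as UTF-8.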
+
+ def scan_line_break(self):
+ # type: () -> Any
+ # Transforms:
+ # '\r\n' : '\n'
+ # '\r' : '\n'
+ # '\n' : '\n'
+ # '\x85' : '\n'
+ # '\u2028' : '\u2028'
+ # '\u2029 : '\u2029'
+ # default : ''
+ ch = self.reader.peek()
+ if ch in '\r\n\x85':
+ if self.reader.prefix(2) == '\r\n':
+ self.reader.forward(2)
+ else:
+ self.reader.forward()
+ return '\n'
+ elif ch in '\u2028\u2029':
+ self.reader.forward()
+ return ch
+ return ""
+
+
+class RoundTripScanner(Scanner):
+ def check_token(self, *choices):
+ # type: (Any) -> bool
+ # Check if the next token is one of the given types.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ self._gather_comments()
+ if bool(self.tokens):
+ if not choices:
+ return True
+ for choice in choices:
+ if isinstance(self.tokens[0], choice):
+ return True
+ return False
+
+ def peek_token(self):
+ # type: () -> Any
+        # Return the next token, but do not delete it from the queue.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ self._gather_comments()
+ if bool(self.tokens):
+ return self.tokens[0]
+ return None
+
+ def _gather_comments(self):
+ # type: () -> Any
+ """combine multiple comment lines"""
+ comments = [] # type: List[Any]
+ if not self.tokens:
+ return comments
+ if isinstance(self.tokens[0], CommentToken):
+ comment = self.tokens.pop(0)
+ self.tokens_taken += 1
+ comments.append(comment)
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if not self.tokens:
+ return comments
+ if isinstance(self.tokens[0], CommentToken):
+ self.tokens_taken += 1
+ comment = self.tokens.pop(0)
+ # nprint('dropping2', comment)
+ comments.append(comment)
+ if len(comments) >= 1:
+ self.tokens[0].add_pre_comments(comments)
+ # pull in post comment on e.g. ':'
+ if not self.done and len(self.tokens) < 2:
+ self.fetch_more_tokens()
+
+ def get_token(self):
+ # type: () -> Any
+ # Return the next token.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ self._gather_comments()
+ if bool(self.tokens):
+ # nprint('tk', self.tokens)
+            # only add a post comment to single-line tokens: scalar, value,
+            # and flow-end tokens; otherwise hidden stream tokens could pick
+            # it up (left alone, it becomes a pre-comment for the next
+            # map/seq).
+ if (
+ len(self.tokens) > 1
+ and isinstance(
+ self.tokens[0],
+ (ScalarToken, ValueToken, FlowSequenceEndToken, FlowMappingEndToken),
+ )
+ and isinstance(self.tokens[1], CommentToken)
+ and self.tokens[0].end_mark.line == self.tokens[1].start_mark.line
+ ):
+ self.tokens_taken += 1
+ c = self.tokens.pop(1)
+ self.fetch_more_tokens()
+ while len(self.tokens) > 1 and isinstance(self.tokens[1], CommentToken):
+ self.tokens_taken += 1
+ c1 = self.tokens.pop(1)
+ c.value = c.value + (' ' * c1.start_mark.column) + c1.value
+ self.fetch_more_tokens()
+ self.tokens[0].add_post_comment(c)
+ elif (
+ len(self.tokens) > 1
+ and isinstance(self.tokens[0], ScalarToken)
+ and isinstance(self.tokens[1], CommentToken)
+ and self.tokens[0].end_mark.line != self.tokens[1].start_mark.line
+ ):
+ self.tokens_taken += 1
+ c = self.tokens.pop(1)
+ c.value = (
+ '\n' * (c.start_mark.line - self.tokens[0].end_mark.line)
+ + (' ' * c.start_mark.column)
+ + c.value
+ )
+ self.tokens[0].add_post_comment(c)
+ self.fetch_more_tokens()
+ while len(self.tokens) > 1 and isinstance(self.tokens[1], CommentToken):
+ self.tokens_taken += 1
+ c1 = self.tokens.pop(1)
+ c.value = c.value + (' ' * c1.start_mark.column) + c1.value
+ self.fetch_more_tokens()
+ self.tokens_taken += 1
+ return self.tokens.pop(0)
+ return None
+
+ def fetch_comment(self, comment):
+ # type: (Any) -> None
+ value, start_mark, end_mark = comment
+ while value and value[-1] == ' ':
+ # empty line within indented key context
+ # no need to update end-mark, that is not used
+ value = value[:-1]
+ self.tokens.append(CommentToken(value, start_mark, end_mark))
+
+ # scanner
+
+ def scan_to_next_token(self):
+ # type: () -> Any
+ # We ignore spaces, line breaks and comments.
+ # If we find a line break in the block context, we set the flag
+ # `allow_simple_key` on.
+ # The byte order mark is stripped if it's the first character in the
+ # stream. We do not yet support BOM inside the stream as the
+ # specification requires. Any such mark will be considered as a part
+ # of the document.
+ #
+ # TODO: We need to make tab handling rules more sane. A good rule is
+ # Tabs cannot precede tokens
+ # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
+ # KEY(block), VALUE(block), BLOCK-ENTRY
+ # So the checking code is
+ # if <TAB>:
+ # self.allow_simple_keys = False
+ # We also need to add the check for `allow_simple_keys == True` to
+ # `unwind_indent` before issuing BLOCK-END.
+ # Scanners for block, flow, and plain scalars need to be modified.
+
+ srp = self.reader.peek
+ srf = self.reader.forward
+ if self.reader.index == 0 and srp() == '\uFEFF':
+ srf()
+ found = False
+ while not found:
+ while srp() == ' ':
+ srf()
+ ch = srp()
+ if ch == '#':
+ start_mark = self.reader.get_mark()
+ comment = ch
+ srf()
+ while ch not in _THE_END:
+ ch = srp()
+ if ch == '\0': # don't gobble the end-of-stream character
+                        # but add an explicit newline, as "YAML processors should terminate
+                        # the stream with an explicit line break"
+ # https://yaml.org/spec/1.2/spec.html#id2780069
+ comment += '\n'
+ break
+ comment += ch
+ srf()
+ # gather any blank lines following the comment too
+ ch = self.scan_line_break()
+ while len(ch) > 0:
+ comment += ch
+ ch = self.scan_line_break()
+ end_mark = self.reader.get_mark()
+ if not self.flow_level:
+ self.allow_simple_key = True
+ return comment, start_mark, end_mark
+ if bool(self.scan_line_break()):
+ start_mark = self.reader.get_mark()
+ if not self.flow_level:
+ self.allow_simple_key = True
+ ch = srp()
+ if ch == '\n': # empty toplevel lines
+ start_mark = self.reader.get_mark()
+ comment = ""
+ while ch:
+ ch = self.scan_line_break(empty_line=True)
+ comment += ch
+ if srp() == '#':
+ # empty line followed by indented real comment
+ comment = comment.rsplit('\n', 1)[0] + '\n'
+ end_mark = self.reader.get_mark()
+ return comment, start_mark, end_mark
+ else:
+ found = True
+ return None
+
+ def scan_line_break(self, empty_line=False):
+ # type: (bool) -> Text
+ # Transforms:
+ # '\r\n' : '\n'
+ # '\r' : '\n'
+ # '\n' : '\n'
+ # '\x85' : '\n'
+ # '\u2028' : '\u2028'
+ # '\u2029 : '\u2029'
+ # default : ''
+ ch = self.reader.peek() # type: Text
+ if ch in '\r\n\x85':
+ if self.reader.prefix(2) == '\r\n':
+ self.reader.forward(2)
+ else:
+ self.reader.forward()
+ return '\n'
+ elif ch in '\u2028\u2029':
+ self.reader.forward()
+ return ch
+ elif empty_line and ch in '\t ':
+ self.reader.forward()
+ return ch
+ return ""
+
+ def scan_block_scalar(self, style, rt=True):
+ # type: (Any, Optional[bool]) -> Any
+ return Scanner.scan_block_scalar(self, style, rt=rt)
+
+
+# try:
+# import psyco
+# psyco.bind(Scanner)
+# except ImportError:
+# pass
diff --git a/libs/dynaconf/vendor/ruamel/yaml/serializer.py b/libs/dynaconf/vendor/ruamel/yaml/serializer.py
new file mode 100644
index 000000000..0a28c60b8
--- /dev/null
+++ b/libs/dynaconf/vendor/ruamel/yaml/serializer.py
@@ -0,0 +1,240 @@
+# coding: utf-8
+
+from __future__ import absolute_import
+
+from .error import YAMLError
+from .compat import nprint, DBG_NODE, dbg, string_types, nprintf # NOQA
+from .util import RegExp
+
+from .events import (
+ StreamStartEvent,
+ StreamEndEvent,
+ MappingStartEvent,
+ MappingEndEvent,
+ SequenceStartEvent,
+ SequenceEndEvent,
+ AliasEvent,
+ ScalarEvent,
+ DocumentStartEvent,
+ DocumentEndEvent,
+)
+from .nodes import MappingNode, ScalarNode, SequenceNode
+
+if False: # MYPY
+ from typing import Any, Dict, Union, Text, Optional # NOQA
+ from .compat import VersionType # NOQA
+
+__all__ = ['Serializer', 'SerializerError']
+
+
+class SerializerError(YAMLError):
+ pass
+
+
+class Serializer(object):
+
+    # 'id' followed by 3+ digits, but not 000
+ ANCHOR_TEMPLATE = u'id%03d'
+ ANCHOR_RE = RegExp(u'id(?!000$)\\d{3,}')
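+    # For example, 'id001' matches ANCHOR_RE while 'id000' does not; this
+    # lets templated_id() below distinguish generated anchors from explicit
+    # ones.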
+
+ def __init__(
+ self,
+ encoding=None,
+ explicit_start=None,
+ explicit_end=None,
+ version=None,
+ tags=None,
+ dumper=None,
+ ):
+ # type: (Any, Optional[bool], Optional[bool], Optional[VersionType], Any, Any) -> None # NOQA
+ self.dumper = dumper
+ if self.dumper is not None:
+ self.dumper._serializer = self
+ self.use_encoding = encoding
+ self.use_explicit_start = explicit_start
+ self.use_explicit_end = explicit_end
+ if isinstance(version, string_types):
+ self.use_version = tuple(map(int, version.split('.')))
+ else:
+ self.use_version = version # type: ignore
+ self.use_tags = tags
+ self.serialized_nodes = {} # type: Dict[Any, Any]
+ self.anchors = {} # type: Dict[Any, Any]
+ self.last_anchor_id = 0
+ self.closed = None # type: Optional[bool]
+ self._templated_id = None
+
+ @property
+ def emitter(self):
+ # type: () -> Any
+ if hasattr(self.dumper, 'typ'):
+ return self.dumper.emitter
+ return self.dumper._emitter
+
+ @property
+ def resolver(self):
+ # type: () -> Any
+        if hasattr(self.dumper, 'typ'):
+            return self.dumper.resolver
+        return self.dumper._resolver
+
+ def open(self):
+ # type: () -> None
+ if self.closed is None:
+ self.emitter.emit(StreamStartEvent(encoding=self.use_encoding))
+ self.closed = False
+ elif self.closed:
+ raise SerializerError('serializer is closed')
+ else:
+ raise SerializerError('serializer is already opened')
+
+ def close(self):
+ # type: () -> None
+ if self.closed is None:
+ raise SerializerError('serializer is not opened')
+ elif not self.closed:
+ self.emitter.emit(StreamEndEvent())
+ self.closed = True
+
+ # def __del__(self):
+ # self.close()
+
+ def serialize(self, node):
+ # type: (Any) -> None
+ if dbg(DBG_NODE):
+ nprint('Serializing nodes')
+ node.dump()
+ if self.closed is None:
+ raise SerializerError('serializer is not opened')
+ elif self.closed:
+ raise SerializerError('serializer is closed')
+ self.emitter.emit(
+ DocumentStartEvent(
+ explicit=self.use_explicit_start, version=self.use_version, tags=self.use_tags
+ )
+ )
+ self.anchor_node(node)
+ self.serialize_node(node, None, None)
+ self.emitter.emit(DocumentEndEvent(explicit=self.use_explicit_end))
+ self.serialized_nodes = {}
+ self.anchors = {}
+ self.last_anchor_id = 0
+
+ def anchor_node(self, node):
+ # type: (Any) -> None
+ if node in self.anchors:
+ if self.anchors[node] is None:
+ self.anchors[node] = self.generate_anchor(node)
+ else:
+ anchor = None
+ try:
+ if node.anchor.always_dump:
+ anchor = node.anchor.value
+ except: # NOQA
+ pass
+ self.anchors[node] = anchor
+ if isinstance(node, SequenceNode):
+ for item in node.value:
+ self.anchor_node(item)
+ elif isinstance(node, MappingNode):
+ for key, value in node.value:
+ self.anchor_node(key)
+ self.anchor_node(value)
+
+ def generate_anchor(self, node):
+ # type: (Any) -> Any
+ try:
+ anchor = node.anchor.value
+ except: # NOQA
+ anchor = None
+ if anchor is None:
+ self.last_anchor_id += 1
+ return self.ANCHOR_TEMPLATE % self.last_anchor_id
+ return anchor
+
+ def serialize_node(self, node, parent, index):
+ # type: (Any, Any, Any) -> None
+ alias = self.anchors[node]
+ if node in self.serialized_nodes:
+ self.emitter.emit(AliasEvent(alias))
+ else:
+ self.serialized_nodes[node] = True
+ self.resolver.descend_resolver(parent, index)
+ if isinstance(node, ScalarNode):
+                # check whether node.tag equals the tag that would result from
+                # parsing; if they differ, quoting is necessary for strings
+ detected_tag = self.resolver.resolve(ScalarNode, node.value, (True, False))
+ default_tag = self.resolver.resolve(ScalarNode, node.value, (False, True))
+ implicit = (
+ (node.tag == detected_tag),
+ (node.tag == default_tag),
+ node.tag.startswith('tag:yaml.org,2002:'),
+ )
+ self.emitter.emit(
+ ScalarEvent(
+ alias,
+ node.tag,
+ implicit,
+ node.value,
+ style=node.style,
+ comment=node.comment,
+ )
+ )
+ elif isinstance(node, SequenceNode):
+ implicit = node.tag == self.resolver.resolve(SequenceNode, node.value, True)
+ comment = node.comment
+ end_comment = None
+ seq_comment = None
+ if node.flow_style is True:
+ if comment: # eol comment on flow style sequence
+ seq_comment = comment[0]
+ # comment[0] = None
+ if comment and len(comment) > 2:
+ end_comment = comment[2]
+ else:
+ end_comment = None
+ self.emitter.emit(
+ SequenceStartEvent(
+ alias,
+ node.tag,
+ implicit,
+ flow_style=node.flow_style,
+ comment=node.comment,
+ )
+ )
+ index = 0
+ for item in node.value:
+ self.serialize_node(item, node, index)
+ index += 1
+ self.emitter.emit(SequenceEndEvent(comment=[seq_comment, end_comment]))
+ elif isinstance(node, MappingNode):
+ implicit = node.tag == self.resolver.resolve(MappingNode, node.value, True)
+ comment = node.comment
+ end_comment = None
+ map_comment = None
+ if node.flow_style is True:
+                if comment:  # eol comment on flow style mapping
+ map_comment = comment[0]
+ # comment[0] = None
+ if comment and len(comment) > 2:
+ end_comment = comment[2]
+ self.emitter.emit(
+ MappingStartEvent(
+ alias,
+ node.tag,
+ implicit,
+ flow_style=node.flow_style,
+ comment=node.comment,
+ nr_items=len(node.value),
+ )
+ )
+ for key, value in node.value:
+ self.serialize_node(key, node, None)
+ self.serialize_node(value, node, key)
+ self.emitter.emit(MappingEndEvent(comment=[map_comment, end_comment]))
+ self.resolver.ascend_resolver()
+
+
+def templated_id(s):
+ # type: (Text) -> Any
+ return Serializer.ANCHOR_RE.match(s)
diff --git a/libs/dynaconf/vendor/ruamel/yaml/setup.cfg b/libs/dynaconf/vendor/ruamel/yaml/setup.cfg
new file mode 100644
index 000000000..8bfd5a12f
--- /dev/null
+++ b/libs/dynaconf/vendor/ruamel/yaml/setup.cfg
@@ -0,0 +1,4 @@
+[egg_info]
+tag_build =
+tag_date = 0
+
diff --git a/libs/dynaconf/vendor/ruamel/yaml/setup.py b/libs/dynaconf/vendor/ruamel/yaml/setup.py
new file mode 100644
index 000000000..f22dceba8
--- /dev/null
+++ b/libs/dynaconf/vendor/ruamel/yaml/setup.py
@@ -0,0 +1,962 @@
+# # header
+# coding: utf-8
+# dd: 20200125
+
+from __future__ import print_function, absolute_import, division, unicode_literals
+
+# # __init__.py parser
+
+import sys
+import os
+import datetime
+import traceback
+
+sys.path = [path for path in sys.path if path not in [os.getcwd(), ""]]
+import platform # NOQA
+from _ast import * # NOQA
+from ast import parse # NOQA
+
+from setuptools import setup, Extension, Distribution # NOQA
+from setuptools.command import install_lib # NOQA
+from setuptools.command.sdist import sdist as _sdist # NOQA
+
+try:
+ from setuptools.namespaces import Installer as NameSpaceInstaller # NOQA
+except ImportError:
+ msg = ('You should use the latest setuptools. The namespaces.py file that this setup.py'
+ ' uses was added in setuptools 28.7.0 (Oct 2016)')
+ print(msg)
+ sys.exit()
+
+if __name__ != '__main__':
+ raise NotImplementedError('should never include setup.py')
+
+# # definitions
+
+full_package_name = None
+
+if sys.version_info < (3,):
+ string_type = basestring
+else:
+ string_type = str
+
+
+if sys.version_info < (3, 4):
+
+ class Bytes:
+ pass
+
+ class NameConstant:
+ pass
+
+
+if sys.version_info >= (3, 8):
+ from ast import Str, Num, Bytes, NameConstant # NOQA
+
+
+if sys.version_info < (3,):
+ open_kw = dict()
+else:
+ open_kw = dict(encoding='utf-8')
+
+
+if sys.version_info < (2, 7) or platform.python_implementation() == 'Jython':
+
+ class Set:
+ pass
+
+
+if os.environ.get('DVDEBUG', "") == "":
+
+ def debug(*args, **kw):
+ pass
+
+
+else:
+
+ def debug(*args, **kw):
+ with open(os.environ['DVDEBUG'], 'a') as fp:
+ kw1 = kw.copy()
+ kw1['file'] = fp
+ print('{:%Y-%d-%mT%H:%M:%S}'.format(datetime.datetime.now()), file=fp, end=' ')
+ print(*args, **kw1)
+
+
+def literal_eval(node_or_string):
+ """
+ Safely evaluate an expression node or a string containing a Python
+ expression. The string or node provided may only consist of the following
+ Python literal structures: strings, bytes, numbers, tuples, lists, dicts,
+ sets, booleans, and None.
+
+ Even when passing in Unicode, the resulting Str types parsed are 'str' in Python 2.
+    I don't know how to set 'unicode_literals' on parse -> Str is explicitly converted.
+ """
+ _safe_names = {'None': None, 'True': True, 'False': False}
+ if isinstance(node_or_string, string_type):
+ node_or_string = parse(node_or_string, mode='eval')
+ if isinstance(node_or_string, Expression):
+ node_or_string = node_or_string.body
+ else:
+ raise TypeError('only string or AST nodes supported')
+
+ def _convert(node):
+ if isinstance(node, Str):
+ if sys.version_info < (3,) and not isinstance(node.s, unicode):
+ return node.s.decode('utf-8')
+ return node.s
+ elif isinstance(node, Bytes):
+ return node.s
+ elif isinstance(node, Num):
+ return node.n
+ elif isinstance(node, Tuple):
+ return tuple(map(_convert, node.elts))
+ elif isinstance(node, List):
+ return list(map(_convert, node.elts))
+ elif isinstance(node, Set):
+ return set(map(_convert, node.elts))
+ elif isinstance(node, Dict):
+ return dict((_convert(k), _convert(v)) for k, v in zip(node.keys, node.values))
+ elif isinstance(node, NameConstant):
+ return node.value
+ elif sys.version_info < (3, 4) and isinstance(node, Name):
+ if node.id in _safe_names:
+ return _safe_names[node.id]
+ elif (
+ isinstance(node, UnaryOp)
+ and isinstance(node.op, (UAdd, USub))
+ and isinstance(node.operand, (Num, UnaryOp, BinOp))
+ ): # NOQA
+ operand = _convert(node.operand)
+ if isinstance(node.op, UAdd):
+ return +operand
+ else:
+ return -operand
+ elif (
+ isinstance(node, BinOp)
+ and isinstance(node.op, (Add, Sub))
+ and isinstance(node.right, (Num, UnaryOp, BinOp))
+ and isinstance(node.left, (Num, UnaryOp, BinOp))
+ ): # NOQA
+ left = _convert(node.left)
+ right = _convert(node.right)
+ if isinstance(node.op, Add):
+ return left + right
+ else:
+ return left - right
+ elif isinstance(node, Call):
+ func_id = getattr(node.func, 'id', None)
+ if func_id == 'dict':
+ return dict((k.arg, _convert(k.value)) for k in node.keywords)
+ elif func_id == 'set':
+ return set(_convert(node.args[0]))
+ elif func_id == 'date':
+ return datetime.date(*[_convert(k) for k in node.args])
+ elif func_id == 'datetime':
+ return datetime.datetime(*[_convert(k) for k in node.args])
+ err = SyntaxError('malformed node or string: ' + repr(node))
+ err.filename = '<string>'
+ err.lineno = node.lineno
+ err.offset = node.col_offset
+ err.text = repr(node)
+ err.node = node
+ raise err
+
+ return _convert(node_or_string)
+
+
+# parses python ( "= dict( )" ) or ( "= {" )
+def _package_data(fn):
+ data = {}
+ with open(fn, **open_kw) as fp:
+ parsing = False
+ lines = []
+ for line in fp.readlines():
+ if sys.version_info < (3,):
+ line = line.decode('utf-8')
+ if line.startswith('_package_data'):
+ if 'dict(' in line:
+ parsing = 'python'
+ lines.append('dict(\n')
+ elif line.endswith('= {\n'):
+ parsing = 'python'
+ lines.append('{\n')
+ else:
+ raise NotImplementedError
+ continue
+ if not parsing:
+ continue
+ if parsing == 'python':
+ if line.startswith(')') or line.startswith('}'):
+ lines.append(line)
+ try:
+ data = literal_eval("".join(lines))
+ except SyntaxError as e:
+ context = 2
+ from_line = e.lineno - (context + 1)
+ to_line = e.lineno + (context - 1)
+ w = len(str(to_line))
+ for index, line in enumerate(lines):
+ if from_line <= index <= to_line:
+ print(
+ '{0:{1}}: {2}'.format(index, w, line).encode('utf-8'),
+ end="",
+ )
+ if index == e.lineno - 1:
+ print(
+ '{0:{1}} {2}^--- {3}'.format(
+ ' ', w, ' ' * e.offset, e.node
+ )
+ )
+ raise
+ break
+ lines.append(line)
+ else:
+ raise NotImplementedError
+ return data
+
+
+# make sure you can run "python ../some/dir/setup.py install"
+pkg_data = _package_data(__file__.replace('setup.py', '__init__.py'))
+
+exclude_files = ['setup.py']
+
+
+# # helper
+def _check_convert_version(tup):
+ """Create a PEP 386 pseudo-format conformant string from tuple tup."""
+ ret_val = str(tup[0]) # first is always digit
+ next_sep = '.' # separator for next extension, can be "" or "."
+ nr_digits = 0 # nr of adjacent digits in rest, to verify
+    post_dev = False  # are we processing post/dev
+ for x in tup[1:]:
+ if isinstance(x, int):
+ nr_digits += 1
+ if nr_digits > 2:
+ raise ValueError('too many consecutive digits after ' + ret_val)
+ ret_val += next_sep + str(x)
+ next_sep = '.'
+ continue
+ first_letter = x[0].lower()
+ next_sep = ""
+ if first_letter in 'abcr':
+ if post_dev:
+                raise ValueError('release level specified after post/dev: ' + x)
+ nr_digits = 0
+ ret_val += 'rc' if first_letter == 'r' else first_letter
+ elif first_letter in 'pd':
+ nr_digits = 1 # only one can follow
+ post_dev = True
+ ret_val += '.post' if first_letter == 'p' else '.dev'
+ else:
+ raise ValueError('First letter of "' + x + '" not recognised')
+ # .dev and .post need a number otherwise setuptools normalizes and complains
+ if nr_digits == 1 and post_dev:
+ ret_val += '0'
+ return ret_val
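+# For example, _check_convert_version((0, 15, 1)) returns '0.15.1', and
+# (0, 15, 'dev') returns '0.15.dev0' (the trailing 0 avoids setuptools
+# normalization complaints).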
+
+
+version_info = pkg_data['version_info']
+version_str = _check_convert_version(version_info)
+
+
+class MyInstallLib(install_lib.install_lib):
+ def install(self):
+ fpp = pkg_data['full_package_name'].split('.') # full package path
+ full_exclude_files = [os.path.join(*(fpp + [x])) for x in exclude_files]
+ alt_files = []
+ outfiles = install_lib.install_lib.install(self)
+ for x in outfiles:
+ for full_exclude_file in full_exclude_files:
+ if full_exclude_file in x:
+ os.remove(x)
+ break
+ else:
+ alt_files.append(x)
+ return alt_files
+
+
+class MySdist(_sdist):
+ def initialize_options(self):
+ _sdist.initialize_options(self)
+ # see pep 527, new uploads should be tar.gz or .zip
+ # fmt = getattr(self, 'tarfmt', None)
+ # because of unicode_literals
+ # self.formats = fmt if fmt else [b'bztar'] if sys.version_info < (3, ) else ['bztar']
+ dist_base = os.environ.get('PYDISTBASE')
+ fpn = getattr(getattr(self, 'nsp', self), 'full_package_name', None)
+ if fpn and dist_base:
+ print('setting distdir {}/{}'.format(dist_base, fpn))
+ self.dist_dir = os.path.join(dist_base, fpn)
+
+
+# try/except so this doesn't bomb when you don't have wheel installed;
+# having wheel available implies generation of wheels in ./dist
+try:
+ from wheel.bdist_wheel import bdist_wheel as _bdist_wheel # NOQA
+
+ class MyBdistWheel(_bdist_wheel):
+ def initialize_options(self):
+ _bdist_wheel.initialize_options(self)
+ dist_base = os.environ.get('PYDISTBASE')
+ fpn = getattr(getattr(self, 'nsp', self), 'full_package_name', None)
+ if fpn and dist_base:
+ print('setting distdir {}/{}'.format(dist_base, fpn))
+ self.dist_dir = os.path.join(dist_base, fpn)
+
+ _bdist_wheel_available = True
+
+except ImportError:
+ _bdist_wheel_available = False
+
+
+class NameSpacePackager(object):
+ def __init__(self, pkg_data):
+ assert isinstance(pkg_data, dict)
+ self._pkg_data = pkg_data
+ self.full_package_name = self.pn(self._pkg_data['full_package_name'])
+ self._split = None
+ self.depth = self.full_package_name.count('.')
+ self.nested = self._pkg_data.get('nested', False)
+ if self.nested:
+ NameSpaceInstaller.install_namespaces = lambda x: None
+ self.command = None
+ self.python_version()
+ self._pkg = [None, None] # required and pre-installable packages
+ if (
+ sys.argv[0] == 'setup.py'
+ and sys.argv[1] == 'install'
+ and '--single-version-externally-managed' not in sys.argv
+ ):
+ if os.environ.get('READTHEDOCS', None) == 'True':
+ os.system('pip install .')
+ sys.exit(0)
+ if not os.environ.get('RUAMEL_NO_PIP_INSTALL_CHECK', False):
+ print('error: you have to install with "pip install ."')
+ sys.exit(1)
+ # If you only support an extension module on Linux, Windows thinks it
+ # is pure. That way you would get pure python .whl files that take
+ # precedence for downloading on Linux over source with compilable C code
+ if self._pkg_data.get('universal'):
+ Distribution.is_pure = lambda *args: True
+ else:
+ Distribution.is_pure = lambda *args: False
+ for x in sys.argv:
+ if x[0] == '-' or x == 'setup.py':
+ continue
+ self.command = x
+ break
+
+ def pn(self, s):
+ if sys.version_info < (3,) and isinstance(s, unicode):
+ return s.encode('utf-8')
+ return s
+
+ @property
+ def split(self):
+ """split the full package name in list of compontents traditionally
+ done by setuptools.find_packages. This routine skips any directories
+ with __init__.py, for which the name starts with "_" or ".", or contain a
+ setup.py/tox.ini (indicating a subpackage)
+ """
+ skip = []
+ if self._split is None:
+ fpn = self.full_package_name.split('.')
+ self._split = []
+ while fpn:
+ self._split.insert(0, '.'.join(fpn))
+ fpn = fpn[:-1]
+ for d in sorted(os.listdir('.')):
+ if not os.path.isdir(d) or d == self._split[0] or d[0] in '._':
+ continue
+ # prevent sub-packages in namespace from being included
+ x = os.path.join(d, '__init__.py')
+ if os.path.exists(x):
+ pd = _package_data(x)
+ if pd.get('nested', False):
+ skip.append(d)
+ continue
+ self._split.append(self.full_package_name + '.' + d)
+ if sys.version_info < (3,):
+ self._split = [
+ (y.encode('utf-8') if isinstance(y, unicode) else y) for y in self._split
+ ]
+ if skip:
+ # this interferes with output checking
+ # print('skipping sub-packages:', ', '.join(skip))
+ pass
+ return self._split
+
+ @property
+ def namespace_packages(self):
+ return self.split[: self.depth]
+
+ def namespace_directories(self, depth=None):
+ """return list of directories where the namespace should be created /
+ can be found
+ """
+ res = []
+ for index, d in enumerate(self.split[:depth]):
+ # toplevel gets a dot
+ if index > 0:
+ d = os.path.join(*d.split('.'))
+ res.append('.' + d)
+ return res
+
+ @property
+ def package_dir(self):
+ d = {
+ # don't specify empty dir, clashes with package_data spec
+ self.full_package_name: '.'
+ }
+ if 'extra_packages' in self._pkg_data:
+ return d
+ if len(self.split) > 1: # only if package namespace
+ d[self.split[0]] = self.namespace_directories(1)[0]
+ return d
+
+ def create_dirs(self):
+ """create the directories necessary for namespace packaging"""
+ directories = self.namespace_directories(self.depth)
+ if not directories:
+ return
+ if not os.path.exists(directories[0]):
+ for d in directories:
+ os.mkdir(d)
+ with open(os.path.join(d, '__init__.py'), 'w') as fp:
+ fp.write(
+ 'import pkg_resources\n' 'pkg_resources.declare_namespace(__name__)\n'
+ )
+
+ def python_version(self):
+ supported = self._pkg_data.get('supported')
+ if supported is None:
+ return
+ if len(supported) == 1:
+ minimum = supported[0]
+ else:
+ for x in supported:
+ if x[0] == sys.version_info[0]:
+ minimum = x
+ break
+ else:
+ return
+ if sys.version_info < minimum:
+ print('minimum python version(s): ' + str(supported))
+ sys.exit(1)
+
+ def check(self):
+ try:
+ from pip.exceptions import InstallationError
+ except ImportError:
+ return
+ # arg is either develop (pip install -e) or install
+ if self.command not in ['install', 'develop']:
+ return
+
+ # if hgi and hgi.base are both in namespace_packages matching
+ # against the top (hgi.) it suffices to find minus-e and non-minus-e
+ # installed packages. As we don't know the order in namespace_packages
+ # do some magic
+ prefix = self.split[0]
+ prefixes = set([prefix, prefix.replace('_', '-')])
+ for p in sys.path:
+ if not p:
+ continue # directory with setup.py
+ if os.path.exists(os.path.join(p, 'setup.py')):
+ continue # some linked in stuff might not be hgi based
+ if not os.path.isdir(p):
+ continue
+ if p.startswith('/tmp/'):
+ continue
+ for fn in os.listdir(p):
+ for pre in prefixes:
+ if fn.startswith(pre):
+ break
+ else:
+ continue
+ full_name = os.path.join(p, fn)
+ # unlike in prefixes, the toplevel is never changed from _ to -
+ if fn == prefix and os.path.isdir(full_name):
+ # directory -> other, non-minus-e, install
+ if self.command == 'develop':
+ raise InstallationError(
+ 'Cannot mix develop (pip install -e),\nwith '
+ 'non-develop installs for package name {0}'.format(fn)
+ )
+ elif fn == prefix:
+ raise InstallationError('non directory package {0} in {1}'.format(fn, p))
+ for pre in [x + '.' for x in prefixes]:
+ if fn.startswith(pre):
+ break
+ else:
+ continue # hgiabc instead of hgi.
+ if fn.endswith('-link') and self.command == 'install':
+ raise InstallationError(
+ 'Cannot mix non-develop with develop\n(pip install -e)'
+ ' installs for package name {0}'.format(fn)
+ )
+
+ def entry_points(self, script_name=None, package_name=None):
+ """normally called without explicit script_name and package name
+ the default console_scripts entry depends on the existence of __main__.py:
+ if that file exists then the function main() in there is used, otherwise
+ the in __init__.py.
+
+ the _package_data entry_points key/value pair can be explicitly specified
+ including a "=" character. If the entry is True or 1 the
+ scriptname is the last part of the full package path (split on '.')
+ if the ep entry is a simple string without "=", that is assumed to be
+ the name of the script.
+ """
+
+ def pckg_entry_point(name):
+ return '{0}{1}:main'.format(
+ name, '.__main__' if os.path.exists('__main__.py') else ""
+ )
+
+ ep = self._pkg_data.get('entry_points', True)
+ if isinstance(ep, dict):
+ return ep
+ if ep is None:
+ return None
+ if ep not in [True, 1]:
+ if '=' in ep:
+ # full specification of the entry point like
+ # entry_points=['yaml = ruamel.yaml.cmd:main'],
+ return {'console_scripts': [ep]}
+ # assume that it is just the script name
+ script_name = ep
+ if package_name is None:
+ package_name = self.full_package_name
+ if not script_name:
+ script_name = package_name.split('.')[-1]
+ return {
+ 'console_scripts': [
+ '{0} = {1}'.format(script_name, pckg_entry_point(package_name))
+ ]
+ }
+
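+ # Illustrative results, assuming full_package_name == 'ruamel.yaml' and no
+ # __main__.py in the package directory (values are examples, not shipped
+ # metadata):
+ #   entry_points: True gives {'console_scripts': ['yaml = ruamel.yaml:main']}
+ #   entry_points: 'ryml' gives {'console_scripts': ['ryml = ruamel.yaml:main']}
+ #   entry_points: 'yaml = ruamel.yaml.cmd:main' is passed through as-is
+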
+ @property
+ def url(self):
+ url = self._pkg_data.get('url')
+ if url:
+ return url
+ sp = self.full_package_name
+ for ch in '_.':
+ sp = sp.replace(ch, '-')
+ return 'https://sourceforge.net/p/{0}/code/ci/default/tree'.format(sp)
+
+ @property
+ def author(self):
+ return self._pkg_data['author'] # no get needs to be there
+
+ @property
+ def author_email(self):
+ return self._pkg_data['author_email'] # no get needs to be there
+
+ @property
+ def license(self):
+ """return the license field from _package_data, None means MIT"""
+ lic = self._pkg_data.get('license')
+ if lic is None:
+ # lic_fn = os.path.join(os.path.dirname(__file__), 'LICENSE')
+ # assert os.path.exists(lic_fn)
+ return 'MIT license'
+ return lic
+
+ def has_mit_lic(self):
+ return 'MIT' in self.license
+
+ @property
+ def description(self):
+ return self._pkg_data['description'] # no get needs to be there
+
+ @property
+ def status(self):
+ # αβ
+ status = self._pkg_data.get('status', 'β').lower()
+ if status in ['α', 'alpha']:
+ return (3, 'Alpha')
+ elif status in ['β', 'beta']:
+ return (4, 'Beta')
+ elif 'stable' in status.lower():
+ return (5, 'Production/Stable')
+ raise NotImplementedError
+
+ @property
+ def classifiers(self):
+ """this needs more intelligence, probably splitting the classifiers from _pkg_data
+ and only adding defaults when no explicit entries were provided.
+ Add explicit Python versions in sync with tox.env generation based on python_requires?
+ """
+ attr = '_' + sys._getframe().f_code.co_name
+ if not hasattr(self, attr):
+ setattr(self, attr, self._setup_classifiers())
+ return getattr(self, attr)
+
+ def _setup_classifiers(self):
+ return sorted(
+ set(
+ [
+ 'Development Status :: {0} - {1}'.format(*self.status),
+ 'Intended Audience :: Developers',
+ 'License :: '
+ + ('OSI Approved :: MIT' if self.has_mit_lic() else 'Other/Proprietary')
+ + ' License',
+ 'Operating System :: OS Independent',
+ 'Programming Language :: Python',
+ ]
+ + [self.pn(x) for x in self._pkg_data.get('classifiers', [])]
+ )
+ )
+
+ @property
+ def keywords(self):
+ return self.pn(self._pkg_data.get('keywords', []))
+
+ @property
+ def install_requires(self):
+ """list of packages required for installation"""
+ return self._analyse_packages[0]
+
+ @property
+ def install_pre(self):
+ """list of packages required for installation"""
+ return self._analyse_packages[1]
+
+ @property
+ def _analyse_packages(self):
+ """gather from configuration, names starting with * need
+ to be installed explicitly as they are not on PyPI
+ install_requires should be dict, with keys 'any', 'py27' etc
+ or a list (which is as if only 'any' was defined
+
+ ToDo: update with: pep508 conditional dependencies
+ """
+ if self._pkg[0] is None:
+ self._pkg[0] = []
+ self._pkg[1] = []
+
+ ir = self._pkg_data.get('install_requires')
+ if ir is None:
+ return self._pkg # these will be both empty at this point
+ if isinstance(ir, list):
+ self._pkg[0] = ir
+ return self._pkg
+ # 'any' for all builds, 'py27' etc for specifics versions
+ packages = ir.get('any', [])
+ if isinstance(packages, string_type):
+ packages = packages.split() # assume white space separated string
+ if self.nested:
+ # parent dir is also a package, make sure it is installed (need its .pth file)
+ parent_pkg = self.full_package_name.rsplit('.', 1)[0]
+ if parent_pkg not in packages:
+ packages.append(parent_pkg)
+ implementation = platform.python_implementation()
+ if implementation == 'CPython':
+ pyver = 'py{0}{1}'.format(*sys.version_info)
+ elif implementation == 'PyPy':
+ pyver = 'pypy' if sys.version_info < (3,) else 'pypy3'
+ elif implementation == 'Jython':
+ pyver = 'jython'
+ packages.extend(ir.get(pyver, []))
+ for p in packages:
+ # package name starting with * means use local source tree, non-published
+ # to PyPi or maybe not latest version on PyPI -> pre-install
+ if p[0] == '*':
+ p = p[1:]
+ self._pkg[1].append(p)
+ self._pkg[0].append(p)
+ return self._pkg
+
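+ # Example _package_data layout understood here (package names hypothetical):
+ #   install_requires=dict(
+ #       any=['some.base.pkg'],
+ #       py27=['some.backport.pkg'],
+ #   )
+ # a leading '*' (e.g. '*some.local.pkg') marks a package that must be
+ # pre-installed from a local source tree because it is not on PyPI
+ # (or not the latest version there)
+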
+ @property
+ def extras_require(self):
+ """dict of conditions -> extra packages informaton required for installation
+ as of setuptools 33 doing `package ; python_version<=2.7' in install_requires
+ still doesn't work
+
+ https://www.python.org/dev/peps/pep-0508/
+ https://wheel.readthedocs.io/en/latest/index.html#defining-conditional-dependencies
+ https://hynek.me/articles/conditional-python-dependencies/
+ """
+ ep = self._pkg_data.get('extras_require')
+ return ep
+
+ # @property
+ # def data_files(self):
+ # df = self._pkg_data.get('data_files', [])
+ # if self.has_mit_lic():
+ # df.append('LICENSE')
+ # if not df:
+ # return None
+ # return [('.', df)]
+
+ @property
+ def package_data(self):
+ df = self._pkg_data.get('data_files', [])
+ if self.has_mit_lic():
+ # include the file
+ df.append('LICENSE')
+ # but don't install it
+ exclude_files.append('LICENSE')
+ if self._pkg_data.get('binary_only', False):
+ exclude_files.append('__init__.py')
+ debug('testing<<<<<')
+ if 'Typing :: Typed' in self.classifiers:
+ debug('appending')
+ df.append('py.typed')
+ pd = self._pkg_data.get('package_data', {})
+ if df:
+ pd[self.full_package_name] = df
+ if sys.version_info < (3,):
+ # python2 doesn't seem to like unicode package names as keys
+ # maybe only when the packages themselves are non-unicode
+ for k in pd:
+ if isinstance(k, unicode):
+ pd[str(k)] = pd.pop(k)
+ # for k in pd:
+ # pd[k] = [e.encode('utf-8') for e in pd[k]] # de-unicode
+ return pd
+
+ @property
+ def packages(self):
+ s = self.split
+ # fixed this in package_data, the keys there must be non-unicode for py27
+ # if sys.version_info < (3, 0):
+ # s = [x.encode('utf-8') for x in self.split]
+ return s + self._pkg_data.get('extra_packages', [])
+
+ @property
+ def python_requires(self):
+ return self._pkg_data.get('python_requires', None)
+
+ @property
+ def ext_modules(self):
+ """
+ Check if all modules specified in the value for 'ext_modules' can be build.
+ That value (if not None) is a list of dicts with 'name', 'src', 'lib'
+ Optional 'test' can be used to make sure trying to compile will work on the host
+
+ creates and return the external modules as Extensions, unless that
+ is not necessary at all for the action (like --version)
+
+ test existence of compiler by using export CC=nonexistent; export CXX=nonexistent
+ """
+
+ if hasattr(self, '_ext_modules'):
+ return self._ext_modules
+ if '--version' in sys.argv:
+ return None
+ if platform.python_implementation() == 'Jython':
+ return None
+ try:
+ plat = sys.argv.index('--plat-name')
+ if 'win' in sys.argv[plat + 1]:
+ return None
+ except ValueError:
+ pass
+ self._ext_modules = []
+ no_test_compile = False
+ if '--restructuredtext' in sys.argv:
+ no_test_compile = True
+ elif 'sdist' in sys.argv:
+ no_test_compile = True
+ if no_test_compile:
+ for target in self._pkg_data.get('ext_modules', []):
+ ext = Extension(
+ self.pn(target['name']),
+ sources=[self.pn(x) for x in target['src']],
+ libraries=[self.pn(x) for x in target.get('lib')],
+ )
+ self._ext_modules.append(ext)
+ return self._ext_modules
+
+ print('sys.argv', sys.argv)
+ import tempfile
+ import shutil
+ from textwrap import dedent
+
+ import distutils.sysconfig
+ import distutils.ccompiler
+ from distutils.errors import CompileError, LinkError
+
+ for target in self._pkg_data.get('ext_modules', []): # list of dicts
+ ext = Extension(
+ self.pn(target['name']),
+ sources=[self.pn(x) for x in target['src']],
+ libraries=[self.pn(x) for x in target.get('lib')],
+ )
+ # debug('test1 in target', 'test' in target, target)
+ if 'test' not in target: # no test, just hope it works
+ self._ext_modules.append(ext)
+ continue
+ if sys.version_info[:2] == (3, 4) and platform.system() == 'Windows':
+ # this is giving problems on appveyor, so skip
+ if 'FORCE_C_BUILD_TEST' not in os.environ:
+ self._ext_modules.append(ext)
+ continue
+ # write a temporary .c file to compile
+ c_code = dedent(target['test'])
+ try:
+ tmp_dir = tempfile.mkdtemp(prefix='tmp_ruamel_')
+ bin_file_name = 'test' + self.pn(target['name'])
+ print('test compiling', bin_file_name)
+ file_name = os.path.join(tmp_dir, bin_file_name + '.c')
+ with open(file_name, 'w') as fp: # write source
+ fp.write(c_code)
+ # and try to compile it
+ compiler = distutils.ccompiler.new_compiler()
+ assert isinstance(compiler, distutils.ccompiler.CCompiler)
+ # do any platform specific initialisations
+ distutils.sysconfig.customize_compiler(compiler)
+ # make sure you can reach header files because compile does change dir
+ compiler.add_include_dir(os.getcwd())
+ if sys.version_info < (3,):
+ tmp_dir = tmp_dir.encode('utf-8')
+ # used to be a different directory, not necessary
+ compile_out_dir = tmp_dir
+ try:
+ compiler.link_executable(
+ compiler.compile([file_name], output_dir=compile_out_dir),
+ bin_file_name,
+ output_dir=tmp_dir,
+ libraries=ext.libraries,
+ )
+ except CompileError:
+ debug('compile error:', file_name)
+ print('compile error:', file_name)
+ continue
+ except LinkError:
+ debug('link error', file_name)
+ print('link error', file_name)
+ continue
+ self._ext_modules.append(ext)
+ except Exception as e: # NOQA
+ debug('Exception:', e)
+ print('Exception:', e)
+ if sys.version_info[:2] == (3, 4) and platform.system() == 'Windows':
+ traceback.print_exc()
+ finally:
+ shutil.rmtree(tmp_dir)
+ return self._ext_modules
+
+ @property
+ def test_suite(self):
+ return self._pkg_data.get('test_suite')
+
+ def wheel(self, kw, setup):
+ """temporary add setup.cfg if creating a wheel to include LICENSE file
+ https://bitbucket.org/pypa/wheel/issues/47
+ """
+ if 'bdist_wheel' not in sys.argv:
+ return False
+ file_name = 'setup.cfg'
+ if os.path.exists(file_name): # add it if not in there?
+ return False
+ with open(file_name, 'w') as fp:
+ if os.path.exists('LICENSE'):
+ fp.write('[metadata]\nlicense-file = LICENSE\n')
+ else:
+ print('\n\n>>>>>> LICENSE file not found <<<<<\n\n')
+ if self._pkg_data.get('universal'):
+ fp.write('[bdist_wheel]\nuniversal = 1\n')
+ try:
+ setup(**kw)
+ except Exception:
+ raise
+ finally:
+ os.remove(file_name)
+ return True
+
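+ # The temporary setup.cfg written above looks like this when a LICENSE file
+ # exists and _package_data has 'universal' set:
+ #   [metadata]
+ #   license-file = LICENSE
+ #   [bdist_wheel]
+ #   universal = 1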
+
+# # call setup
+def main():
+ dump_kw = '--dump-kw'
+ if dump_kw in sys.argv:
+ import wheel
+ import distutils
+ import setuptools
+
+ print('python: ', sys.version)
+ print('setuptools:', setuptools.__version__)
+ print('distutils: ', distutils.__version__)
+ print('wheel: ', wheel.__version__)
+ nsp = NameSpacePackager(pkg_data)
+ nsp.check()
+ nsp.create_dirs()
+ MySdist.nsp = nsp
+ if pkg_data.get('tarfmt'):
+ MySdist.tarfmt = pkg_data.get('tarfmt')
+
+ cmdclass = dict(install_lib=MyInstallLib, sdist=MySdist)
+ if _bdist_wheel_available:
+ MyBdistWheel.nsp = nsp
+ cmdclass['bdist_wheel'] = MyBdistWheel
+
+ kw = dict(
+ name=nsp.full_package_name,
+ namespace_packages=nsp.namespace_packages,
+ version=version_str,
+ packages=nsp.packages,
+ python_requires=nsp.python_requires,
+ url=nsp.url,
+ author=nsp.author,
+ author_email=nsp.author_email,
+ cmdclass=cmdclass,
+ package_dir=nsp.package_dir,
+ entry_points=nsp.entry_points(),
+ description=nsp.description,
+ install_requires=nsp.install_requires,
+ extras_require=nsp.extras_require, # available since setuptools 18.0 / 2015-06
+ license=nsp.license,
+ classifiers=nsp.classifiers,
+ keywords=nsp.keywords,
+ package_data=nsp.package_data,
+ ext_modules=nsp.ext_modules,
+ test_suite=nsp.test_suite,
+ )
+
+ if '--version' not in sys.argv and ('--verbose' in sys.argv or dump_kw in sys.argv):
+ for k in sorted(kw):
+ v = kw[k]
+ print(' "{0}": "{1}",'.format(k, v))
+ # if '--record' in sys.argv:
+ # return
+ if dump_kw in sys.argv:
+ sys.argv.remove(dump_kw)
+ try:
+ with open('README.rst') as fp:
+ kw['long_description'] = fp.read()
+ kw['long_description_content_type'] = 'text/x-rst'
+ except Exception:
+ pass
+
+ if nsp.wheel(kw, setup):
+ return
+ for x in ['-c', 'egg_info', '--egg-base', 'pip-egg-info']:
+ if x not in sys.argv:
+ break
+ else:
+ # we're doing a tox setup: install any starred package by searching up the
+ # source tree until you match your/package/name for your.package.name
+ for p in nsp.install_pre:
+ import subprocess
+
+ # search other source
+ setup_path = os.path.join(*p.split('.') + ['setup.py'])
+ try_dir = os.path.dirname(sys.executable)
+ while len(try_dir) > 1:
+ full_path_setup_py = os.path.join(try_dir, setup_path)
+ if os.path.exists(full_path_setup_py):
+ pip = sys.executable.replace('python', 'pip')
+ cmd = [pip, 'install', os.path.dirname(full_path_setup_py)]
+ # with open('/var/tmp/notice', 'a') as fp:
+ # print('installing', cmd, file=fp)
+ subprocess.check_output(cmd)
+ break
+ try_dir = os.path.dirname(try_dir)
+ setup(**kw)
+
+
+main()
diff --git a/libs/dynaconf/vendor/ruamel/yaml/timestamp.py b/libs/dynaconf/vendor/ruamel/yaml/timestamp.py
new file mode 100644
index 000000000..374e4c0f0
--- /dev/null
+++ b/libs/dynaconf/vendor/ruamel/yaml/timestamp.py
@@ -0,0 +1,28 @@
+# coding: utf-8
+
+from __future__ import print_function, absolute_import, division, unicode_literals
+
+import datetime
+import copy
+
+# ToDo: at least on PY3 you could probably attach the tzinfo correctly to the object
+# a more complete datetime might be used by safe loading as well
+
+if False: # MYPY
+ from typing import Any, Dict, Optional, List # NOQA
+
+
+class TimeStamp(datetime.datetime):
+ def __init__(self, *args, **kw):
+ # type: (Any, Any) -> None
+ self._yaml = dict(t=False, tz=None, delta=0) # type: Dict[Any, Any]
+
+ def __new__(cls, *args, **kw): # datetime is immutable
+ # type: (Any, Any) -> Any
+ return datetime.datetime.__new__(cls, *args, **kw) # type: ignore
+
+ def __deepcopy__(self, memo):
+ # type: (Any) -> Any
+ ts = TimeStamp(self.year, self.month, self.day, self.hour, self.minute, self.second)
+ ts._yaml = copy.deepcopy(self._yaml)
+ return ts
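+
+# Usage sketch: TimeStamp behaves like datetime.datetime, but the mutable
+# _yaml dict lets round-trip code carry tz/delta info alongside the value,
+# and __deepcopy__ preserves it (values below are made up):
+#   ts = TimeStamp(2023, 10, 14, 9, 56, 21)
+#   ts._yaml['delta'] = 3600
+#   copy.deepcopy(ts)._yaml['delta'] # -> 3600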
diff --git a/libs/dynaconf/vendor/ruamel/yaml/tokens.py b/libs/dynaconf/vendor/ruamel/yaml/tokens.py
new file mode 100644
index 000000000..5f5a66353
--- /dev/null
+++ b/libs/dynaconf/vendor/ruamel/yaml/tokens.py
@@ -0,0 +1,286 @@
+# # header
+# coding: utf-8
+
+from __future__ import unicode_literals
+
+if False: # MYPY
+ from typing import Text, Any, Dict, Optional, List # NOQA
+ from .error import StreamMark # NOQA
+
+SHOWLINES = True
+
+
+class Token(object):
+ __slots__ = 'start_mark', 'end_mark', '_comment'
+
+ def __init__(self, start_mark, end_mark):
+ # type: (StreamMark, StreamMark) -> None
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+ def __repr__(self):
+ # type: () -> Any
+ # attributes = [key for key in self.__slots__ if not key.endswith('_mark') and
+ # hasattr('self', key)]
+ attributes = [key for key in self.__slots__ if not key.endswith('_mark')]
+ attributes.sort()
+ arguments = ', '.join(['%s=%r' % (key, getattr(self, key)) for key in attributes])
+ if SHOWLINES:
+ try:
+ arguments += ', line: ' + str(self.start_mark.line)
+ except: # NOQA
+ pass
+ try:
+ arguments += ', comment: ' + str(self._comment)
+ except: # NOQA
+ pass
+ return '{}({})'.format(self.__class__.__name__, arguments)
+
+ def add_post_comment(self, comment):
+ # type: (Any) -> None
+ if not hasattr(self, '_comment'):
+ self._comment = [None, None]
+ self._comment[0] = comment
+
+ def add_pre_comments(self, comments):
+ # type: (Any) -> None
+ if not hasattr(self, '_comment'):
+ self._comment = [None, None]
+ assert self._comment[1] is None
+ self._comment[1] = comments
+
+ def get_comment(self):
+ # type: () -> Any
+ return getattr(self, '_comment', None)
+
+ @property
+ def comment(self):
+ # type: () -> Any
+ return getattr(self, '_comment', None)
+
+ def move_comment(self, target, empty=False):
+ # type: (Any, bool) -> Any
+ """move a comment from this token to target (normally next token)
+ used to combine e.g. comments before a BlockEntryToken to the
+ ScalarToken that follows it
+ empty is a special case for empty values -> comment after key
+ """
+ c = self.comment
+ if c is None:
+ return
+ # don't push beyond last element
+ if isinstance(target, (StreamEndToken, DocumentStartToken)):
+ return
+ delattr(self, '_comment')
+ tc = target.comment
+ if not tc: # target comment, just insert
+ # special for empty value in key: value issue 25
+ if empty:
+ c = [c[0], c[1], None, None, c[0]]
+ target._comment = c
+ # nprint('mco2:', self, target, target.comment, empty)
+ return self
+ if c[0] and tc[0] or c[1] and tc[1]:
+ raise NotImplementedError('overlap in comment %r %r' % (c, tc))
+ if c[0]:
+ tc[0] = c[0]
+ if c[1]:
+ tc[1] = c[1]
+ return self
+
+ def split_comment(self):
+ # type: () -> Any
+ """ split the post part of a comment, and return it
+ as comment to be added. Delete second part if [None, None]
+ abc: # this goes to sequence
+ # this goes to first element
+ - first element
+ """
+ comment = self.comment
+ if comment is None or comment[0] is None:
+ return None # nothing to do
+ ret_val = [comment[0], None]
+ if comment[1] is None:
+ delattr(self, '_comment')
+ return ret_val
+
+
+# class BOMToken(Token):
+# id = '<byte order mark>'
+
+
+class DirectiveToken(Token):
+ __slots__ = 'name', 'value'
+ id = '<directive>'
+
+ def __init__(self, name, value, start_mark, end_mark):
+ # type: (Any, Any, Any, Any) -> None
+ Token.__init__(self, start_mark, end_mark)
+ self.name = name
+ self.value = value
+
+
+class DocumentStartToken(Token):
+ __slots__ = ()
+ id = '<document start>'
+
+
+class DocumentEndToken(Token):
+ __slots__ = ()
+ id = '<document end>'
+
+
+class StreamStartToken(Token):
+ __slots__ = ('encoding',)
+ id = '<stream start>'
+
+ def __init__(self, start_mark=None, end_mark=None, encoding=None):
+ # type: (Any, Any, Any) -> None
+ Token.__init__(self, start_mark, end_mark)
+ self.encoding = encoding
+
+
+class StreamEndToken(Token):
+ __slots__ = ()
+ id = '<stream end>'
+
+
+class BlockSequenceStartToken(Token):
+ __slots__ = ()
+ id = '<block sequence start>'
+
+
+class BlockMappingStartToken(Token):
+ __slots__ = ()
+ id = '<block mapping start>'
+
+
+class BlockEndToken(Token):
+ __slots__ = ()
+ id = '<block end>'
+
+
+class FlowSequenceStartToken(Token):
+ __slots__ = ()
+ id = '['
+
+
+class FlowMappingStartToken(Token):
+ __slots__ = ()
+ id = '{'
+
+
+class FlowSequenceEndToken(Token):
+ __slots__ = ()
+ id = ']'
+
+
+class FlowMappingEndToken(Token):
+ __slots__ = ()
+ id = '}'
+
+
+class KeyToken(Token):
+ __slots__ = ()
+ id = '?'
+
+ # def x__repr__(self):
+ # return 'KeyToken({})'.format(
+ # self.start_mark.buffer[self.start_mark.index:].split(None, 1)[0])
+
+
+class ValueToken(Token):
+ __slots__ = ()
+ id = ':'
+
+
+class BlockEntryToken(Token):
+ __slots__ = ()
+ id = '-'
+
+
+class FlowEntryToken(Token):
+ __slots__ = ()
+ id = ','
+
+
+class AliasToken(Token):
+ __slots__ = ('value',)
+ id = '<alias>'
+
+ def __init__(self, value, start_mark, end_mark):
+ # type: (Any, Any, Any) -> None
+ Token.__init__(self, start_mark, end_mark)
+ self.value = value
+
+
+class AnchorToken(Token):
+ __slots__ = ('value',)
+ id = '<anchor>'
+
+ def __init__(self, value, start_mark, end_mark):
+ # type: (Any, Any, Any) -> None
+ Token.__init__(self, start_mark, end_mark)
+ self.value = value
+
+
+class TagToken(Token):
+ __slots__ = ('value',)
+ id = '<tag>'
+
+ def __init__(self, value, start_mark, end_mark):
+ # type: (Any, Any, Any) -> None
+ Token.__init__(self, start_mark, end_mark)
+ self.value = value
+
+
+class ScalarToken(Token):
+ __slots__ = 'value', 'plain', 'style'
+ id = '<scalar>'
+
+ def __init__(self, value, plain, start_mark, end_mark, style=None):
+ # type: (Any, Any, Any, Any, Any) -> None
+ Token.__init__(self, start_mark, end_mark)
+ self.value = value
+ self.plain = plain
+ self.style = style
+
+
+class CommentToken(Token):
+ __slots__ = 'value', 'pre_done'
+ id = '<comment>'
+
+ def __init__(self, value, start_mark, end_mark):
+ # type: (Any, Any, Any) -> None
+ Token.__init__(self, start_mark, end_mark)
+ self.value = value
+
+ def reset(self):
+ # type: () -> None
+ if hasattr(self, 'pre_done'):
+ delattr(self, 'pre_done')
+
+ def __repr__(self):
+ # type: () -> Any
+ v = '{!r}'.format(self.value)
+ if SHOWLINES:
+ try:
+ v += ', line: ' + str(self.start_mark.line)
+ v += ', col: ' + str(self.start_mark.column)
+ except: # NOQA
+ pass
+ return 'CommentToken({})'.format(v)
+
+ def __eq__(self, other):
+ # type: (Any) -> bool
+ if self.start_mark != other.start_mark:
+ return False
+ if self.end_mark != other.end_mark:
+ return False
+ if self.value != other.value:
+ return False
+ return True
+
+ def __ne__(self, other):
+ # type: (Any) -> bool
+ return not self.__eq__(other)
diff --git a/libs/dynaconf/vendor/ruamel/yaml/util.py b/libs/dynaconf/vendor/ruamel/yaml/util.py
new file mode 100644
index 000000000..3eb7d7613
--- /dev/null
+++ b/libs/dynaconf/vendor/ruamel/yaml/util.py
@@ -0,0 +1,190 @@
+# coding: utf-8
+
+"""
+some helper functions that might be generally useful
+"""
+
+from __future__ import absolute_import, print_function
+
+from functools import partial
+import re
+
+from .compat import text_type, binary_type
+
+if False: # MYPY
+ from typing import Any, Dict, Optional, List, Text # NOQA
+ from .compat import StreamTextType # NOQA
+
+
+class LazyEval(object):
+ """
+ Lightweight wrapper around lazily evaluated func(*args, **kwargs).
+
+ func is only evaluated when any attribute of its return value is accessed.
+ Every attribute access is passed through to the wrapped value.
+ (This only excludes special cases like method-wrappers, e.g., __hash__.)
+ The sole additional attribute is the lazy_self function which holds the
+ return value (or, prior to evaluation, func and arguments) in its closure.
+ """
+
+ def __init__(self, func, *args, **kwargs):
+ # type: (Any, Any, Any) -> None
+ def lazy_self():
+ # type: () -> Any
+ return_value = func(*args, **kwargs)
+ object.__setattr__(self, 'lazy_self', lambda: return_value)
+ return return_value
+
+ object.__setattr__(self, 'lazy_self', lazy_self)
+
+ def __getattribute__(self, name):
+ # type: (Any) -> Any
+ lazy_self = object.__getattribute__(self, 'lazy_self')
+ if name == 'lazy_self':
+ return lazy_self
+ return getattr(lazy_self(), name)
+
+ def __setattr__(self, name, value):
+ # type: (Any, Any) -> None
+ setattr(self.lazy_self(), name, value)
+
+
+RegExp = partial(LazyEval, re.compile)
+
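+# Usage sketch: the pattern below (illustrative) is only compiled on first
+# attribute access; LazyEval then caches the compiled object:
+#   digits = RegExp(r'[0-9]+')
+#   digits.match('123') # re.compile runs here, result is reused afterwards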
+
+# originally as comment
+# https://github.com/pre-commit/pre-commit/pull/211#issuecomment-186466605
+# if you use this in your code, I suggest adding a test in your test suite
+# that checks this routine's output against a known piece of your YAML
+# before upgrades to this code break your round-tripped YAML
+def load_yaml_guess_indent(stream, **kw):
+ # type: (StreamTextType, Any) -> Any
+ """guess the indent and block sequence indent of yaml stream/string
+
+ returns round_trip_loaded stream, indent level, block sequence indent
+ - block sequence indent is the number of spaces before a dash relative to previous indent
+ - if there are no block sequences, indent is taken from nested mappings, block sequence
+ indent is unset (None) in that case
+ """
+ from .main import round_trip_load
+
+ # load a yaml file and guess the indentation; if you use TABs ...
+ def leading_spaces(l):
+ # type: (Any) -> int
+ idx = 0
+ while idx < len(l) and l[idx] == ' ':
+ idx += 1
+ return idx
+
+ if isinstance(stream, text_type):
+ yaml_str = stream # type: Any
+ elif isinstance(stream, binary_type):
+ # most likely, but the Reader checks BOM for this
+ yaml_str = stream.decode('utf-8')
+ else:
+ yaml_str = stream.read()
+ map_indent = None
+ indent = None # default if not found for some reason
+ block_seq_indent = None
+ prev_line_key_only = None
+ key_indent = 0
+ for line in yaml_str.splitlines():
+ rline = line.rstrip()
+ lline = rline.lstrip()
+ if lline.startswith('- '):
+ l_s = leading_spaces(line)
+ block_seq_indent = l_s - key_indent
+ idx = l_s + 1
+ while line[idx] == ' ': # this will end as we rstripped
+ idx += 1
+ if line[idx] == '#': # comment after -
+ continue
+ indent = idx - key_indent
+ break
+ if map_indent is None and prev_line_key_only is not None and rline:
+ idx = 0
+ while line[idx] in ' -':
+ idx += 1
+ if idx > prev_line_key_only:
+ map_indent = idx - prev_line_key_only
+ if rline.endswith(':'):
+ key_indent = leading_spaces(line)
+ idx = 0
+ while line[idx] == ' ': # this will end on ':'
+ idx += 1
+ prev_line_key_only = idx
+ continue
+ prev_line_key_only = None
+ if indent is None and map_indent is not None:
+ indent = map_indent
+ return round_trip_load(yaml_str, **kw), indent, block_seq_indent
+
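+# Minimal usage sketch (made-up YAML snippet):
+#   data, indent, block_seq_indent = load_yaml_guess_indent(
+#       'a:\n  b:\n    - 1\n    - 2\n')
+#   # -> indent == 4, block_seq_indent == 2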
+
+def configobj_walker(cfg):
+ # type: (Any) -> Any
+ """
+ walks over a ConfigObj (INI file with comments) generating
+ corresponding YAML output (including comments)
+ """
+ from configobj import ConfigObj # type: ignore
+
+ assert isinstance(cfg, ConfigObj)
+ for c in cfg.initial_comment:
+ if c.strip():
+ yield c
+ for s in _walk_section(cfg):
+ if s.strip():
+ yield s
+ for c in cfg.final_comment:
+ if c.strip():
+ yield c
+
+
+def _walk_section(s, level=0):
+ # type: (Any, int) -> Any
+ from configobj import Section
+
+ assert isinstance(s, Section)
+ indent = u' ' * level
+ for name in s.scalars:
+ for c in s.comments[name]:
+ yield indent + c.strip()
+ x = s[name]
+ if u'\n' in x:
+ i = indent + u' '
+ x = u'|\n' + i + x.strip().replace(u'\n', u'\n' + i)
+ elif ':' in x:
+ x = u"'" + x.replace(u"'", u"''") + u"'"
+ line = u'{0}{1}: {2}'.format(indent, name, x)
+ c = s.inline_comments[name]
+ if c:
+ line += u' ' + c
+ yield line
+ for name in s.sections:
+ for c in s.comments[name]:
+ yield indent + c.strip()
+ line = u'{0}{1}:'.format(indent, name)
+ c = s.inline_comments[name]
+ if c:
+ line += u' ' + c
+ yield line
+ for val in _walk_section(s[name], level=level + 1):
+ yield val
+
+
+# def config_obj_2_rt_yaml(cfg):
+# from .comments import CommentedMap, CommentedSeq
+# from configobj import ConfigObj
+# assert isinstance(cfg, ConfigObj)
+# #for c in cfg.initial_comment:
+# # if c.strip():
+# # pass
+# cm = CommentedMap()
+# for name in s.sections:
+# cm[name] = d = CommentedMap()
+#
+#
+# #for c in cfg.final_comment:
+# # if c.strip():
+# # yield c
+# return cm
diff --git a/libs/dynaconf/vendor/source b/libs/dynaconf/vendor/source
new file mode 100644
index 000000000..fb3c10f92
--- /dev/null
+++ b/libs/dynaconf/vendor/source
@@ -0,0 +1,4 @@
+THIS FILE EXISTS ONLY TO INDICATE THAT THIS DIRECTORY
+CONTAINS SOURCE FILES FOR VENDORED LIBRARIES
+
+DURING RELEASE PROCESS THOSE FILES ARE MINIFIED.
diff --git a/libs/dynaconf/vendor/toml/DEPRECATION.txt b/libs/dynaconf/vendor/toml/DEPRECATION.txt
new file mode 100644
index 000000000..25cec54b9
--- /dev/null
+++ b/libs/dynaconf/vendor/toml/DEPRECATION.txt
@@ -0,0 +1,3 @@
+This lib will be deprecated in 4.0.0;
+toml_loader and all the other places
+will default to tomllib.
diff --git a/libs/dynaconf/vendor/toml/__init__.py b/libs/dynaconf/vendor/toml/__init__.py
new file mode 100644
index 000000000..338d74c17
--- /dev/null
+++ b/libs/dynaconf/vendor/toml/__init__.py
@@ -0,0 +1,25 @@
+"""Python module which parses and emits TOML.
+
+Released under the MIT license.
+"""
+
+from . import encoder
+from . import decoder
+
+__version__ = "0.10.1"
+_spec_ = "0.5.0"
+
+load = decoder.load
+loads = decoder.loads
+TomlDecoder = decoder.TomlDecoder
+TomlDecodeError = decoder.TomlDecodeError
+TomlPreserveCommentDecoder = decoder.TomlPreserveCommentDecoder
+
+dump = encoder.dump
+dumps = encoder.dumps
+TomlEncoder = encoder.TomlEncoder
+TomlArraySeparatorEncoder = encoder.TomlArraySeparatorEncoder
+TomlPreserveInlineDictEncoder = encoder.TomlPreserveInlineDictEncoder
+TomlNumpyEncoder = encoder.TomlNumpyEncoder
+TomlPreserveCommentEncoder = encoder.TomlPreserveCommentEncoder
+TomlPathlibEncoder = encoder.TomlPathlibEncoder
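+
+# Quick usage sketch (hypothetical document and import path):
+#   from dynaconf.vendor import toml
+#   data = toml.loads('[server]\nport = 8080\n')
+#   data['server']['port'] # -> 8080
+#   toml.dumps(data) # emits TOML text again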
diff --git a/libs/dynaconf/vendor/toml/decoder.py b/libs/dynaconf/vendor/toml/decoder.py
new file mode 100644
index 000000000..9229733f0
--- /dev/null
+++ b/libs/dynaconf/vendor/toml/decoder.py
@@ -0,0 +1,1052 @@
+import datetime
+import io
+from os import linesep
+import re
+import sys
+
+from .tz import TomlTz
+
+if sys.version_info < (3,):
+ _range = xrange # noqa: F821
+else:
+ unicode = str
+ _range = range
+ basestring = str
+ unichr = chr
+
+
+def _detect_pathlib_path(p):
+ if (3, 4) <= sys.version_info:
+ import pathlib
+ if isinstance(p, pathlib.PurePath):
+ return True
+ return False
+
+
+def _ispath(p):
+ if isinstance(p, (bytes, basestring)):
+ return True
+ return _detect_pathlib_path(p)
+
+
+def _getpath(p):
+ if (3, 6) <= sys.version_info:
+ import os
+ return os.fspath(p)
+ if _detect_pathlib_path(p):
+ return str(p)
+ return p
+
+
+try:
+ FNFError = FileNotFoundError
+except NameError:
+ FNFError = IOError
+
+
+TIME_RE = re.compile(r"([0-9]{2}):([0-9]{2}):([0-9]{2})(\.([0-9]{3,6}))?")
+
+
+class TomlDecodeError(ValueError):
+ """Base toml Exception / Error."""
+
+ def __init__(self, msg, doc, pos):
+ lineno = doc.count('\n', 0, pos) + 1
+ colno = pos - doc.rfind('\n', 0, pos)
+ emsg = '{} (line {} column {} char {})'.format(msg, lineno, colno, pos)
+ ValueError.__init__(self, emsg)
+ self.msg = msg
+ self.doc = doc
+ self.pos = pos
+ self.lineno = lineno
+ self.colno = colno
+
+
+# Matches a TOML number, which allows underscores for readability
+_number_with_underscores = re.compile('([0-9])(_([0-9]))*')
+
+
+class CommentValue(object):
+ def __init__(self, val, comment, beginline, _dict):
+ self.val = val
+ separator = "\n" if beginline else " "
+ self.comment = separator + comment
+ self._dict = _dict
+
+ def __getitem__(self, key):
+ return self.val[key]
+
+ def __setitem__(self, key, value):
+ self.val[key] = value
+
+ def dump(self, dump_value_func):
+ retstr = dump_value_func(self.val)
+ if isinstance(self.val, self._dict):
+ return self.comment + "\n" + unicode(retstr)
+ else:
+ return unicode(retstr) + self.comment
+
+
+def _strictly_valid_num(n):
+ n = n.strip()
+ if not n:
+ return False
+ if n[0] == '_':
+ return False
+ if n[-1] == '_':
+ return False
+ if "_." in n or "._" in n:
+ return False
+ if len(n) == 1:
+ return True
+ if n[0] == '0' and n[1] not in ['.', 'o', 'b', 'x']:
+ return False
+ if n[0] == '+' or n[0] == '-':
+ n = n[1:]
+ if len(n) > 1 and n[0] == '0' and n[1] != '.':
+ return False
+ if '__' in n:
+ return False
+ return True
+
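+# A few illustrative checks:
+#   _strictly_valid_num('1_000') -> True
+#   _strictly_valid_num('01')    -> False (leading zero)
+#   _strictly_valid_num('1__0')  -> False (double underscore)
+#   _strictly_valid_num('_10')   -> False (leading underscore)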
+
+def load(f, _dict=dict, decoder=None):
+ """Parses named file or files as toml and returns a dictionary
+
+ Args:
+ f: Path to the file to open, array of files to read into single dict
+ or a file descriptor
+ _dict: (optional) Specifies the class of the returned toml dictionary
+ decoder: The decoder to use
+
+ Returns:
+ Parsed toml file represented as a dictionary
+
+ Raises:
+ TypeError -- When f is invalid type
+ TomlDecodeError: Error while decoding toml
+ IOError / FileNotFoundError -- (Python 2 / Python 3) When an array
+ with no valid (existing) file paths is passed
+ """
+
+ if _ispath(f):
+ with io.open(_getpath(f), encoding='utf-8') as ffile:
+ return loads(ffile.read(), _dict, decoder)
+ elif isinstance(f, list):
+ from os import path as op
+ from warnings import warn
+ if not [path for path in f if op.exists(path)]:
+ error_msg = "Load expects a list to contain filenames only."
+ error_msg += linesep
+ error_msg += ("The list needs to contain the path of at least one "
+ "existing file.")
+ raise FNFError(error_msg)
+ if decoder is None:
+ decoder = TomlDecoder(_dict)
+ d = decoder.get_empty_table()
+ for l in f: # noqa: E741
+ if op.exists(l):
+ d.update(load(l, _dict, decoder))
+ else:
+ warn("Non-existent filename in list with at least one valid "
+ "filename")
+ return d
+ else:
+ try:
+ return loads(f.read(), _dict, decoder)
+ except AttributeError:
+ raise TypeError("You can only load a file descriptor, filename or "
+ "list")
+
+
+_groupname_re = re.compile(r'^[A-Za-z0-9_-]+$')
+
+
+def loads(s, _dict=dict, decoder=None):
+ """Parses string as toml
+
+ Args:
+ s: String to be parsed
+ _dict: (optional) Specifies the class of the returned toml dictionary
+ decoder: (optional) The TomlDecoder to use
+
+ Returns:
+ Parsed toml file represented as a dictionary
+
+ Raises:
+ TypeError: When a non-string is passed
+ TomlDecodeError: Error while decoding toml
+ """
+
+ implicitgroups = []
+ if decoder is None:
+ decoder = TomlDecoder(_dict)
+ retval = decoder.get_empty_table()
+ currentlevel = retval
+ if not isinstance(s, basestring):
+ raise TypeError("Expecting something like a string")
+
+ if not isinstance(s, unicode):
+ s = s.decode('utf8')
+
+ original = s
+ sl = list(s)
+ openarr = 0
+ openstring = False
+ openstrchar = ""
+ multilinestr = False
+ arrayoftables = False
+ beginline = True
+ keygroup = False
+ dottedkey = False
+ keyname = 0
+ key = ''
+ prev_key = ''
+ line_no = 1
+
+ for i, item in enumerate(sl):
+ if item == '\r' and sl[i + 1] == '\n':
+ sl[i] = ' '
+ continue
+ if keyname:
+ key += item
+ if item == '\n':
+ raise TomlDecodeError("Key name found without value."
+ " Reached end of line.", original, i)
+ if openstring:
+ if item == openstrchar:
+ oddbackslash = False
+ k = 1
+ while i >= k and sl[i - k] == '\\':
+ oddbackslash = not oddbackslash
+ k += 1
+ if not oddbackslash:
+ keyname = 2
+ openstring = False
+ openstrchar = ""
+ continue
+ elif keyname == 1:
+ if item.isspace():
+ keyname = 2
+ continue
+ elif item == '.':
+ dottedkey = True
+ continue
+ elif item.isalnum() or item == '_' or item == '-':
+ continue
+ elif (dottedkey and sl[i - 1] == '.' and
+ (item == '"' or item == "'")):
+ openstring = True
+ openstrchar = item
+ continue
+ elif keyname == 2:
+ if item.isspace():
+ if dottedkey:
+ nextitem = sl[i + 1]
+ if not nextitem.isspace() and nextitem != '.':
+ keyname = 1
+ continue
+ if item == '.':
+ dottedkey = True
+ nextitem = sl[i + 1]
+ if not nextitem.isspace() and nextitem != '.':
+ keyname = 1
+ continue
+ if item == '=':
+ keyname = 0
+ prev_key = key[:-1].rstrip()
+ key = ''
+ dottedkey = False
+ else:
+ raise TomlDecodeError("Found invalid character in key name: '" +
+ item + "'. Try quoting the key name.",
+ original, i)
+ if item == "'" and openstrchar != '"':
+ k = 1
+ try:
+ while sl[i - k] == "'":
+ k += 1
+ if k == 3:
+ break
+ except IndexError:
+ pass
+ if k == 3:
+ multilinestr = not multilinestr
+ openstring = multilinestr
+ else:
+ openstring = not openstring
+ if openstring:
+ openstrchar = "'"
+ else:
+ openstrchar = ""
+ if item == '"' and openstrchar != "'":
+ oddbackslash = False
+ k = 1
+ tripquote = False
+ try:
+ while sl[i - k] == '"':
+ k += 1
+ if k == 3:
+ tripquote = True
+ break
+ if k == 1 or (k == 3 and tripquote):
+ while sl[i - k] == '\\':
+ oddbackslash = not oddbackslash
+ k += 1
+ except IndexError:
+ pass
+ if not oddbackslash:
+ if tripquote:
+ multilinestr = not multilinestr
+ openstring = multilinestr
+ else:
+ openstring = not openstring
+ if openstring:
+ openstrchar = '"'
+ else:
+ openstrchar = ""
+ if item == '#' and (not openstring and not keygroup and
+ not arrayoftables):
+ j = i
+ comment = ""
+ try:
+ while sl[j] != '\n':
+ comment += s[j]
+ sl[j] = ' '
+ j += 1
+ except IndexError:
+ break
+ if not openarr:
+ decoder.preserve_comment(line_no, prev_key, comment, beginline)
+ if item == '[' and (not openstring and not keygroup and
+ not arrayoftables):
+ if beginline:
+ if len(sl) > i + 1 and sl[i + 1] == '[':
+ arrayoftables = True
+ else:
+ keygroup = True
+ else:
+ openarr += 1
+ if item == ']' and not openstring:
+ if keygroup:
+ keygroup = False
+ elif arrayoftables:
+ if sl[i - 1] == ']':
+ arrayoftables = False
+ else:
+ openarr -= 1
+ if item == '\n':
+ if openstring or multilinestr:
+ if not multilinestr:
+ raise TomlDecodeError("Unbalanced quotes", original, i)
+ if ((sl[i - 1] == "'" or sl[i - 1] == '"') and (
+ sl[i - 2] == sl[i - 1])):
+ sl[i] = sl[i - 1]
+ if sl[i - 3] == sl[i - 1]:
+ sl[i - 3] = ' '
+ elif openarr:
+ sl[i] = ' '
+ else:
+ beginline = True
+ line_no += 1
+ elif beginline and sl[i] != ' ' and sl[i] != '\t':
+ beginline = False
+ if not keygroup and not arrayoftables:
+ if sl[i] == '=':
+ raise TomlDecodeError("Found empty keyname. ", original, i)
+ keyname = 1
+ key += item
+ if keyname:
+ raise TomlDecodeError("Key name found without value."
+ " Reached end of file.", original, len(s))
+ if openstring: # reached EOF and have an unterminated string
+ raise TomlDecodeError("Unterminated string found."
+ " Reached end of file.", original, len(s))
+ s = ''.join(sl)
+ s = s.split('\n')
+ multikey = None
+ multilinestr = ""
+ multibackslash = False
+ pos = 0
+ for idx, line in enumerate(s):
+ if idx > 0:
+ pos += len(s[idx - 1]) + 1
+
+ decoder.embed_comments(idx, currentlevel)
+
+ if not multilinestr or multibackslash or '\n' not in multilinestr:
+ line = line.strip()
+ if line == "" and (not multikey or multibackslash):
+ continue
+ if multikey:
+ if multibackslash:
+ multilinestr += line
+ else:
+ multilinestr += line
+ multibackslash = False
+ closed = False
+ if multilinestr[0] == '[':
+ closed = line[-1] == ']'
+ elif len(line) > 2:
+ closed = (line[-1] == multilinestr[0] and
+ line[-2] == multilinestr[0] and
+ line[-3] == multilinestr[0])
+ if closed:
+ try:
+ value, vtype = decoder.load_value(multilinestr)
+ except ValueError as err:
+ raise TomlDecodeError(str(err), original, pos)
+ currentlevel[multikey] = value
+ multikey = None
+ multilinestr = ""
+ else:
+ k = len(multilinestr) - 1
+ while k > -1 and multilinestr[k] == '\\':
+ multibackslash = not multibackslash
+ k -= 1
+ if multibackslash:
+ multilinestr = multilinestr[:-1]
+ else:
+ multilinestr += "\n"
+ continue
+ if line[0] == '[':
+ arrayoftables = False
+ if len(line) == 1:
+ raise TomlDecodeError("Opening key group bracket on line by "
+ "itself.", original, pos)
+ if line[1] == '[':
+ arrayoftables = True
+ line = line[2:]
+ splitstr = ']]'
+ else:
+ line = line[1:]
+ splitstr = ']'
+ i = 1
+ quotesplits = decoder._get_split_on_quotes(line)
+ quoted = False
+ for quotesplit in quotesplits:
+ if not quoted and splitstr in quotesplit:
+ break
+ i += quotesplit.count(splitstr)
+ quoted = not quoted
+ line = line.split(splitstr, i)
+ if len(line) < i + 1 or line[-1].strip() != "":
+ raise TomlDecodeError("Key group not on a line by itself.",
+ original, pos)
+ groups = splitstr.join(line[:-1]).split('.')
+ i = 0
+ while i < len(groups):
+ groups[i] = groups[i].strip()
+ if len(groups[i]) > 0 and (groups[i][0] == '"' or
+ groups[i][0] == "'"):
+ groupstr = groups[i]
+ j = i + 1
+ while not groupstr[0] == groupstr[-1]:
+ j += 1
+ if j > len(groups) + 2:
+ raise TomlDecodeError("Invalid group name '" +
+ groupstr + "' Something " +
+ "went wrong.", original, pos)
+ groupstr = '.'.join(groups[i:j]).strip()
+ groups[i] = groupstr[1:-1]
+ groups[i + 1:j] = []
+ else:
+ if not _groupname_re.match(groups[i]):
+ raise TomlDecodeError("Invalid group name '" +
+ groups[i] + "'. Try quoting it.",
+ original, pos)
+ i += 1
+ currentlevel = retval
+ for i in _range(len(groups)):
+ group = groups[i]
+ if group == "":
+ raise TomlDecodeError("Can't have a keygroup with an empty "
+ "name", original, pos)
+ try:
+ currentlevel[group]
+ if i == len(groups) - 1:
+ if group in implicitgroups:
+ implicitgroups.remove(group)
+ if arrayoftables:
+ raise TomlDecodeError("An implicitly defined "
+ "table can't be an array",
+ original, pos)
+ elif arrayoftables:
+ currentlevel[group].append(decoder.get_empty_table()
+ )
+ else:
+ raise TomlDecodeError("What? " + group +
+ " already exists?" +
+ str(currentlevel),
+ original, pos)
+ except TypeError:
+ currentlevel = currentlevel[-1]
+ if group not in currentlevel:
+ currentlevel[group] = decoder.get_empty_table()
+ if i == len(groups) - 1 and arrayoftables:
+ currentlevel[group] = [decoder.get_empty_table()]
+ except KeyError:
+ if i != len(groups) - 1:
+ implicitgroups.append(group)
+ currentlevel[group] = decoder.get_empty_table()
+ if i == len(groups) - 1 and arrayoftables:
+ currentlevel[group] = [decoder.get_empty_table()]
+ currentlevel = currentlevel[group]
+ if arrayoftables:
+ try:
+ currentlevel = currentlevel[-1]
+ except KeyError:
+ pass
+ elif line[0] == "{":
+ if line[-1] != "}":
+ raise TomlDecodeError("Line breaks are not allowed in inline"
+ "objects", original, pos)
+ try:
+ decoder.load_inline_object(line, currentlevel, multikey,
+ multibackslash)
+ except ValueError as err:
+ raise TomlDecodeError(str(err), original, pos)
+ elif "=" in line:
+ try:
+ ret = decoder.load_line(line, currentlevel, multikey,
+ multibackslash)
+ except ValueError as err:
+ raise TomlDecodeError(str(err), original, pos)
+ if ret is not None:
+ multikey, multilinestr, multibackslash = ret
+ return retval
+
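+# Usage sketch for loads (hypothetical input; the inline table comes back as
+# a dict-like object):
+#   loads('point = { x = 1, y = 2 }') # -> {'point': {'x': 1, 'y': 2}}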
+
+def _load_date(val):
+ microsecond = 0
+ tz = None
+ try:
+ if len(val) > 19:
+ if val[19] == '.':
+ if val[-1].upper() == 'Z':
+ subsecondval = val[20:-1]
+ tzval = "Z"
+ else:
+ subsecondvalandtz = val[20:]
+ if '+' in subsecondvalandtz:
+ splitpoint = subsecondvalandtz.index('+')
+ subsecondval = subsecondvalandtz[:splitpoint]
+ tzval = subsecondvalandtz[splitpoint:]
+ elif '-' in subsecondvalandtz:
+ splitpoint = subsecondvalandtz.index('-')
+ subsecondval = subsecondvalandtz[:splitpoint]
+ tzval = subsecondvalandtz[splitpoint:]
+ else:
+ tzval = None
+ subsecondval = subsecondvalandtz
+ if tzval is not None:
+ tz = TomlTz(tzval)
+ microsecond = int(int(subsecondval) *
+ (10 ** (6 - len(subsecondval))))
+ else:
+ tz = TomlTz(val[19:])
+ except ValueError:
+ tz = None
+ if "-" not in val[1:]:
+ return None
+ try:
+ if len(val) == 10:
+ d = datetime.date(
+ int(val[:4]), int(val[5:7]),
+ int(val[8:10]))
+ else:
+ d = datetime.datetime(
+ int(val[:4]), int(val[5:7]),
+ int(val[8:10]), int(val[11:13]),
+ int(val[14:16]), int(val[17:19]), microsecond, tz)
+ except ValueError:
+ return None
+ return d
+
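+# Illustrative parses (values made up):
+#   _load_date('1979-05-27')           -> datetime.date(1979, 5, 27)
+#   _load_date('1979-05-27T07:32:00Z') -> tz-aware datetime.datetime
+#   _load_date('not-a-date')           -> None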
+
+def _load_unicode_escapes(v, hexbytes, prefix):
+ skip = False
+ i = len(v) - 1
+ while i > -1 and v[i] == '\\':
+ skip = not skip
+ i -= 1
+ for hx in hexbytes:
+ if skip:
+ skip = False
+ i = len(hx) - 1
+ while i > -1 and hx[i] == '\\':
+ skip = not skip
+ i -= 1
+ v += prefix
+ v += hx
+ continue
+ hxb = ""
+ i = 0
+ hxblen = 4
+ if prefix == "\\U":
+ hxblen = 8
+ hxb = ''.join(hx[i:i + hxblen]).lower()
+ if hxb.strip('0123456789abcdef'):
+ raise ValueError("Invalid escape sequence: " + hxb)
+ if hxb[0] == "d" and hxb[1].strip('01234567'):
+ raise ValueError("Invalid escape sequence: " + hxb +
+ ". Only scalar unicode points are allowed.")
+ v += unichr(int(hxb, 16))
+ v += unicode(hx[len(hxb):])
+ return v
+
+
+# Unescape TOML string values.
+
+# content after the \
+_escapes = ['0', 'b', 'f', 'n', 'r', 't', '"']
+# What it should be replaced by
+_escapedchars = ['\0', '\b', '\f', '\n', '\r', '\t', '\"']
+# Used for substitution
+_escape_to_escapedchars = dict(zip(_escapes, _escapedchars))
+
+
+def _unescape(v):
+ """Unescape characters in a TOML string."""
+ i = 0
+ backslash = False
+ while i < len(v):
+ if backslash:
+ backslash = False
+ if v[i] in _escapes:
+ v = v[:i - 1] + _escape_to_escapedchars[v[i]] + v[i + 1:]
+ elif v[i] == '\\':
+ v = v[:i - 1] + v[i:]
+ elif v[i] == 'u' or v[i] == 'U':
+ i += 1
+ else:
+ raise ValueError("Reserved escape sequence used")
+ continue
+ elif v[i] == '\\':
+ backslash = True
+ i += 1
+ return v
+
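+# Illustrative call (escapes written in Python source notation):
+#   _unescape('a\\tb\\\\c') -> 'a\tb\\c' (a real tab, then a single backslash)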
+
+class InlineTableDict(object):
+ """Sentinel subclass of dict for inline tables."""
+
+
+class TomlDecoder(object):
+
+ def __init__(self, _dict=dict):
+ self._dict = _dict
+
+ def get_empty_table(self):
+ return self._dict()
+
+ def get_empty_inline_table(self):
+ class DynamicInlineTableDict(self._dict, InlineTableDict):
+ """Concrete sentinel subclass for inline tables.
+ It is a subclass of _dict which is passed in dynamically at load
+ time
+
+ It is also a subclass of InlineTableDict
+ """
+
+ return DynamicInlineTableDict()
+
+ def load_inline_object(self, line, currentlevel, multikey=False,
+ multibackslash=False):
+ candidate_groups = line[1:-1].split(",")
+ groups = []
+ if len(candidate_groups) == 1 and not candidate_groups[0].strip():
+ candidate_groups.pop()
+ while len(candidate_groups) > 0:
+ candidate_group = candidate_groups.pop(0)
+ try:
+ _, value = candidate_group.split('=', 1)
+ except ValueError:
+ raise ValueError("Invalid inline table encountered")
+ value = value.strip()
+ if ((value[0] == value[-1] and value[0] in ('"', "'")) or (
+ value[0] in '-0123456789' or
+ value in ('true', 'false') or
+ (value[0] == "[" and value[-1] == "]") or
+ (value[0] == '{' and value[-1] == '}'))):
+ groups.append(candidate_group)
+ elif len(candidate_groups) > 0:
+ candidate_groups[0] = (candidate_group + "," +
+ candidate_groups[0])
+ else:
+ raise ValueError("Invalid inline table value encountered")
+ for group in groups:
+ status = self.load_line(group, currentlevel, multikey,
+ multibackslash)
+ if status is not None:
+ break
+
+ def _get_split_on_quotes(self, line):
+ doublequotesplits = line.split('"')
+ quoted = False
+ quotesplits = []
+ if len(doublequotesplits) > 1 and "'" in doublequotesplits[0]:
+ singlequotesplits = doublequotesplits[0].split("'")
+ doublequotesplits = doublequotesplits[1:]
+ while len(singlequotesplits) % 2 == 0 and len(doublequotesplits):
+ singlequotesplits[-1] += '"' + doublequotesplits[0]
+ doublequotesplits = doublequotesplits[1:]
+ if "'" in singlequotesplits[-1]:
+ singlequotesplits = (singlequotesplits[:-1] +
+ singlequotesplits[-1].split("'"))
+ quotesplits += singlequotesplits
+ for doublequotesplit in doublequotesplits:
+ if quoted:
+ quotesplits.append(doublequotesplit)
+ else:
+ quotesplits += doublequotesplit.split("'")
+ quoted = not quoted
+ return quotesplits
+
+ def load_line(self, line, currentlevel, multikey, multibackslash):
+ i = 1
+ quotesplits = self._get_split_on_quotes(line)
+ quoted = False
+ for quotesplit in quotesplits:
+ if not quoted and '=' in quotesplit:
+ break
+ i += quotesplit.count('=')
+ quoted = not quoted
+ pair = line.split('=', i)
+ strictly_valid = _strictly_valid_num(pair[-1])
+ if _number_with_underscores.match(pair[-1]):
+ pair[-1] = pair[-1].replace('_', '')
+ while len(pair[-1]) and (pair[-1][0] != ' ' and pair[-1][0] != '\t' and
+ pair[-1][0] != "'" and pair[-1][0] != '"' and
+ pair[-1][0] != '[' and pair[-1][0] != '{' and
+ pair[-1].strip() != 'true' and
+ pair[-1].strip() != 'false'):
+ try:
+ float(pair[-1])
+ break
+ except ValueError:
+ pass
+ if _load_date(pair[-1]) is not None:
+ break
+ if TIME_RE.match(pair[-1]):
+ break
+ i += 1
+ prev_val = pair[-1]
+ pair = line.split('=', i)
+ if prev_val == pair[-1]:
+ raise ValueError("Invalid date or number")
+ if strictly_valid:
+ strictly_valid = _strictly_valid_num(pair[-1])
+ pair = ['='.join(pair[:-1]).strip(), pair[-1].strip()]
+ if '.' in pair[0]:
+ if '"' in pair[0] or "'" in pair[0]:
+ quotesplits = self._get_split_on_quotes(pair[0])
+ quoted = False
+ levels = []
+ for quotesplit in quotesplits:
+ if quoted:
+ levels.append(quotesplit)
+ else:
+ levels += [level.strip() for level in
+ quotesplit.split('.')]
+ quoted = not quoted
+ else:
+ levels = pair[0].split('.')
+ while levels[-1] == "":
+ levels = levels[:-1]
+ for level in levels[:-1]:
+ if level == "":
+ continue
+ if level not in currentlevel:
+ currentlevel[level] = self.get_empty_table()
+ currentlevel = currentlevel[level]
+ pair[0] = levels[-1].strip()
+ elif (pair[0][0] == '"' or pair[0][0] == "'") and \
+ (pair[0][-1] == pair[0][0]):
+ pair[0] = _unescape(pair[0][1:-1])
+ k, koffset = self._load_line_multiline_str(pair[1])
+ if k > -1:
+ while k > -1 and pair[1][k + koffset] == '\\':
+ multibackslash = not multibackslash
+ k -= 1
+ if multibackslash:
+ multilinestr = pair[1][:-1]
+ else:
+ multilinestr = pair[1] + "\n"
+ multikey = pair[0]
+ else:
+ value, vtype = self.load_value(pair[1], strictly_valid)
+ try:
+ currentlevel[pair[0]]
+ raise ValueError("Duplicate keys!")
+ except TypeError:
+ raise ValueError("Duplicate keys!")
+ except KeyError:
+ if multikey:
+ return multikey, multilinestr, multibackslash
+ else:
+ currentlevel[pair[0]] = value
+
+ def _load_line_multiline_str(self, p):
+ poffset = 0
+ if len(p) < 3:
+ return -1, poffset
+ if p[0] == '[' and (p.strip()[-1] != ']' and
+ self._load_array_isstrarray(p)):
+ newp = p[1:].strip().split(',')
+ while len(newp) > 1 and newp[-1][0] != '"' and newp[-1][0] != "'":
+ newp = newp[:-2] + [newp[-2] + ',' + newp[-1]]
+ newp = newp[-1]
+ poffset = len(p) - len(newp)
+ p = newp
+ if p[0] != '"' and p[0] != "'":
+ return -1, poffset
+ if p[1] != p[0] or p[2] != p[0]:
+ return -1, poffset
+ if len(p) > 5 and p[-1] == p[0] and p[-2] == p[0] and p[-3] == p[0]:
+ return -1, poffset
+ return len(p) - 1, poffset
+
+ def load_value(self, v, strictly_valid=True):
+ if not v:
+ raise ValueError("Empty value is invalid")
+ if v == 'true':
+ return (True, "bool")
+ elif v == 'false':
+ return (False, "bool")
+ elif v[0] == '"' or v[0] == "'":
+ quotechar = v[0]
+ testv = v[1:].split(quotechar)
+ triplequote = False
+ triplequotecount = 0
+ if len(testv) > 1 and testv[0] == '' and testv[1] == '':
+ testv = testv[2:]
+ triplequote = True
+ closed = False
+ for tv in testv:
+ if tv == '':
+ if triplequote:
+ triplequotecount += 1
+ else:
+ closed = True
+ else:
+ oddbackslash = False
+ try:
+ i = -1
+ j = tv[i]
+ while j == '\\':
+ oddbackslash = not oddbackslash
+ i -= 1
+ j = tv[i]
+ except IndexError:
+ pass
+ if not oddbackslash:
+ if closed:
+ raise ValueError("Found tokens after a closed " +
+ "string. Invalid TOML.")
+ else:
+ if not triplequote or triplequotecount > 1:
+ closed = True
+ else:
+ triplequotecount = 0
+ if quotechar == '"':
+ escapeseqs = v.split('\\')[1:]
+ backslash = False
+ for i in escapeseqs:
+ if i == '':
+ backslash = not backslash
+ else:
+ if i[0] not in _escapes and (i[0] != 'u' and
+ i[0] != 'U' and
+ not backslash):
+ raise ValueError("Reserved escape sequence used")
+ if backslash:
+ backslash = False
+ for prefix in ["\\u", "\\U"]:
+ if prefix in v:
+ hexbytes = v.split(prefix)
+ v = _load_unicode_escapes(hexbytes[0], hexbytes[1:],
+ prefix)
+ v = _unescape(v)
+ if len(v) > 1 and v[1] == quotechar and (len(v) < 3 or
+ v[1] == v[2]):
+ v = v[2:-2]
+ return (v[1:-1], "str")
+ elif v[0] == '[':
+ return (self.load_array(v), "array")
+ elif v[0] == '{':
+ inline_object = self.get_empty_inline_table()
+ self.load_inline_object(v, inline_object)
+ return (inline_object, "inline_object")
+ elif TIME_RE.match(v):
+ h, m, s, _, ms = TIME_RE.match(v).groups()
+ time = datetime.time(int(h), int(m), int(s), int(ms) if ms else 0)
+ return (time, "time")
+ else:
+ parsed_date = _load_date(v)
+ if parsed_date is not None:
+ return (parsed_date, "date")
+ if not strictly_valid:
+ raise ValueError("Weirdness with leading zeroes or "
+ "underscores in your number.")
+ itype = "int"
+ neg = False
+ if v[0] == '-':
+ neg = True
+ v = v[1:]
+ elif v[0] == '+':
+ v = v[1:]
+ v = v.replace('_', '')
+ lowerv = v.lower()
+ if '.' in v or ('x' not in v and ('e' in v or 'E' in v)):
+ if '.' in v and v.split('.', 1)[1] == '':
+ raise ValueError("This float is missing digits after "
+ "the point")
+ if v[0] not in '0123456789':
+ raise ValueError("This float doesn't have a leading "
+ "digit")
+ v = float(v)
+ itype = "float"
+ elif len(lowerv) == 3 and (lowerv == 'inf' or lowerv == 'nan'):
+ v = float(v)
+ itype = "float"
+ if itype == "int":
+ v = int(v, 0)
+ if neg:
+ return (0 - v, itype)
+ return (v, itype)
+
+ def bounded_string(self, s):
+ if len(s) == 0:
+ return True
+ if s[-1] != s[0]:
+ return False
+ i = -2
+ backslash = False
+ while len(s) + i > 0:
+ if s[i] == "\\":
+ backslash = not backslash
+ i -= 1
+ else:
+ break
+ return not backslash
+
+ def _load_array_isstrarray(self, a):
+ a = a[1:-1].strip()
+ if a != '' and (a[0] == '"' or a[0] == "'"):
+ return True
+ return False
+
+ def load_array(self, a):
+ atype = None
+ retval = []
+ a = a.strip()
+ if '[' not in a[1:-1] or "" != a[1:-1].split('[')[0].strip():
+ strarray = self._load_array_isstrarray(a)
+ if not a[1:-1].strip().startswith('{'):
+ a = a[1:-1].split(',')
+ else:
+                # a is an inline object; we must find the matching closing
+                # brace to delimit the groups
+ new_a = []
+ start_group_index = 1
+ end_group_index = 2
+ open_bracket_count = 1 if a[start_group_index] == '{' else 0
+ in_str = False
+ while end_group_index < len(a[1:]):
+ if a[end_group_index] == '"' or a[end_group_index] == "'":
+ if in_str:
+ backslash_index = end_group_index - 1
+ while (backslash_index > -1 and
+ a[backslash_index] == '\\'):
+ in_str = not in_str
+ backslash_index -= 1
+ in_str = not in_str
+ if not in_str and a[end_group_index] == '{':
+ open_bracket_count += 1
+ if in_str or a[end_group_index] != '}':
+ end_group_index += 1
+ continue
+ elif a[end_group_index] == '}' and open_bracket_count > 1:
+ open_bracket_count -= 1
+ end_group_index += 1
+ continue
+
+ # Increase end_group_index by 1 to get the closing bracket
+ end_group_index += 1
+
+ new_a.append(a[start_group_index:end_group_index])
+
+ # The next start index is at least after the closing
+ # bracket, a closing bracket can be followed by a comma
+ # since we are in an array.
+ start_group_index = end_group_index + 1
+ while (start_group_index < len(a[1:]) and
+ a[start_group_index] != '{'):
+ start_group_index += 1
+ end_group_index = start_group_index + 1
+ a = new_a
+ b = 0
+ if strarray:
+ while b < len(a) - 1:
+ ab = a[b].strip()
+ while (not self.bounded_string(ab) or
+ (len(ab) > 2 and
+ ab[0] == ab[1] == ab[2] and
+ ab[-2] != ab[0] and
+ ab[-3] != ab[0])):
+ a[b] = a[b] + ',' + a[b + 1]
+ ab = a[b].strip()
+ if b < len(a) - 2:
+ a = a[:b + 1] + a[b + 2:]
+ else:
+ a = a[:b + 1]
+ b += 1
+ else:
+ al = list(a[1:-1])
+ a = []
+ openarr = 0
+ j = 0
+ for i in _range(len(al)):
+ if al[i] == '[':
+ openarr += 1
+ elif al[i] == ']':
+ openarr -= 1
+ elif al[i] == ',' and not openarr:
+ a.append(''.join(al[j:i]))
+ j = i + 1
+ a.append(''.join(al[j:]))
+ for i in _range(len(a)):
+ a[i] = a[i].strip()
+ if a[i] != '':
+ nval, ntype = self.load_value(a[i])
+ if atype:
+ if ntype != atype:
+ raise ValueError("Not a homogeneous array")
+ else:
+ atype = ntype
+ retval.append(nval)
+ return retval
+
+ def preserve_comment(self, line_no, key, comment, beginline):
+ pass
+
+ def embed_comments(self, idx, currentlevel):
+ pass
+
+
+class TomlPreserveCommentDecoder(TomlDecoder):
+
+ def __init__(self, _dict=dict):
+ self.saved_comments = {}
+ super(TomlPreserveCommentDecoder, self).__init__(_dict)
+
+ def preserve_comment(self, line_no, key, comment, beginline):
+ self.saved_comments[line_no] = (key, comment, beginline)
+
+ def embed_comments(self, idx, currentlevel):
+ if idx not in self.saved_comments:
+ return
+
+ key, comment, beginline = self.saved_comments[idx]
+ currentlevel[key] = CommentValue(currentlevel[key], comment, beginline,
+ self._dict)
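
Taken together, `TomlDecoder` and `TomlPreserveCommentDecoder` form one half of a comment-preserving round trip; the other half is `TomlPreserveCommentEncoder` in the encoder module below. A minimal sketch, assuming the vendored package is importable as `dynaconf.vendor.toml`:

```python
from dynaconf.vendor import toml  # assumed vendored path
from dynaconf.vendor.toml.decoder import TomlPreserveCommentDecoder
from dynaconf.vendor.toml.encoder import TomlPreserveCommentEncoder

doc = "# the app name\ntitle = 'Bazarr'\n"

# Decoding with the preserve-comment decoder wraps values in CommentValue...
data = toml.loads(doc, decoder=TomlPreserveCommentDecoder())
# ...which the matching encoder knows how to dump back out, comment included.
print(toml.dumps(data, encoder=TomlPreserveCommentEncoder()))
```
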
diff --git a/libs/dynaconf/vendor/toml/encoder.py b/libs/dynaconf/vendor/toml/encoder.py
new file mode 100644
index 000000000..f908f2719
--- /dev/null
+++ b/libs/dynaconf/vendor/toml/encoder.py
@@ -0,0 +1,304 @@
+import datetime
+import re
+import sys
+from decimal import Decimal
+
+from .decoder import InlineTableDict
+
+if sys.version_info >= (3,):
+ unicode = str
+
+
+def dump(o, f, encoder=None):
+ """Writes out dict as toml to a file
+
+ Args:
+ o: Object to dump into toml
+ f: File descriptor where the toml should be stored
+ encoder: The ``TomlEncoder`` to use for constructing the output string
+
+ Returns:
+        String containing the TOML corresponding to the dictionary
+
+ Raises:
+        TypeError: When anything other than a file descriptor is passed
+ """
+
+ if not f.write:
+ raise TypeError("You can only dump an object to a file descriptor")
+ d = dumps(o, encoder=encoder)
+ f.write(d)
+ return d
+
+
+def dumps(o, encoder=None):
+ """Stringifies input dict as toml
+
+ Args:
+ o: Object to dump into toml
+ encoder: The ``TomlEncoder`` to use for constructing the output string
+
+ Returns:
+        String containing the TOML corresponding to the dict
+
+ Examples:
+ ```python
+ >>> import toml
+ >>> output = {
+ ... 'a': "I'm a string",
+ ... 'b': ["I'm", "a", "list"],
+ ... 'c': 2400
+ ... }
+ >>> toml.dumps(output)
+ 'a = "I\'m a string"\nb = [ "I\'m", "a", "list",]\nc = 2400\n'
+ ```
+ """
+
+ retval = ""
+ if encoder is None:
+ encoder = TomlEncoder(o.__class__)
+ addtoretval, sections = encoder.dump_sections(o, "")
+ retval += addtoretval
+ outer_objs = [id(o)]
+ while sections:
+ section_ids = [id(section) for section in sections]
+ for outer_obj in outer_objs:
+ if outer_obj in section_ids:
+ raise ValueError("Circular reference detected")
+ outer_objs += section_ids
+ newsections = encoder.get_empty_table()
+ for section in sections:
+ addtoretval, addtosections = encoder.dump_sections(
+ sections[section], section)
+
+ if addtoretval or (not addtoretval and not addtosections):
+ if retval and retval[-2:] != "\n\n":
+ retval += "\n"
+ retval += "[" + section + "]\n"
+ if addtoretval:
+ retval += addtoretval
+ for s in addtosections:
+ newsections[section + "." + s] = addtosections[s]
+ sections = newsections
+ return retval
+
+
+def _dump_str(v):
+ if sys.version_info < (3,) and hasattr(v, 'decode') and isinstance(v, str):
+ v = v.decode('utf-8')
+ v = "%r" % v
+ if v[0] == 'u':
+ v = v[1:]
+ singlequote = v.startswith("'")
+ if singlequote or v.startswith('"'):
+ v = v[1:-1]
+ if singlequote:
+ v = v.replace("\\'", "'")
+ v = v.replace('"', '\\"')
+ v = v.split("\\x")
+ while len(v) > 1:
+ i = -1
+ if not v[0]:
+ v = v[1:]
+ v[0] = v[0].replace("\\\\", "\\")
+ # No, I don't know why != works and == breaks
+ joinx = v[0][i] != "\\"
+ while v[0][:i] and v[0][i] == "\\":
+ joinx = not joinx
+ i -= 1
+ if joinx:
+ joiner = "x"
+ else:
+ joiner = "u00"
+ v = [v[0] + joiner + v[1]] + v[2:]
+ return unicode('"' + v[0] + '"')
+
+
+def _dump_float(v):
+ return "{}".format(v).replace("e+0", "e+").replace("e-0", "e-")
+
+
+def _dump_time(v):
+ utcoffset = v.utcoffset()
+ if utcoffset is None:
+ return v.isoformat()
+    # The TOML spec says it's local time, thus we drop the offset
+ return v.isoformat()[:-6]
+
+
+class TomlEncoder(object):
+
+ def __init__(self, _dict=dict, preserve=False):
+ self._dict = _dict
+ self.preserve = preserve
+ self.dump_funcs = {
+ str: _dump_str,
+ unicode: _dump_str,
+ list: self.dump_list,
+ bool: lambda v: unicode(v).lower(),
+ int: lambda v: v,
+ float: _dump_float,
+ Decimal: _dump_float,
+ datetime.datetime: lambda v: v.isoformat().replace('+00:00', 'Z'),
+ datetime.time: _dump_time,
+ datetime.date: lambda v: v.isoformat()
+ }
+
+ def get_empty_table(self):
+ return self._dict()
+
+ def dump_list(self, v):
+ retval = "["
+ for u in v:
+ retval += " " + unicode(self.dump_value(u)) + ","
+ retval += "]"
+ return retval
+
+ def dump_inline_table(self, section):
+ """Preserve inline table in its compact syntax instead of expanding
+ into subsection.
+
+ https://github.com/toml-lang/toml#user-content-inline-table
+ """
+ retval = ""
+ if isinstance(section, dict):
+ val_list = []
+ for k, v in section.items():
+ val = self.dump_inline_table(v)
+ val_list.append(k + " = " + val)
+ retval += "{ " + ", ".join(val_list) + " }\n"
+ return retval
+ else:
+ return unicode(self.dump_value(section))
+
+ def dump_value(self, v):
+ # Lookup function corresponding to v's type
+ dump_fn = self.dump_funcs.get(type(v))
+ if dump_fn is None and hasattr(v, '__iter__'):
+ dump_fn = self.dump_funcs[list]
+ # Evaluate function (if it exists) else return v
+ return dump_fn(v) if dump_fn is not None else self.dump_funcs[str](v)
+
+ def dump_sections(self, o, sup):
+ retstr = ""
+ if sup != "" and sup[-1] != ".":
+ sup += '.'
+ retdict = self._dict()
+ arraystr = ""
+ for section in o:
+ section = unicode(section)
+ qsection = section
+ if not re.match(r'^[A-Za-z0-9_-]+$', section):
+ qsection = _dump_str(section)
+ if not isinstance(o[section], dict):
+ arrayoftables = False
+ if isinstance(o[section], list):
+ for a in o[section]:
+ if isinstance(a, dict):
+ arrayoftables = True
+ if arrayoftables:
+ for a in o[section]:
+ arraytabstr = "\n"
+ arraystr += "[[" + sup + qsection + "]]\n"
+ s, d = self.dump_sections(a, sup + qsection)
+ if s:
+ if s[0] == "[":
+ arraytabstr += s
+ else:
+ arraystr += s
+ while d:
+ newd = self._dict()
+ for dsec in d:
+ s1, d1 = self.dump_sections(d[dsec], sup +
+ qsection + "." +
+ dsec)
+ if s1:
+ arraytabstr += ("[" + sup + qsection +
+ "." + dsec + "]\n")
+ arraytabstr += s1
+ for s1 in d1:
+ newd[dsec + "." + s1] = d1[s1]
+ d = newd
+ arraystr += arraytabstr
+ else:
+ if o[section] is not None:
+ retstr += (qsection + " = " +
+ unicode(self.dump_value(o[section])) + '\n')
+ elif self.preserve and isinstance(o[section], InlineTableDict):
+ retstr += (qsection + " = " +
+ self.dump_inline_table(o[section]))
+ else:
+ retdict[qsection] = o[section]
+ retstr += arraystr
+ return (retstr, retdict)
+
+
+class TomlPreserveInlineDictEncoder(TomlEncoder):
+
+ def __init__(self, _dict=dict):
+ super(TomlPreserveInlineDictEncoder, self).__init__(_dict, True)
+
+
+class TomlArraySeparatorEncoder(TomlEncoder):
+
+ def __init__(self, _dict=dict, preserve=False, separator=","):
+ super(TomlArraySeparatorEncoder, self).__init__(_dict, preserve)
+ if separator.strip() == "":
+ separator = "," + separator
+ elif separator.strip(' \t\n\r,'):
+ raise ValueError("Invalid separator for arrays")
+ self.separator = separator
+
+ def dump_list(self, v):
+ t = []
+ retval = "["
+ for u in v:
+ t.append(self.dump_value(u))
+ while t != []:
+ s = []
+ for u in t:
+ if isinstance(u, list):
+ for r in u:
+ s.append(r)
+ else:
+ retval += " " + unicode(u) + self.separator
+ t = s
+ retval += "]"
+ return retval
+
+
+class TomlNumpyEncoder(TomlEncoder):
+
+ def __init__(self, _dict=dict, preserve=False):
+ import numpy as np
+ super(TomlNumpyEncoder, self).__init__(_dict, preserve)
+ self.dump_funcs[np.float16] = _dump_float
+ self.dump_funcs[np.float32] = _dump_float
+ self.dump_funcs[np.float64] = _dump_float
+ self.dump_funcs[np.int16] = self._dump_int
+ self.dump_funcs[np.int32] = self._dump_int
+ self.dump_funcs[np.int64] = self._dump_int
+
+ def _dump_int(self, v):
+ return "{}".format(int(v))
+
+
+class TomlPreserveCommentEncoder(TomlEncoder):
+
+ def __init__(self, _dict=dict, preserve=False):
+ from dynaconf.vendor.toml.decoder import CommentValue
+ super(TomlPreserveCommentEncoder, self).__init__(_dict, preserve)
+ self.dump_funcs[CommentValue] = lambda v: v.dump(self.dump_value)
+
+
+class TomlPathlibEncoder(TomlEncoder):
+
+ def _dump_pathlib_path(self, v):
+ return _dump_str(str(v))
+
+ def dump_value(self, v):
+ if (3, 4) <= sys.version_info:
+ import pathlib
+ if isinstance(v, pathlib.PurePath):
+ v = str(v)
+ return super(TomlPathlibEncoder, self).dump_value(v)
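
The `dump_funcs` mapping in `TomlEncoder.__init__` is the extension point the subclasses above build on; `TomlNumpyEncoder`, for instance, just registers extra numeric types. A hedged sketch of the same pattern for a hypothetical type (vendored import path assumed):

```python
import uuid

from dynaconf.vendor.toml.encoder import TomlEncoder  # assumed vendored path


class TomlUuidEncoder(TomlEncoder):
    """Hypothetical encoder rendering uuid.UUID values as TOML strings."""

    def __init__(self, _dict=dict, preserve=False):
        super(TomlUuidEncoder, self).__init__(_dict, preserve)
        # Reuse the existing string dumper so quoting/escaping stays consistent.
        self.dump_funcs[uuid.UUID] = lambda v: self.dump_funcs[str](str(v))
```
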
diff --git a/libs/dynaconf/vendor/toml/ordered.py b/libs/dynaconf/vendor/toml/ordered.py
new file mode 100644
index 000000000..6b8d9c19c
--- /dev/null
+++ b/libs/dynaconf/vendor/toml/ordered.py
@@ -0,0 +1,15 @@
+from collections import OrderedDict
+from . import TomlEncoder
+from . import TomlDecoder
+
+
+class TomlOrderedDecoder(TomlDecoder):
+
+ def __init__(self):
+ super(self.__class__, self).__init__(_dict=OrderedDict)
+
+
+class TomlOrderedEncoder(TomlEncoder):
+
+ def __init__(self):
+ super(self.__class__, self).__init__(_dict=OrderedDict)
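
These two classes simply swap the backing mapping for `collections.OrderedDict`, so decoding and re-encoding keeps the source key order. A small sketch (vendored import paths assumed):

```python
from dynaconf.vendor import toml  # assumed vendored path
from dynaconf.vendor.toml.ordered import TomlOrderedDecoder, TomlOrderedEncoder

doc = "b = 1\na = 2\n"
data = toml.loads(doc, decoder=TomlOrderedDecoder())
# Re-encoding walks the OrderedDict in insertion order, so 'b' stays first.
print(toml.dumps(data, encoder=TomlOrderedEncoder()))
```
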
diff --git a/libs/dynaconf/vendor/toml/tz.py b/libs/dynaconf/vendor/toml/tz.py
new file mode 100644
index 000000000..93c3c8ad2
--- /dev/null
+++ b/libs/dynaconf/vendor/toml/tz.py
@@ -0,0 +1,21 @@
+from datetime import tzinfo, timedelta
+
+
+class TomlTz(tzinfo):
+ def __init__(self, toml_offset):
+ if toml_offset == "Z":
+ self._raw_offset = "+00:00"
+ else:
+ self._raw_offset = toml_offset
+ self._sign = -1 if self._raw_offset[0] == '-' else 1
+ self._hours = int(self._raw_offset[1:3])
+ self._minutes = int(self._raw_offset[4:6])
+
+ def tzname(self, dt):
+ return "UTC" + self._raw_offset
+
+ def utcoffset(self, dt):
+ return self._sign * timedelta(hours=self._hours, minutes=self._minutes)
+
+ def dst(self, dt):
+ return timedelta(0)
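
`TomlTz` turns a TOML offset suffix such as `Z` or `+05:30` into a `tzinfo`. A quick sketch of its behavior (vendored import path assumed):

```python
from dynaconf.vendor.toml.tz import TomlTz  # assumed vendored path

tz = TomlTz("+05:30")
print(tz.utcoffset(None))        # 5:30:00
print(TomlTz("Z").tzname(None))  # UTC+00:00
```
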
diff --git a/libs/dynaconf/vendor/tomllib/__init__.py b/libs/dynaconf/vendor/tomllib/__init__.py
new file mode 100644
index 000000000..c4da93df5
--- /dev/null
+++ b/libs/dynaconf/vendor/tomllib/__init__.py
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: MIT
+# SPDX-FileCopyrightText: 2021 Taneli Hukkinen
+
+__all__ = (
+ "loads",
+ "load",
+ "TOMLDecodeError",
+ "dump",
+ "dumps",
+)
+
+from ._parser import TOMLDecodeError, load, loads
+from ._writer import dump, dumps
+
+# Pretend this exception was created here.
+TOMLDecodeError.__module__ = __name__
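
The re-exports above are the whole public surface: `loads`/`load` from the vendored tomli parser plus `dumps`/`dump` from the tomli-w writer. A minimal usage sketch, assuming the vendored import path `dynaconf.vendor.tomllib`:

```python
from dynaconf.vendor import tomllib  # assumed vendored path

data = tomllib.loads('answer = 42\n[server]\nhost = "localhost"\n')
print(data["server"]["host"])  # localhost

# load() wants a binary file handle, mirroring the stdlib tomllib:
# with open("config.toml", "rb") as f:
#     data = tomllib.load(f)
```
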
diff --git a/libs/dynaconf/vendor/tomllib/_parser.py b/libs/dynaconf/vendor/tomllib/_parser.py
new file mode 100644
index 000000000..e1b3214fe
--- /dev/null
+++ b/libs/dynaconf/vendor/tomllib/_parser.py
@@ -0,0 +1,690 @@
+# SPDX-License-Identifier: MIT
+# SPDX-FileCopyrightText: 2021 Taneli Hukkinen
+
+from __future__ import annotations
+
+from collections.abc import Iterable
+import string
+from types import MappingProxyType
+from typing import Any, BinaryIO, NamedTuple
+
+from ._re import (
+ RE_DATETIME,
+ RE_LOCALTIME,
+ RE_NUMBER,
+ match_to_datetime,
+ match_to_localtime,
+ match_to_number,
+)
+from ._types import Key, ParseFloat, Pos
+
+ASCII_CTRL = frozenset(chr(i) for i in range(32)) | frozenset(chr(127))
+
+# Neither of these sets include quotation mark or backslash. They are
+# currently handled as separate cases in the parser functions.
+ILLEGAL_BASIC_STR_CHARS = ASCII_CTRL - frozenset("\t")
+ILLEGAL_MULTILINE_BASIC_STR_CHARS = ASCII_CTRL - frozenset("\t\n")
+
+ILLEGAL_LITERAL_STR_CHARS = ILLEGAL_BASIC_STR_CHARS
+ILLEGAL_MULTILINE_LITERAL_STR_CHARS = ILLEGAL_MULTILINE_BASIC_STR_CHARS
+
+ILLEGAL_COMMENT_CHARS = ILLEGAL_BASIC_STR_CHARS
+
+TOML_WS = frozenset(" \t")
+TOML_WS_AND_NEWLINE = TOML_WS | frozenset("\n")
+BARE_KEY_CHARS = frozenset(string.ascii_letters + string.digits + "-_")
+KEY_INITIAL_CHARS = BARE_KEY_CHARS | frozenset("\"'")
+HEXDIGIT_CHARS = frozenset(string.hexdigits)
+
+BASIC_STR_ESCAPE_REPLACEMENTS = MappingProxyType(
+ {
+ "\\b": "\u0008", # backspace
+ "\\t": "\u0009", # tab
+ "\\n": "\u000A", # linefeed
+ "\\f": "\u000C", # form feed
+ "\\r": "\u000D", # carriage return
+ '\\"': "\u0022", # quote
+ "\\\\": "\u005C", # backslash
+ }
+)
+
+
+class TOMLDecodeError(ValueError):
+ """An error raised if a document is not valid TOML."""
+
+
+def load(fp: BinaryIO, /, *, parse_float: ParseFloat = float) -> dict[str, Any]:
+ """Parse TOML from a binary file object."""
+ b = fp.read()
+ try:
+ s = b.decode()
+ except AttributeError:
+ raise TypeError(
+ "File must be opened in binary mode, e.g. use `open('foo.toml', 'rb')`"
+ ) from None
+ return loads(s, parse_float=parse_float)
+
+
+def loads(s: str, /, *, parse_float: ParseFloat = float) -> dict[str, Any]: # noqa: C901
+ """Parse TOML from a string."""
+
+ # The spec allows converting "\r\n" to "\n", even in string
+ # literals. Let's do so to simplify parsing.
+ src = s.replace("\r\n", "\n")
+ pos = 0
+ out = Output(NestedDict(), Flags())
+ header: Key = ()
+ parse_float = make_safe_parse_float(parse_float)
+
+ # Parse one statement at a time
+ # (typically means one line in TOML source)
+ while True:
+ # 1. Skip line leading whitespace
+ pos = skip_chars(src, pos, TOML_WS)
+
+ # 2. Parse rules. Expect one of the following:
+ # - end of file
+ # - end of line
+ # - comment
+ # - key/value pair
+ # - append dict to list (and move to its namespace)
+ # - create dict (and move to its namespace)
+ # Skip trailing whitespace when applicable.
+ try:
+ char = src[pos]
+ except IndexError:
+ break
+ if char == "\n":
+ pos += 1
+ continue
+ if char in KEY_INITIAL_CHARS:
+ pos = key_value_rule(src, pos, out, header, parse_float)
+ pos = skip_chars(src, pos, TOML_WS)
+ elif char == "[":
+ try:
+ second_char: str | None = src[pos + 1]
+ except IndexError:
+ second_char = None
+ out.flags.finalize_pending()
+ if second_char == "[":
+ pos, header = create_list_rule(src, pos, out)
+ else:
+ pos, header = create_dict_rule(src, pos, out)
+ pos = skip_chars(src, pos, TOML_WS)
+ elif char != "#":
+ raise suffixed_err(src, pos, "Invalid statement")
+
+ # 3. Skip comment
+ pos = skip_comment(src, pos)
+
+ # 4. Expect end of line or end of file
+ try:
+ char = src[pos]
+ except IndexError:
+ break
+ if char != "\n":
+ raise suffixed_err(
+ src, pos, "Expected newline or end of document after a statement"
+ )
+ pos += 1
+
+ return out.data.dict
+
+
+class Flags:
+ """Flags that map to parsed keys/namespaces."""
+
+ # Marks an immutable namespace (inline array or inline table).
+ FROZEN = 0
+ # Marks a nest that has been explicitly created and can no longer
+ # be opened using the "[table]" syntax.
+ EXPLICIT_NEST = 1
+
+ def __init__(self) -> None:
+ self._flags: dict[str, dict] = {}
+ self._pending_flags: set[tuple[Key, int]] = set()
+
+ def add_pending(self, key: Key, flag: int) -> None:
+ self._pending_flags.add((key, flag))
+
+ def finalize_pending(self) -> None:
+ for key, flag in self._pending_flags:
+ self.set(key, flag, recursive=False)
+ self._pending_flags.clear()
+
+ def unset_all(self, key: Key) -> None:
+ cont = self._flags
+ for k in key[:-1]:
+ if k not in cont:
+ return
+ cont = cont[k]["nested"]
+ cont.pop(key[-1], None)
+
+ def set(self, key: Key, flag: int, *, recursive: bool) -> None: # noqa: A003
+ cont = self._flags
+ key_parent, key_stem = key[:-1], key[-1]
+ for k in key_parent:
+ if k not in cont:
+ cont[k] = {"flags": set(), "recursive_flags": set(), "nested": {}}
+ cont = cont[k]["nested"]
+ if key_stem not in cont:
+ cont[key_stem] = {"flags": set(), "recursive_flags": set(), "nested": {}}
+ cont[key_stem]["recursive_flags" if recursive else "flags"].add(flag)
+
+ def is_(self, key: Key, flag: int) -> bool:
+ if not key:
+ return False # document root has no flags
+ cont = self._flags
+ for k in key[:-1]:
+ if k not in cont:
+ return False
+ inner_cont = cont[k]
+ if flag in inner_cont["recursive_flags"]:
+ return True
+ cont = inner_cont["nested"]
+ key_stem = key[-1]
+ if key_stem in cont:
+ cont = cont[key_stem]
+ return flag in cont["flags"] or flag in cont["recursive_flags"]
+ return False
+
+
+class NestedDict:
+ def __init__(self) -> None:
+ # The parsed content of the TOML document
+ self.dict: dict[str, Any] = {}
+
+ def get_or_create_nest(
+ self,
+ key: Key,
+ *,
+ access_lists: bool = True,
+ ) -> dict:
+ cont: Any = self.dict
+ for k in key:
+ if k not in cont:
+ cont[k] = {}
+ cont = cont[k]
+ if access_lists and isinstance(cont, list):
+ cont = cont[-1]
+ if not isinstance(cont, dict):
+ raise KeyError("There is no nest behind this key")
+ return cont
+
+ def append_nest_to_list(self, key: Key) -> None:
+ cont = self.get_or_create_nest(key[:-1])
+ last_key = key[-1]
+ if last_key in cont:
+ list_ = cont[last_key]
+ if not isinstance(list_, list):
+ raise KeyError("An object other than list found behind this key")
+ list_.append({})
+ else:
+ cont[last_key] = [{}]
+
+
+class Output(NamedTuple):
+ data: NestedDict
+ flags: Flags
+
+
+def skip_chars(src: str, pos: Pos, chars: Iterable[str]) -> Pos:
+ try:
+ while src[pos] in chars:
+ pos += 1
+ except IndexError:
+ pass
+ return pos
+
+
+def skip_until(
+ src: str,
+ pos: Pos,
+ expect: str,
+ *,
+ error_on: frozenset[str],
+ error_on_eof: bool,
+) -> Pos:
+ try:
+ new_pos = src.index(expect, pos)
+ except ValueError:
+ new_pos = len(src)
+ if error_on_eof:
+ raise suffixed_err(src, new_pos, f"Expected {expect!r}") from None
+
+ if not error_on.isdisjoint(src[pos:new_pos]):
+ while src[pos] not in error_on:
+ pos += 1
+ raise suffixed_err(src, pos, f"Found invalid character {src[pos]!r}")
+ return new_pos
+
+
+def skip_comment(src: str, pos: Pos) -> Pos:
+ try:
+ char: str | None = src[pos]
+ except IndexError:
+ char = None
+ if char == "#":
+ return skip_until(
+ src, pos + 1, "\n", error_on=ILLEGAL_COMMENT_CHARS, error_on_eof=False
+ )
+ return pos
+
+
+def skip_comments_and_array_ws(src: str, pos: Pos) -> Pos:
+ while True:
+ pos_before_skip = pos
+ pos = skip_chars(src, pos, TOML_WS_AND_NEWLINE)
+ pos = skip_comment(src, pos)
+ if pos == pos_before_skip:
+ return pos
+
+
+def create_dict_rule(src: str, pos: Pos, out: Output) -> tuple[Pos, Key]:
+ pos += 1 # Skip "["
+ pos = skip_chars(src, pos, TOML_WS)
+ pos, key = parse_key(src, pos)
+
+ if out.flags.is_(key, Flags.EXPLICIT_NEST) or out.flags.is_(key, Flags.FROZEN):
+ raise suffixed_err(src, pos, f"Cannot declare {key} twice")
+ out.flags.set(key, Flags.EXPLICIT_NEST, recursive=False)
+ try:
+ out.data.get_or_create_nest(key)
+ except KeyError:
+ raise suffixed_err(src, pos, "Cannot overwrite a value") from None
+
+ if not src.startswith("]", pos):
+ raise suffixed_err(src, pos, "Expected ']' at the end of a table declaration")
+ return pos + 1, key
+
+
+def create_list_rule(src: str, pos: Pos, out: Output) -> tuple[Pos, Key]:
+ pos += 2 # Skip "[["
+ pos = skip_chars(src, pos, TOML_WS)
+ pos, key = parse_key(src, pos)
+
+ if out.flags.is_(key, Flags.FROZEN):
+ raise suffixed_err(src, pos, f"Cannot mutate immutable namespace {key}")
+ # Free the namespace now that it points to another empty list item...
+ out.flags.unset_all(key)
+ # ...but this key precisely is still prohibited from table declaration
+ out.flags.set(key, Flags.EXPLICIT_NEST, recursive=False)
+ try:
+ out.data.append_nest_to_list(key)
+ except KeyError:
+ raise suffixed_err(src, pos, "Cannot overwrite a value") from None
+
+ if not src.startswith("]]", pos):
+ raise suffixed_err(src, pos, "Expected ']]' at the end of an array declaration")
+ return pos + 2, key
+
+
+def key_value_rule(
+ src: str, pos: Pos, out: Output, header: Key, parse_float: ParseFloat
+) -> Pos:
+ pos, key, value = parse_key_value_pair(src, pos, parse_float)
+ key_parent, key_stem = key[:-1], key[-1]
+ abs_key_parent = header + key_parent
+
+ relative_path_cont_keys = (header + key[:i] for i in range(1, len(key)))
+ for cont_key in relative_path_cont_keys:
+ # Check that dotted key syntax does not redefine an existing table
+ if out.flags.is_(cont_key, Flags.EXPLICIT_NEST):
+ raise suffixed_err(src, pos, f"Cannot redefine namespace {cont_key}")
+ # Containers in the relative path can't be opened with the table syntax or
+ # dotted key/value syntax in following table sections.
+ out.flags.add_pending(cont_key, Flags.EXPLICIT_NEST)
+
+ if out.flags.is_(abs_key_parent, Flags.FROZEN):
+ raise suffixed_err(
+ src, pos, f"Cannot mutate immutable namespace {abs_key_parent}"
+ )
+
+ try:
+ nest = out.data.get_or_create_nest(abs_key_parent)
+ except KeyError:
+ raise suffixed_err(src, pos, "Cannot overwrite a value") from None
+ if key_stem in nest:
+ raise suffixed_err(src, pos, "Cannot overwrite a value")
+ # Mark inline table and array namespaces recursively immutable
+ if isinstance(value, (dict, list)):
+ out.flags.set(header + key, Flags.FROZEN, recursive=True)
+ nest[key_stem] = value
+ return pos
+
+
+def parse_key_value_pair(
+ src: str, pos: Pos, parse_float: ParseFloat
+) -> tuple[Pos, Key, Any]:
+ pos, key = parse_key(src, pos)
+ try:
+ char: str | None = src[pos]
+ except IndexError:
+ char = None
+ if char != "=":
+ raise suffixed_err(src, pos, "Expected '=' after a key in a key/value pair")
+ pos += 1
+ pos = skip_chars(src, pos, TOML_WS)
+ pos, value = parse_value(src, pos, parse_float)
+ return pos, key, value
+
+
+def parse_key(src: str, pos: Pos) -> tuple[Pos, Key]:
+ pos, key_part = parse_key_part(src, pos)
+ key: Key = (key_part,)
+ pos = skip_chars(src, pos, TOML_WS)
+ while True:
+ try:
+ char: str | None = src[pos]
+ except IndexError:
+ char = None
+ if char != ".":
+ return pos, key
+ pos += 1
+ pos = skip_chars(src, pos, TOML_WS)
+ pos, key_part = parse_key_part(src, pos)
+ key += (key_part,)
+ pos = skip_chars(src, pos, TOML_WS)
+
+
+def parse_key_part(src: str, pos: Pos) -> tuple[Pos, str]:
+ try:
+ char: str | None = src[pos]
+ except IndexError:
+ char = None
+ if char in BARE_KEY_CHARS:
+ start_pos = pos
+ pos = skip_chars(src, pos, BARE_KEY_CHARS)
+ return pos, src[start_pos:pos]
+ if char == "'":
+ return parse_literal_str(src, pos)
+ if char == '"':
+ return parse_one_line_basic_str(src, pos)
+ raise suffixed_err(src, pos, "Invalid initial character for a key part")
+
+
+def parse_one_line_basic_str(src: str, pos: Pos) -> tuple[Pos, str]:
+ pos += 1
+ return parse_basic_str(src, pos, multiline=False)
+
+
+def parse_array(src: str, pos: Pos, parse_float: ParseFloat) -> tuple[Pos, list]:
+ pos += 1
+ array: list = []
+
+ pos = skip_comments_and_array_ws(src, pos)
+ if src.startswith("]", pos):
+ return pos + 1, array
+ while True:
+ pos, val = parse_value(src, pos, parse_float)
+ array.append(val)
+ pos = skip_comments_and_array_ws(src, pos)
+
+ c = src[pos : pos + 1]
+ if c == "]":
+ return pos + 1, array
+ if c != ",":
+ raise suffixed_err(src, pos, "Unclosed array")
+ pos += 1
+
+ pos = skip_comments_and_array_ws(src, pos)
+ if src.startswith("]", pos):
+ return pos + 1, array
+
+
+def parse_inline_table(src: str, pos: Pos, parse_float: ParseFloat) -> tuple[Pos, dict]:
+ pos += 1
+ nested_dict = NestedDict()
+ flags = Flags()
+
+ pos = skip_chars(src, pos, TOML_WS)
+ if src.startswith("}", pos):
+ return pos + 1, nested_dict.dict
+ while True:
+ pos, key, value = parse_key_value_pair(src, pos, parse_float)
+ key_parent, key_stem = key[:-1], key[-1]
+ if flags.is_(key, Flags.FROZEN):
+ raise suffixed_err(src, pos, f"Cannot mutate immutable namespace {key}")
+ try:
+ nest = nested_dict.get_or_create_nest(key_parent, access_lists=False)
+ except KeyError:
+ raise suffixed_err(src, pos, "Cannot overwrite a value") from None
+ if key_stem in nest:
+ raise suffixed_err(src, pos, f"Duplicate inline table key {key_stem!r}")
+ nest[key_stem] = value
+ pos = skip_chars(src, pos, TOML_WS)
+ c = src[pos : pos + 1]
+ if c == "}":
+ return pos + 1, nested_dict.dict
+ if c != ",":
+ raise suffixed_err(src, pos, "Unclosed inline table")
+ if isinstance(value, (dict, list)):
+ flags.set(key, Flags.FROZEN, recursive=True)
+ pos += 1
+ pos = skip_chars(src, pos, TOML_WS)
+
+
+def parse_basic_str_escape(
+ src: str, pos: Pos, *, multiline: bool = False
+) -> tuple[Pos, str]:
+ escape_id = src[pos : pos + 2]
+ pos += 2
+ if multiline and escape_id in {"\\ ", "\\\t", "\\\n"}:
+ # Skip whitespace until next non-whitespace character or end of
+ # the doc. Error if non-whitespace is found before newline.
+ if escape_id != "\\\n":
+ pos = skip_chars(src, pos, TOML_WS)
+ try:
+ char = src[pos]
+ except IndexError:
+ return pos, ""
+ if char != "\n":
+ raise suffixed_err(src, pos, "Unescaped '\\' in a string")
+ pos += 1
+ pos = skip_chars(src, pos, TOML_WS_AND_NEWLINE)
+ return pos, ""
+ if escape_id == "\\u":
+ return parse_hex_char(src, pos, 4)
+ if escape_id == "\\U":
+ return parse_hex_char(src, pos, 8)
+ try:
+ return pos, BASIC_STR_ESCAPE_REPLACEMENTS[escape_id]
+ except KeyError:
+ raise suffixed_err(src, pos, "Unescaped '\\' in a string") from None
+
+
+def parse_basic_str_escape_multiline(src: str, pos: Pos) -> tuple[Pos, str]:
+ return parse_basic_str_escape(src, pos, multiline=True)
+
+
+def parse_hex_char(src: str, pos: Pos, hex_len: int) -> tuple[Pos, str]:
+ hex_str = src[pos : pos + hex_len]
+ if len(hex_str) != hex_len or not HEXDIGIT_CHARS.issuperset(hex_str):
+ raise suffixed_err(src, pos, "Invalid hex value")
+ pos += hex_len
+ hex_int = int(hex_str, 16)
+ if not is_unicode_scalar_value(hex_int):
+ raise suffixed_err(src, pos, "Escaped character is not a Unicode scalar value")
+ return pos, chr(hex_int)
+
+
+def parse_literal_str(src: str, pos: Pos) -> tuple[Pos, str]:
+ pos += 1 # Skip starting apostrophe
+ start_pos = pos
+ pos = skip_until(
+ src, pos, "'", error_on=ILLEGAL_LITERAL_STR_CHARS, error_on_eof=True
+ )
+ return pos + 1, src[start_pos:pos] # Skip ending apostrophe
+
+
+def parse_multiline_str(src: str, pos: Pos, *, literal: bool) -> tuple[Pos, str]:
+ pos += 3
+ if src.startswith("\n", pos):
+ pos += 1
+
+ if literal:
+ delim = "'"
+ end_pos = skip_until(
+ src,
+ pos,
+ "'''",
+ error_on=ILLEGAL_MULTILINE_LITERAL_STR_CHARS,
+ error_on_eof=True,
+ )
+ result = src[pos:end_pos]
+ pos = end_pos + 3
+ else:
+ delim = '"'
+ pos, result = parse_basic_str(src, pos, multiline=True)
+
+ # Add at maximum two extra apostrophes/quotes if the end sequence
+ # is 4 or 5 chars long instead of just 3.
+ if not src.startswith(delim, pos):
+ return pos, result
+ pos += 1
+ if not src.startswith(delim, pos):
+ return pos, result + delim
+ pos += 1
+ return pos, result + (delim * 2)
+
+
+def parse_basic_str(src: str, pos: Pos, *, multiline: bool) -> tuple[Pos, str]:
+ if multiline:
+ error_on = ILLEGAL_MULTILINE_BASIC_STR_CHARS
+ parse_escapes = parse_basic_str_escape_multiline
+ else:
+ error_on = ILLEGAL_BASIC_STR_CHARS
+ parse_escapes = parse_basic_str_escape
+ result = ""
+ start_pos = pos
+ while True:
+ try:
+ char = src[pos]
+ except IndexError:
+ raise suffixed_err(src, pos, "Unterminated string") from None
+ if char == '"':
+ if not multiline:
+ return pos + 1, result + src[start_pos:pos]
+ if src.startswith('"""', pos):
+ return pos + 3, result + src[start_pos:pos]
+ pos += 1
+ continue
+ if char == "\\":
+ result += src[start_pos:pos]
+ pos, parsed_escape = parse_escapes(src, pos)
+ result += parsed_escape
+ start_pos = pos
+ continue
+ if char in error_on:
+ raise suffixed_err(src, pos, f"Illegal character {char!r}")
+ pos += 1
+
+
+def parse_value( # noqa: C901
+ src: str, pos: Pos, parse_float: ParseFloat
+) -> tuple[Pos, Any]:
+ try:
+ char: str | None = src[pos]
+ except IndexError:
+ char = None
+
+ # IMPORTANT: order conditions based on speed of checking and likelihood
+
+ # Basic strings
+ if char == '"':
+ if src.startswith('"""', pos):
+ return parse_multiline_str(src, pos, literal=False)
+ return parse_one_line_basic_str(src, pos)
+
+ # Literal strings
+ if char == "'":
+ if src.startswith("'''", pos):
+ return parse_multiline_str(src, pos, literal=True)
+ return parse_literal_str(src, pos)
+
+ # Booleans
+ if char == "t":
+ if src.startswith("true", pos):
+ return pos + 4, True
+ if char == "f":
+ if src.startswith("false", pos):
+ return pos + 5, False
+
+ # Arrays
+ if char == "[":
+ return parse_array(src, pos, parse_float)
+
+ # Inline tables
+ if char == "{":
+ return parse_inline_table(src, pos, parse_float)
+
+ # Dates and times
+ datetime_match = RE_DATETIME.match(src, pos)
+ if datetime_match:
+ try:
+ datetime_obj = match_to_datetime(datetime_match)
+ except ValueError as e:
+ raise suffixed_err(src, pos, "Invalid date or datetime") from e
+ return datetime_match.end(), datetime_obj
+ localtime_match = RE_LOCALTIME.match(src, pos)
+ if localtime_match:
+ return localtime_match.end(), match_to_localtime(localtime_match)
+
+ # Integers and "normal" floats.
+ # The regex will greedily match any type starting with a decimal
+ # char, so needs to be located after handling of dates and times.
+ number_match = RE_NUMBER.match(src, pos)
+ if number_match:
+ return number_match.end(), match_to_number(number_match, parse_float)
+
+ # Special floats
+ first_three = src[pos : pos + 3]
+ if first_three in {"inf", "nan"}:
+ return pos + 3, parse_float(first_three)
+ first_four = src[pos : pos + 4]
+ if first_four in {"-inf", "+inf", "-nan", "+nan"}:
+ return pos + 4, parse_float(first_four)
+
+ raise suffixed_err(src, pos, "Invalid value")
+
+
+def suffixed_err(src: str, pos: Pos, msg: str) -> TOMLDecodeError:
+ """Return a `TOMLDecodeError` where error message is suffixed with
+ coordinates in source."""
+
+ def coord_repr(src: str, pos: Pos) -> str:
+ if pos >= len(src):
+ return "end of document"
+ line = src.count("\n", 0, pos) + 1
+ if line == 1:
+ column = pos + 1
+ else:
+ column = pos - src.rindex("\n", 0, pos)
+ return f"line {line}, column {column}"
+
+ return TOMLDecodeError(f"{msg} (at {coord_repr(src, pos)})")
+
+
+def is_unicode_scalar_value(codepoint: int) -> bool:
+ return (0 <= codepoint <= 55295) or (57344 <= codepoint <= 1114111)
+
+
+def make_safe_parse_float(parse_float: ParseFloat) -> ParseFloat:
+ """A decorator to make `parse_float` safe.
+
+ `parse_float` must not return dicts or lists, because these types
+ would be mixed with parsed TOML tables and arrays, thus confusing
+ the parser. The returned decorated callable raises `ValueError`
+ instead of returning illegal types.
+ """
+ # The default `float` callable never returns illegal types. Optimize it.
+ if parse_float is float: # type: ignore[comparison-overlap]
+ return float
+
+ def safe_parse_float(float_str: str) -> Any:
+ float_value = parse_float(float_str)
+ if isinstance(float_value, (dict, list)):
+ raise ValueError("parse_float must not return dicts or lists")
+ return float_value
+
+ return safe_parse_float
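
One practical consequence of `make_safe_parse_float`: callers can inject any float parser, and it is only invoked for tokens the number regex already classified as floats. A sketch using `decimal.Decimal` (vendored import path assumed):

```python
from decimal import Decimal

from dynaconf.vendor import tomllib  # assumed vendored path

# make_safe_parse_float wraps Decimal so it can never return a dict or list
# that would be confused with a parsed table or array.
data = tomllib.loads("pi = 3.14159\n", parse_float=Decimal)
print(type(data["pi"]), data["pi"])  # <class 'decimal.Decimal'> 3.14159
```
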
diff --git a/libs/dynaconf/vendor/tomllib/_re.py b/libs/dynaconf/vendor/tomllib/_re.py
new file mode 100644
index 000000000..053634537
--- /dev/null
+++ b/libs/dynaconf/vendor/tomllib/_re.py
@@ -0,0 +1,106 @@
+# SPDX-License-Identifier: MIT
+# SPDX-FileCopyrightText: 2021 Taneli Hukkinen
+
+from __future__ import annotations
+
+from datetime import date, datetime, time, timedelta, timezone, tzinfo
+from functools import lru_cache
+import re
+from typing import Any
+
+from ._types import ParseFloat
+
+# E.g.
+# - 00:32:00.999999
+# - 00:32:00
+_TIME_RE_STR = r"([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])(?:\.([0-9]{1,6})[0-9]*)?"
+
+RE_NUMBER = re.compile(
+ r"""
+0
+(?:
+ x[0-9A-Fa-f](?:_?[0-9A-Fa-f])* # hex
+ |
+ b[01](?:_?[01])* # bin
+ |
+ o[0-7](?:_?[0-7])* # oct
+)
+|
+[+-]?(?:0|[1-9](?:_?[0-9])*) # dec, integer part
+(?P<floatpart>
+ (?:\.[0-9](?:_?[0-9])*)? # optional fractional part
+ (?:[eE][+-]?[0-9](?:_?[0-9])*)? # optional exponent part
+)
+""",
+ flags=re.VERBOSE,
+)
+RE_LOCALTIME = re.compile(_TIME_RE_STR)
+RE_DATETIME = re.compile(
+ rf"""
+([0-9]{{4}})-(0[1-9]|1[0-2])-(0[1-9]|[12][0-9]|3[01]) # date, e.g. 1988-10-27
+(?:
+ [Tt ]
+ {_TIME_RE_STR}
+ (?:([Zz])|([+-])([01][0-9]|2[0-3]):([0-5][0-9]))? # optional time offset
+)?
+""",
+ flags=re.VERBOSE,
+)
+
+
+def match_to_datetime(match: re.Match) -> datetime | date:
+ """Convert a `RE_DATETIME` match to `datetime.datetime` or `datetime.date`.
+
+ Raises ValueError if the match does not correspond to a valid date
+ or datetime.
+ """
+ (
+ year_str,
+ month_str,
+ day_str,
+ hour_str,
+ minute_str,
+ sec_str,
+ micros_str,
+ zulu_time,
+ offset_sign_str,
+ offset_hour_str,
+ offset_minute_str,
+ ) = match.groups()
+ year, month, day = int(year_str), int(month_str), int(day_str)
+ if hour_str is None:
+ return date(year, month, day)
+ hour, minute, sec = int(hour_str), int(minute_str), int(sec_str)
+ micros = int(micros_str.ljust(6, "0")) if micros_str else 0
+ if offset_sign_str:
+ tz: tzinfo | None = cached_tz(
+ offset_hour_str, offset_minute_str, offset_sign_str
+ )
+ elif zulu_time:
+ tz = timezone.utc
+ else: # local date-time
+ tz = None
+ return datetime(year, month, day, hour, minute, sec, micros, tzinfo=tz)
+
+
+@lru_cache(maxsize=None)
+def cached_tz(hour_str: str, minute_str: str, sign_str: str) -> timezone:
+ sign = 1 if sign_str == "+" else -1
+ return timezone(
+ timedelta(
+ hours=sign * int(hour_str),
+ minutes=sign * int(minute_str),
+ )
+ )
+
+
+def match_to_localtime(match: re.Match) -> time:
+ hour_str, minute_str, sec_str, micros_str = match.groups()
+ micros = int(micros_str.ljust(6, "0")) if micros_str else 0
+ return time(int(hour_str), int(minute_str), int(sec_str), micros)
+
+
+def match_to_number(match: re.Match, parse_float: ParseFloat) -> Any:
+ if match.group("floatpart"):
+ return parse_float(match.group())
+ return int(match.group(), 0)
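
A short sketch of how the datetime regex and its converter combine (vendored import path assumed):

```python
from dynaconf.vendor.tomllib._re import RE_DATETIME, match_to_datetime

m = RE_DATETIME.match("1988-10-27T12:30:00Z")
print(match_to_datetime(m))  # 1988-10-27 12:30:00+00:00
```
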
diff --git a/libs/dynaconf/vendor/tomllib/_types.py b/libs/dynaconf/vendor/tomllib/_types.py
new file mode 100644
index 000000000..68d70d9f9
--- /dev/null
+++ b/libs/dynaconf/vendor/tomllib/_types.py
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: MIT
+# SPDX-FileCopyrightText: 2021 Taneli Hukkinen
+
+from typing import Any, Callable, Tuple
+
+# Type annotations
+ParseFloat = Callable[[str], Any]
+Key = Tuple[str, ...]
+Pos = int
diff --git a/libs/dynaconf/vendor/tomllib/_writer.py b/libs/dynaconf/vendor/tomllib/_writer.py
new file mode 100644
index 000000000..e67e53963
--- /dev/null
+++ b/libs/dynaconf/vendor/tomllib/_writer.py
@@ -0,0 +1,202 @@
+# SPDX-License-Identifier: MIT
+# SPDX-FileCopyrightText: 2021 Taneli Hukkinen
+
+from __future__ import annotations
+
+from collections.abc import Generator, Mapping
+from datetime import date, datetime, time
+from decimal import Decimal
+import string
+from types import MappingProxyType
+from typing import Any, BinaryIO, NamedTuple
+
+ASCII_CTRL = frozenset(chr(i) for i in range(32)) | frozenset(chr(127))
+ILLEGAL_BASIC_STR_CHARS = frozenset('"\\') | ASCII_CTRL - frozenset("\t")
+BARE_KEY_CHARS = frozenset(string.ascii_letters + string.digits + "-_")
+ARRAY_TYPES = (list, tuple)
+ARRAY_INDENT = " " * 4
+MAX_LINE_LENGTH = 100
+
+COMPACT_ESCAPES = MappingProxyType(
+ {
+ "\u0008": "\\b", # backspace
+ "\u000A": "\\n", # linefeed
+ "\u000C": "\\f", # form feed
+ "\u000D": "\\r", # carriage return
+ "\u0022": '\\"', # quote
+ "\u005C": "\\\\", # backslash
+ }
+)
+
+
+def dump(
+ __obj: dict[str, Any], __fp: BinaryIO, *, multiline_strings: bool = False
+) -> None:
+ ctx = Context(multiline_strings, {})
+ for chunk in gen_table_chunks(__obj, ctx, name=""):
+ __fp.write(chunk.encode())
+
+
+def dumps(__obj: dict[str, Any], *, multiline_strings: bool = False) -> str:
+ ctx = Context(multiline_strings, {})
+ return "".join(gen_table_chunks(__obj, ctx, name=""))
+
+
+class Context(NamedTuple):
+ allow_multiline: bool
+ # cache rendered inline tables (mapping from object id to rendered inline table)
+ inline_table_cache: dict[int, str]
+
+
+def gen_table_chunks(
+ table: Mapping[str, Any],
+ ctx: Context,
+ *,
+ name: str,
+ inside_aot: bool = False,
+) -> Generator[str, None, None]:
+ yielded = False
+ literals = []
+ tables: list[tuple[str, Any, bool]] = [] # => [(key, value, inside_aot)]
+ for k, v in table.items():
+ if isinstance(v, dict):
+ tables.append((k, v, False))
+ elif is_aot(v) and not all(is_suitable_inline_table(t, ctx) for t in v):
+ tables.extend((k, t, True) for t in v)
+ else:
+ literals.append((k, v))
+
+ if inside_aot or name and (literals or not tables):
+ yielded = True
+ yield f"[[{name}]]\n" if inside_aot else f"[{name}]\n"
+
+ if literals:
+ yielded = True
+ for k, v in literals:
+ yield f"{format_key_part(k)} = {format_literal(v, ctx)}\n"
+
+ for k, v, in_aot in tables:
+ if yielded:
+ yield "\n"
+ else:
+ yielded = True
+ key_part = format_key_part(k)
+ display_name = f"{name}.{key_part}" if name else key_part
+ yield from gen_table_chunks(v, ctx, name=display_name, inside_aot=in_aot)
+
+
+def format_literal(obj: object, ctx: Context, *, nest_level: int = 0) -> str:
+ if isinstance(obj, bool):
+ return "true" if obj else "false"
+ if isinstance(obj, (int, float, date, datetime)):
+ return str(obj)
+ if isinstance(obj, Decimal):
+ return format_decimal(obj)
+ if isinstance(obj, time):
+ if obj.tzinfo:
+ raise ValueError("TOML does not support offset times")
+ return str(obj)
+ if isinstance(obj, str):
+ return format_string(obj, allow_multiline=ctx.allow_multiline)
+ if isinstance(obj, ARRAY_TYPES):
+ return format_inline_array(obj, ctx, nest_level)
+ if isinstance(obj, dict):
+ return format_inline_table(obj, ctx)
+ raise TypeError(f"Object of type {type(obj)} is not TOML serializable")
+
+
+def format_decimal(obj: Decimal) -> str:
+ if obj.is_nan():
+ return "nan"
+ if obj == Decimal("inf"):
+ return "inf"
+ if obj == Decimal("-inf"):
+ return "-inf"
+ return str(obj)
+
+
+def format_inline_table(obj: dict, ctx: Context) -> str:
+ # check cache first
+ obj_id = id(obj)
+ if obj_id in ctx.inline_table_cache:
+ return ctx.inline_table_cache[obj_id]
+
+ if not obj:
+ rendered = "{}"
+ else:
+ rendered = (
+ "{ "
+ + ", ".join(
+ f"{format_key_part(k)} = {format_literal(v, ctx)}"
+ for k, v in obj.items()
+ )
+ + " }"
+ )
+ ctx.inline_table_cache[obj_id] = rendered
+ return rendered
+
+
+def format_inline_array(obj: tuple | list, ctx: Context, nest_level: int) -> str:
+ if not obj:
+ return "[]"
+ item_indent = ARRAY_INDENT * (1 + nest_level)
+ closing_bracket_indent = ARRAY_INDENT * nest_level
+ return (
+ "[\n"
+ + ",\n".join(
+ item_indent + format_literal(item, ctx, nest_level=nest_level + 1)
+ for item in obj
+ )
+ + f",\n{closing_bracket_indent}]"
+ )
+
+
+def format_key_part(part: str) -> str:
+ if part and BARE_KEY_CHARS.issuperset(part):
+ return part
+ return format_string(part, allow_multiline=False)
+
+
+def format_string(s: str, *, allow_multiline: bool) -> str:
+ do_multiline = allow_multiline and "\n" in s
+ if do_multiline:
+ result = '"""\n'
+ s = s.replace("\r\n", "\n")
+ else:
+ result = '"'
+
+ pos = seq_start = 0
+ while True:
+ try:
+ char = s[pos]
+ except IndexError:
+ result += s[seq_start:pos]
+ if do_multiline:
+ return result + '"""'
+ return result + '"'
+ if char in ILLEGAL_BASIC_STR_CHARS:
+ result += s[seq_start:pos]
+ if char in COMPACT_ESCAPES:
+ if do_multiline and char == "\n":
+ result += "\n"
+ else:
+ result += COMPACT_ESCAPES[char]
+ else:
+ result += "\\u" + hex(ord(char))[2:].rjust(4, "0")
+ seq_start = pos + 1
+ pos += 1
+
+
+def is_aot(obj: Any) -> bool:
+ """Decides if an object behaves as an array of tables (i.e. a nonempty list
+ of dicts)."""
+ return bool(
+ isinstance(obj, ARRAY_TYPES) and obj and all(isinstance(v, dict) for v in obj)
+ )
+
+
+def is_suitable_inline_table(obj: dict, ctx: Context) -> bool:
+ """Use heuristics to decide if the inline-style representation is a good
+ choice for a given table."""
+ rendered_inline = f"{ARRAY_INDENT}{format_inline_table(obj, ctx)},"
+ return len(rendered_inline) <= MAX_LINE_LENGTH and "\n" not in rendered_inline
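
The writer half mirrors tomli-w; `multiline_strings=True` is the one knob exposed, letting embedded newlines render as a `"""` block instead of `\n` escapes. A minimal sketch (vendored import path assumed):

```python
from dynaconf.vendor.tomllib import dumps  # assumed vendored path

doc = {"motd": "line one\nline two", "ports": [8080, 8443]}
# With multiline_strings=True the newline in "motd" becomes a real line
# break inside a """ ... """ block rather than an \n escape.
print(dumps(doc, multiline_strings=True))
```
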
diff --git a/libs/dynaconf/vendor/vendor.txt b/libs/dynaconf/vendor/vendor.txt
new file mode 100644
index 000000000..65f74aa33
--- /dev/null
+++ b/libs/dynaconf/vendor/vendor.txt
@@ -0,0 +1,6 @@
+python-box==4.2.3
+toml==0.10.8
+tomli==2.0.1
+click==7.1.x
+python-dotenv==0.13.0
+ruamel.yaml==0.16.10
diff --git a/libs/dynaconf/vendor/vendor_history b/libs/dynaconf/vendor/vendor_history
new file mode 100644
index 000000000..1eef3a204
--- /dev/null
+++ b/libs/dynaconf/vendor/vendor_history
@@ -0,0 +1,26 @@
+## TOMLLIB
+
+- Sept 4, 2022
+
+Added tomli as a vendored library to replace uiri/toml;
+this lib also has an MIT license.
+Package renamed to `tomllib` to be compatible with the std lib on Python 3.11.
+Added tomli-w's `_writer` to the tomllib package.
+
+## TOML
+
+- Sept 4, 2022
+
+uiri/toml is kept for backwards compatibility, but tomllib has been
+introduced as the default TOML parser.
+
+`toml` is a fallback if tomllib fails to parse the file.
+That was done because `toml` allows unicode characters while tomllib
+follows the spec strictly.
+
+## BOX
+
+- Mar 2, 2021
+
+Fix #462 make DynaBox nested List to use DynaBox as default class
+https://github.com/dynaconf/dynaconf/pull/533/files
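
The strict-first, lenient-fallback policy described under "## TOML" can be sketched roughly like this (the helper name is illustrative, not the actual dynaconf loader code):

```python
from dynaconf.vendor import toml, tomllib  # assumed vendored paths


def read_toml(text):
    """Illustrative sketch: strict parser first, lenient fallback second."""
    try:
        return tomllib.loads(text)
    except tomllib.TOMLDecodeError:
        # tomllib follows the TOML spec strictly; uiri/toml tolerates some
        # documents (e.g. stray unicode characters) that the spec rejects.
        return toml.loads(text)
```
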
diff --git a/libs/simpleconfigparser/__init__.py b/libs/simpleconfigparser/__init__.py
deleted file mode 100644
index c84ccbacc..000000000
--- a/libs/simpleconfigparser/__init__.py
+++ /dev/null
@@ -1,131 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-The MIT License
-
-Copyright (c) 2013 Helgi Þorbjörnsson <[email protected]>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-"""
-
-try:
- from configparser import ConfigParser as configparser, NoOptionError, NoSectionError
-except ImportError:
- from ConfigParser import RawConfigParser as configparser, NoOptionError, NoSectionError
-
-
-class simpleconfigparser(configparser):
- class Section(dict):
- """
- Contain the section specific items that can be accessed via object properties
- """
- parser = None
- section = None
-
- def __init__(self, section, parser):
- self.section = section
- self.parser = parser
-
- def __getitem__(self, name, raw=False, vars=None):
- """Fetch a value via the dict handler"""
- if name not in simpleconfigparser.Section.__dict__:
- return self.parser.get(self.section, name, raw, vars)
-
- def __setitem__(self, name, value):
- """Set a value via the dict handler"""
- if name in simpleconfigparser.Section.__dict__:
- return dict.__setitem__(self, name, value)
-
- return self.parser.set(self.section, name, value)
-
- def __getattr__(self, name, raw=False, vars=None):
- """Fetch a value via the object handler"""
- if name not in simpleconfigparser.Section.__dict__:
- return self.parser.get(self.section, name, raw, vars)
-
- def __setattr__(self, name, value):
- """Set a value via the object handler"""
- if name in simpleconfigparser.Section.__dict__:
- return object.__setattr__(self, name, value)
-
- return self.parser.set(self.section, name, value)
-
- def getboolean(self, name):
- if not self.section:
- return None
-
- return self.parser.getboolean(self.section, name)
-
- def items(self):
- if not self.section:
- return None
-
- items = []
- for key, value in self.parser.items(self.section):
- # strip quotes
- items.append((key, value.strip('"\'')))
-
- return items
-
- def __init__(self, defaults=None, *args, **kwargs):
- configparser.__init__(self, defaults=None, *args, **kwargs)
- # Improved defaults handling
- if isinstance(defaults, dict):
- for section, values in defaults.items():
- # Break out original format defaults was passed in
- if not isinstance(values, dict):
- break
-
- if section not in self.sections():
- self.add_section(section)
-
- for name, value in values.items():
- self.set(section, name, str(value))
-
- def __getitem__(self, name):
- """Access a section via a dict handler"""
- if name not in simpleconfigparser.__dict__:
- if name not in self.sections():
- self.add_section(name)
-
- return simpleconfigparser.Section(name, self)
-
- return None
-
- def __getattr__(self, name, raw=False, vars=None):
- """Access a section via a object handler"""
- if name not in simpleconfigparser.__dict__:
- if name not in self.sections():
- self.add_section(name)
-
- return simpleconfigparser.Section(name, self)
-
- return None
-
- def set(self, section, option, value=None):
- try:
- return configparser.set(self, section, option, value)
- except NoSectionError:
- return None
-
- def get(self, section, option, raw=False, vars=None, fallback=None):
- try:
- # Strip out quotes from the edges
- return configparser.get(self, section, option).strip('"\'')
- except NoOptionError:
- return None
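
With this INI wrapper deleted, configuration reads go through dynaconf (pinned below in libs/version.txt as `dynaconf==3.1.12`) against the new config.yaml. A hedged sketch of the equivalent access, with an illustrative file name; Bazarr's real wiring lives in `bazarr/app/config.py`:

```python
from dynaconf import Dynaconf

# Illustrative only: point dynaconf at a YAML settings file.
settings = Dynaconf(settings_files=["config.yaml"])
# Former INI sections become nested keys instead of get(section, option) calls.
print(settings.get("general"))
```
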
diff --git a/libs/version.txt b/libs/version.txt
index c35d40497..ced825452 100644
--- a/libs/version.txt
+++ b/libs/version.txt
@@ -8,6 +8,7 @@ attrs==22.1.0
charset-normalizer==3.1.0
deep-translator==1.9.1
dogpile.cache==1.1.8
+dynaconf==3.1.12
fese==0.1.2
ffsubsync==0.4.20
Flask-Compress==1.13 # modified to import brotli only if required
@@ -42,7 +43,6 @@ whichcraft==0.6.1
# Bazarr modified dependencies
#signalr-client-threads==0.0.12 # Modified to work with Sonarr v3. Not used anymore with v4
-#SimpleConfigParser==0.1.0 # modified version: do not update!!!
#subliminal_patch # Modified version from Sub-Zero.bundle
#subzero # Modified version from Sub-Zero.bundle