def write_toml(self, filename: Union[str, Path]) -> PhantomConfig:
    """Serialize this config to a TOML file.

    Parameters
    ----------
    filename
        The name of the TOML output file.

    Returns
    -------
    PhantomConfig
        This instance, to allow call chaining.
    """
    # TODO: writing to TOML does not preserve the comments.
    doc = tomlkit.document()

    # Optional header lines become leading TOML comments.
    if self.header is not None:
        for header_line in self.header:
            doc.add(tomlkit.comment(header_line))
        doc.add(tomlkit.nl())

    for section_name, section_value in self.to_dict().items():
        section = tomlkit.table()
        if isinstance(section_value, dict):
            for key, (value, note) in section_value.items():
                # timedeltas have no native TOML form; stringify them.
                if isinstance(value, datetime.timedelta):
                    value = _convert_timedelta_to_str(value)
                section.add(tomlkit.nl())
                if note is not None:
                    section.add(tomlkit.comment(note))
                section.add(key, value)
        doc.add(section_name, section)

    with open(filename, 'w') as fp:
        fp.write(tomlkit.dumps(doc))
    return self
def generate_toml_help(config_cls, *, parent=None):
    """Render a commented TOML skeleton describing *config_cls* and its fields.

    Nested attrs classes become sub-tables; fields without a default are
    emitted as commented-out placeholder assignments.
    """
    if parent is None:
        parent = tomlkit.table()

    # The class docstring becomes the leading comment block.
    for doc_line in trim(config_cls.__doc__).split("\n"):
        parent.add(tomlkit.comment(doc_line))
    parent.add(tomlkit.nl())

    for field in attr.fields(config_cls):
        field_meta = field.metadata.get(CNF_KEY)
        if attr.has(field.type):
            # Nested config class: recurse into its own table.
            parent.add(field.name, generate_toml_help(field.type))
            continue
        if field_meta:
            parent.add(tomlkit.comment(field_meta.help))
        if field.default in (missing, attr.NOTHING):
            # No default: show a commented-out placeholder assignment.
            parent.add(tomlkit.comment(f"{field.name} ="))
        else:
            default = field.default() if callable(field.default) else field.default
            parent.add(field.name, default)
        parent.add(tomlkit.nl())

    return parent
def toml_headers():
    """Generate and write _netlify.toml with build settings and cache headers."""
    out_path = Path("_netlify.toml")

    doc = document()
    doc.add(comment("netlify.toml"))
    doc.add(comment("Generated: " + datetime.now().isoformat()))

    # Build section with a nested, indented environment table.
    environment = table().indent(2)
    environment["YARN_VERSION"] = "1.21.0"
    build = table()
    build["publish"] = "_site/"
    build["command"] = "make build"
    build["environment"] = environment
    doc["build"] = build

    # Headers as an array-of-tables: service worker, manifest, immutable assets.
    header_entries = aot()
    header_entries.append(
        make_headers("sw.js", {
            "service-worker-allowed": "/",
            "cache-control": NO_CACHE
        }))
    header_entries.append(
        make_headers("**/manifest.json", {"cache-control": NO_CACHE}))
    for pattern in FOREVER_PATTERNS:
        header_entries.append(
            make_headers(pattern, {"cache-control": CACHE_FOREVER}))
    doc["headers"] = header_entries

    rendered = dumps(doc)
    print(rendered)
    # write_text returns the number of characters written; echo it like before.
    print(out_path.write_text(rendered))
def create_configuration(self, config: Configuration, *style_urls: str) -> None:
    """Create a configuration file."""
    from nitpick.style import StyleManager  # pylint: disable=import-outside-toplevel

    # Reuse an existing file when present; otherwise start a fresh document.
    if config.file:
        document: TOMLDocument = tomlkit.parse(config.file.read_text())
    else:
        document = tomlkit.document()
        config.file = self.root / DOT_NITPICK_TOML

    if not style_urls:
        style_urls = (str(StyleManager.get_default_style_url()), )

    nitpick_section = tomlkit.table()
    nitpick_section.add(
        tomlkit.comment("Generated by the 'nitpick init' command"))
    nitpick_section.add(
        tomlkit.comment(f"More info at {READ_THE_DOCS_URL}configuration.html"))
    nitpick_section.add(
        "style", tomlkit.array([tomlkit.string(url) for url in style_urls]))
    document.add(SingleKey(TOOL_NITPICK_KEY, KeyType.Bare), nitpick_section)

    # config.file will always have a value at this point, but mypy can't see it.
    config.file.write_text(tomlkit.dumps(document, sort_keys=True))  # type: ignore
def add_dictionary(current_path, data):
    """
    Recursive function to output config contents.

    Each nested dict in *data* becomes a TOML table keyed by the dotted
    path in *current_path*; ConfigItem leaves are emitted via
    add_config_item. Relies on the enclosing scope for ini_contents,
    wrap_comment_lines, add_config_item, ConfigItem, deepcopy and _.
    """
    section = tk.table()
    if len(current_path):
        # Non-root section: emit a boxed banner naming the section, plus
        # its localized description wrapped to comment width.
        ini_contents.add(tk.nl())
        ini_contents.add(tk.nl())
        ini_contents.add(
            tk.comment(
                "##############################################################################"
            ))
        ini_contents.add(
            tk.comment(f'{".".join(current_path): ^76} #'))
        ini_contents.add(tk.comment(f"{'': <76} #"))
        wrap_comment_lines(
            _(f'lib.configs.section.{".".join(current_path)}'),
            True,
            contents=ini_contents)
        ini_contents.add(
            tk.comment(
                "##############################################################################"
            ))
    for config, items in data.items():
        # Copy the path so siblings don't see each other's appended keys.
        temp_path = deepcopy(current_path)
        temp_path.append(config)
        if isinstance(items, ConfigItem):
            # Leaf value; unset items are omitted from the output.
            if items.value is None:
                continue
            add_config_item(section, temp_path, items)
        else:
            # Nested mapping: recurse to build a child section.
            add_dictionary(temp_path, items)
    if len(current_path):
        # Attach the finished table under its dotted-path key.
        ini_contents[".".join(current_path)] = section
def perform_query(args: argparse.Namespace):
    """Run the TOML-defined queries against the index and record matching doc ids.

    Queries that already have a 'doc_id' entry are skipped unless their
    're-query' flag is set. Results are written back into the same TOML
    file, annotated with dated comments for both matches and misses.
    """
    client = MiniClient(args.host, args.port)
    dt_str = get_iso8601_dt_str()
    # dump mapping
    with open(args.queries) as f:
        doc = tomlkit.loads(f.read())
    all_queries = []
    query_names = []
    # TODO: pre-populate with existing results -- if something we have
    # already satisfies the new queries, use that instead
    for query_name, q_body in doc['queries'].items():
        if 'doc_id' in q_body and not q_body.get('re-query', False):
            continue
        all_queries.append(json.loads(q_body['query']))
        query_names.append(query_name)
    # FIXME: checkpoint file name
    id_to_query = dump_ids_from_queries(client,
                                        args.index,
                                        all_queries,
                                        args.num,
                                        checkpoint_path='ckpt.pkl',
                                        size=args.size)
    # do a deduplication before we do the set cover problem
    # better (?) if we consider superset/subset before doing the
    # minimal set cover
    subsets = {}
    universe = set()
    for doc_id, subset in id_to_query.items():
        universe |= subset
        subsets[frozenset(subset)] = doc_id
    # Query indices never matched by any returned document.
    missing = set(range(len(all_queries))) - universe
    missing_cmt = tomlkit.comment(f"No match was found on {dt_str}")
    for missing_id in missing:
        query_name = query_names[missing_id]
        doc['queries'][query_name].add('doc_id', [])
        # NOTE(review): the same comment item instance is added to several
        # tables here -- tomlkit items carry container state; confirm output.
        doc['queries'][query_name].add(missing_cmt)
    picked_ids = minimal_cover_set(subsets)
    found_cmt = tomlkit.comment(f"Updated on {dt_str}")
    for doc_id, subsets in picked_ids.items():
        # NOTE(review): this loop rebinds `subsets`, shadowing the dedup
        # map built above -- intentional only if it is no longer needed.
        for query_idx in subsets:
            query_name = query_names[query_idx]
            q_body = doc['queries'][query_name]
            docs = q_body.get('doc_id', [])
            docs.append(doc_id)
            found_cmt = tomlkit.comment(f"{doc_id} added on {dt_str}")
            # if 'doc_id' in q_body:
            #     q_body.remove('doc_id')
            # here is the thing, pop is broken
            q_body['doc_id'] = docs
            q_body.add(found_cmt)
    # FIXME: can't precisely control the presentation,
    # tomlkit is really lacking in terms of documentation
    # but on the other hand it is kind of insane to use it this way
    with open(args.queries, 'w') as f:
        f.write(tomlkit.dumps(doc))
def wrap_comment_lines(text,
                       trailing_comment: Optional[bool] = None,
                       contents: Optional = None):
    """Add *text* to *contents* as TOML comments, wrapped at 75 columns.

    When trailing_comment is True, each line is left-justified to 76 columns
    and suffixed with ' #'. Defaults to the module-level ini_contents.
    """
    target = ini_contents if contents is None else contents
    for wrapped in wrap(text, 75, break_long_words=False):
        if trailing_comment is True:
            target.add(tk.comment(f"{wrapped: <76} #"))
        else:
            target.add(tk.comment(f"{wrapped}"))
def do_import(
    project: Project,
    filename: str,
    format: str | None = None,
    options: Namespace | None = None,
) -> None:
    """Import project metadata from given file.

    :param project: the project instance
    :param filename: the file name
    :param format: the file format, or guess if not given.
    :param options: other options parsed to the CLI.
    """
    if format:
        fmt = format
    else:
        # Probe each registered format; for-else raises when none matches.
        for fmt in FORMATS:
            if FORMATS[fmt].check_fingerprint(project, filename):
                break
        else:
            raise PdmUsageError(
                "Can't derive the file format automatically, "
                "please specify it via '-f/--format' option."
            )
    if options is None:
        options = Namespace(dev=False, group=None)
    metadata, pdm_settings = FORMATS[fmt].convert(project, filename, options)

    # Start from the existing pyproject, or a blank document for new projects.
    pyproject = project.pyproject or tomlkit.document()
    if "tool" not in pyproject or "pdm" not in pyproject["tool"]:  # type: ignore
        pyproject.setdefault("tool", {})["pdm"] = tomlkit.table()
    if "project" not in pyproject:
        pyproject.add("project", tomlkit.table())  # type: ignore
        pyproject["project"].add(  # type: ignore
            tomlkit.comment("PEP 621 project metadata")
        )
        pyproject["project"].add(  # type: ignore
            tomlkit.comment("See https://www.python.org/dev/peps/pep-0621/")
        )

    merge_dictionary(pyproject["project"], metadata)  # type: ignore
    merge_dictionary(pyproject["tool"]["pdm"], pdm_settings)  # type: ignore
    # Pin the build backend to pdm-pep517.
    pyproject["build-system"] = {
        "requires": ["pdm-pep517"],
        "build-backend": "pdm.pep517.api",
    }
    project.pyproject = cast(dict, pyproject)
    project.write_pyproject()
def write_to_file(parameters: dataclass,
                  filename: Union[str, Path],
                  *,
                  header: str = None,
                  overwrite: bool = False) -> None:
    """
    Write the parameters to TOML file.

    Parameters
    ----------
    parameters : dataclass
        The parameters dataclass to write to file.
    filename : str or Path
        The name of the file to write. Should have extension '.toml'.
    header : str
        A header written as a TOML comment at the top of the file.
    overwrite : bool, default=False
        Whether to overwrite if the file exists.
    """
    if not overwrite and pathlib.Path(filename).exists():
        raise ValueError(
            'file already exists, add overwrite=True to overwrite')

    document = tomlkit.document()

    # Header text wrapped to 70 columns, one TOML comment per line.
    if header is not None:
        for chunk in textwrap.wrap(header, 70):
            document.add(tomlkit.comment(chunk))

    for field in dataclasses.fields(parameters):
        field_value = getattr(parameters, field.name)
        # TOML has no tuple type; store tuples as arrays.
        if isinstance(field_value, tuple):
            field_value = list(field_value)
        document.add(tomlkit.nl())
        # Each field's wrapped description precedes its value.
        for desc_line in textwrap.wrap(field.metadata['description'], 70):
            document.add(tomlkit.comment(desc_line))
        document.add(field.name, field_value)

    with open(filename, 'w') as fp:
        fp.write(tomlkit.dumps(document))
def add_value(sec, k, v):
    """Add ``k = v`` to table *sec*; emit a commented placeholder when v is None.

    Returns True when a real value was written, False for the placeholder.
    """
    if v is None:
        sec.add(tomlkit.comment(f'{k} = # Uncomment to use'))
        return False
    sec.add(k, v)
    return True
def generate_queries(args: argparse.Namespace):
    """Generate an 'exists' query per mapping field and write them as TOML.

    :param args: CLI namespace providing host, port, index and output path.
    """
    client = MiniClient(args.host, args.port)
    # dump mapping
    mappings = client.get_mapping(index=args.index)['mappings']
    all_fields = list(get_fields(mappings['properties']))
    doc = tomlkit.document()
    dt_str = get_iso8601_dt_str()
    # build all queries
    queries = tomlkit.table()
    for field_name in all_fields:
        t = tomlkit.table()
        # BUG FIX: build a fresh comment item per table. Previously a single
        # tomlkit comment instance was added to every table; tomlkit items
        # keep per-container state, so sharing one object aliases it.
        t.add(tomlkit.comment(f"Automatically generated on {dt_str}"))
        replace_name = field_name.replace('.', '_')
        query_name = f'field_{replace_name}_exists'
        query = json.dumps({'query': {'exists': {'field': field_name}}})
        t.add('query', query)
        t.add('auto_gen', True)
        queries.add(query_name, t)
    doc["queries"] = queries
    with open(args.output, 'w') as f:
        f.write(tomlkit.dumps(doc))
def _write(self):
    """
    Writes build definition details into build.toml file, which would be used by the next build.
    build.toml file will contain the same information as build graph,
    function details will only be preserved as function names,
    layer details will only be preserved as layer names.
    """
    # convert build definition list into toml table
    function_build_definitions_table = tomlkit.table()
    for build_definition in self._function_build_definitions:
        build_definition_as_table = _function_build_definition_to_toml_table(build_definition)
        function_build_definitions_table.add(build_definition.uuid, build_definition_as_table)

    layer_build_definitions_table = tomlkit.table()
    for build_definition in self._layer_build_definitions:
        build_definition_as_table = _layer_build_definition_to_toml_table(build_definition)
        layer_build_definitions_table.add(build_definition.uuid, build_definition_as_table)

    # create toml document and add build definitions
    document = tomlkit.document()
    document.add(tomlkit.comment("This file is auto generated by SAM CLI build command"))
    document.add(BuildGraph.FUNCTION_BUILD_DEFINITIONS, function_build_definitions_table)
    document.add(BuildGraph.LAYER_BUILD_DEFINITIONS, layer_build_definitions_table)

    # FIX: Path.touch() replaces the unmanaged `open(..., "a+").close()` file
    # handle; same effect (create if absent) without a dangling file object.
    if not self._filepath.exists():
        self._filepath.touch()
    self._filepath.write_text(tomlkit.dumps(document))
def merge_config_files_dict(config_files,
                            configuration=None,
                            testbed_configuration=None,
                            user_settings=None):
    """Merge per-file TOML blocks with configuration/testbed/user overrides.

    :param config_files: mapping file path -> {"toml": doc, "blocks": [...]}
        (updated in place; each entry is replaced by {"toml": merged_doc}).
    :param configuration: optional dict of per-block overrides.
    :param testbed_configuration: optional dict of per-block testbed overrides.
    :param user_settings: optional dict of per-block user overrides.
    :raises TypeError: if any override argument is not a dict.
    :return: the (mutated) config_files mapping.
    """
    for this_config_file in config_files:
        hm_exp = config_files[this_config_file]["toml"].copy()
        block_config = tomlkit.document()
        if configuration is not None:
            f = this_config_file.split("/")[-1]
            if f == "config_exp.toml":
                block_config.add(
                    tomlkit.comment(
                        "\n# HARMONIE experiment configuration file\n#" +
                        "\n# Please read the documentation on " +
                        "https://hirlam.org/trac/wiki/HarmonieSystemDocumentation " +
                        "first\n#"))
        for block in config_files[this_config_file]["blocks"]:
            if configuration is not None:
                # BUG FIX: isinstance instead of `type(...) is not dict`, so
                # dict subclasses (OrderedDict, TOML containers) are accepted;
                # TypeError instead of bare Exception for a wrong type.
                if not isinstance(configuration, dict):
                    raise TypeError("Configuration should be a dict here!")
                if block in configuration:
                    merged_config = merge_toml_env(hm_exp[block],
                                                   configuration[block])
                else:
                    merged_config = hm_exp[block]
                block_config.update({block: merged_config})
            if testbed_configuration is not None:
                if not isinstance(testbed_configuration, dict):
                    raise TypeError(
                        "Testbed configuration should be a dict here!")
                # NOTE(review): reads block_config[block]; assumes the
                # configuration pass above populated it -- confirm callers.
                if block in testbed_configuration:
                    hm_testbed = merge_toml_env(block_config[block],
                                                testbed_configuration[block])
                else:
                    hm_testbed = block_config[block]
                block_config.update({block: hm_testbed})
            if user_settings is not None:
                if not isinstance(user_settings, dict):
                    raise TypeError("User settings should be a dict here!")
                if block in user_settings:
                    print("Merge user settings in block " + block)
                    user = merge_toml_env(block_config[block],
                                          user_settings[block])
                    block_config.update({block: user})
        config_files.update({this_config_file: {"toml": block_config}})
    return config_files
def dict_to_toml(d):
    """Convert a two-level dict ``{section: {key: value}}`` into a tomlkit document.

    :param d: mapping of section name to a flat key/value mapping.
    :return: a tomlkit document with one table per section.
    """
    import tomlkit

    toml = tomlkit.document()
    # BUG FIX: corrected typo "Autogenertod" in the generated header comment.
    toml.add(tomlkit.comment("Autogenerated by anysnake"))
    for key, sub_d in d.items():
        table = tomlkit.table()
        for k, v in sub_d.items():
            table.add(k, v)
        toml.add(key, table)
    return toml
def do_import(project: Project,
              filename: str,
              format: Optional[str] = None) -> None:
    """Import project metadata from given file.

    :param project: the project instance
    :param filename: the file name
    :param format: the file format, or guess if not given.
    """
    if not format:
        # Probe registered formats; the for-else raises when none matches.
        for key in FORMATS:
            if FORMATS[key].check_fingerprint(project, filename):
                break
        else:
            raise PdmUsageError("Can't derive the file format automatically, "
                                "please specify it via '-f/--format' option.")
    else:
        key = format
    project_data, settings = FORMATS[key].convert(project, filename)
    # Fall back to a fresh document when the project has no pyproject yet.
    pyproject = project.pyproject or tomlkit.document()
    if "tool" not in pyproject or "pdm" not in pyproject["tool"]:
        setdefault(pyproject, "tool", {})["pdm"] = tomlkit.table()
    pyproject["tool"]["pdm"].update(settings)
    if "project" not in pyproject:
        pyproject.add("project", tomlkit.table())
        pyproject["project"].add(tomlkit.comment("PEP 621 project metadata"))
        pyproject["project"].add(
            tomlkit.comment("See https://www.python.org/dev/peps/pep-0621/"))
    pyproject["project"].update(project_data)
    # Pin the build backend to pdm-pep517.
    pyproject["build-system"] = {
        "requires": ["pdm-pep517"],
        "build-backend": "pdm.pep517.api",
    }
    project.pyproject = pyproject
    project.write_pyproject()
def build_table(header=None):
    """Build a tomlkit table from the enclosing ``dic`` mapping.

    Dict-valued entries must carry a 'value' plus either a 'comment'
    (single line) or 'comments' (list of lines); plain values are added
    as-is. Any malformed entry raises _ERR_NON_VAILD_DEFAULT_CFG.

    :param header: optional comment line placed at the top of the table.
    """
    try:
        t = table()
        if header:
            t.add(comment(header))
        for k, v in dic.items():
            if isinstance(v, Dict):
                if v.get("comment"):
                    t.add(comment(v["comment"]))
                elif v.get("comments"):
                    for c in v["comments"]:
                        t.add(comment(c))
                else:
                    raise _ERR_NON_VAILD_DEFAULT_CFG
                t.add(k, v["value"])
                t.add(nl())
            else:
                t.add(k, v)
    except Exception as exc:
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; chain the cause for easier debugging.
        raise _ERR_NON_VAILD_DEFAULT_CFG from exc
    return t
def write_example_config(example_file_path: str):
    """
    Writes an example config file using the config values declared so far

    :param example_file_path: path to write to
    """
    doc = tomlkit.document()
    for line in _get_header():
        doc.add(tomlkit.comment(line))
    grouped_keys = _aggregate_config_values(ConfigValue.config_values)
    _add_config_values_to_toml_object(doc, grouped_keys)
    # Placeholder values serialized as the sentinel become empty assignments.
    rendered = doc.as_string().replace(f'"{_NOT_SET}"', '')
    with open(example_file_path, 'w') as out:
        out.write(rendered)
def add_big_title(docu, title: str, width: int = 50):
    """Append a boxed banner with *title* centered, as comment lines in *docu*.

    Three blank lines are inserted before the banner.
    """
    border = "=" * width
    filler = "|" + " " * (width - 2) + "|"
    banner = [
        border,
        filler,
        f"|{title.center(width - 2)}|",
        filler,
        border,
    ]
    for _ in range(3):
        docu.add(nl())
    for text in banner:
        docu.add(comment(text))
def test_top_level_keys_are_put_at_the_root_of_the_document():
    # Scalar keys assigned after a table must still render before the
    # table header in the serialized document.
    doc = document()
    doc.add(comment("Comment"))
    doc["foo"] = {"name": "test"}
    doc["bar"] = 1

    expected = """\
# Comment
bar = 1

[foo]
name = "test"
"""

    assert doc.as_string() == expected
def save_params(ctx):
    """Persist the current command's CLI parameters into the TOML config file,
    then exit the program.

    :param ctx: click context; ``ctx.obj["config_file"]`` must point at the
        config path and ``save_config`` selects "explicit" or "full" mode.
    :raises click.BadParameter: when no --config path was supplied.
    """
    cmd = ctx.info_name
    params = {k: v for k, v in ctx.params.items() if v}
    save_type = params.pop("save_config")
    try:
        config_file = Path(ctx.obj["config_file"])
    except TypeError:
        raise click.BadParameter(
            "use `--save-config` with a specified `--config`")
    if config_file.is_file():
        console.print(f"Updating current config file at [b cyan]{config_file}")
        with config_file.open("r") as f:
            config = tomlkit.load(f)
    else:
        # BUG FIX: user-facing typo "Staring" -> "Starting".
        console.print(f"Starting a config file at [b cyan]{config_file}")
        config = tomlkit.document()
    if save_type == "explicit":
        # Drop parameters whose source enum value is 3 -- presumably
        # click's ParameterSource.DEFAULT, i.e. keep only values the user
        # actually supplied. TODO(review): confirm against the click version.
        params = {
            k: v
            for k, v in params.items()
            if ctx.get_parameter_source(k).value != 3
        }
    # sanitize the path's for writing to toml
    for k in ["input", "pipeline", "output"]:
        if k in params.keys():
            params[k] = str(params[k])
    config[cmd] = params
    # Commands whose omitted options should appear as commented hints.
    null_hints = {
        "extract": ["filter_count", "fastp_args"],
        "merge": ["fastp_args"]
    }
    if cmd in null_hints.keys() and save_type == "full":
        for param in null_hints[cmd]:
            if param not in params.keys():
                config[cmd].add(tomlkit.comment(f"{param} ="))
    with config_file.open("w") as f:
        f.write(tomlkit.dumps(config))
    console.print("Exiting...")
    ctx.exit()
def set_lock_data(self, root, packages):  # type: (...) -> bool
    """Build the lockfile document from resolved *packages* and write it
    if it differs from the current lock data.

    Returns True when the lock data was (re)written, False when unchanged.
    """
    files = table()
    packages = self._lock_packages(packages)
    # Retrieving hashes
    for package in packages:
        if package["name"] not in files:
            files[package["name"]] = []

        for f in package["files"]:
            # Inline table with deterministically sorted keys per file entry.
            file_metadata = inline_table()
            for k, v in sorted(f.items()):
                file_metadata[k] = v

            files[package["name"]].append(file_metadata)

        if files[package["name"]]:
            files[package["name"]] = item(files[package["name"]]).multiline(True)

        # File hashes live under metadata.files, not on the package entry.
        del package["files"]

    lock = document()
    # "@" + "generated" is split so tools don't treat this source file
    # itself as generated.
    lock.add(comment("@" + "generated"))
    lock["package"] = packages

    if root.extras:
        lock["extras"] = {
            extra: [dep.pretty_name for dep in deps]
            for extra, deps in root.extras.items()
        }

    lock["metadata"] = {
        "lock-version": self._VERSION,
        "python-versions": root.python_versions,
        "content-hash": self._content_hash,
        "files": files,
    }

    # Only touch disk when the lock content actually changed.
    if not self.is_locked() or lock != self.lock_data:
        self._write_lock_data(lock)

        return True

    return False
def _add_items(
    table: Union[tomlkit.toml_document.TOMLDocument, tomlkit.items.Table],
    items: OrderedDict[str, Item],
):
    """Recursively add *items* to a tomlkit table/document, carrying comments.

    Nested OrderedDict values become subtables (with the item's comment as
    the first line inside the subtable); scalar values get an inline comment
    where the tomlkit item type supports one.
    """
    for k, item in items.items():
        if isinstance(item.value, OrderedDict):
            subtable = tomlkit.table()
            if item.comment:
                subtable.add(tomlkit.comment(item.comment))
            _add_items(subtable, item.value)
            table.add(k, subtable)
        else:
            table.add(k, item.value)
            if item.comment:
                try:
                    # Attach the comment inline after the value.
                    table[k].comment(item.comment)
                except AttributeError:
                    # Some tomlkit item types can't carry inline comments;
                    # best-effort: drop the comment rather than fail.
                    # TODO
                    pass
def to_toml(
    self,
    path_or_buff=None,
    aot: str = "row",
    comment: Union[None, str, Sequence[str]] = None,
    mode: str = "w",
    **kwargs,
) -> __qualname__:
    r"""
    Writes a TOML file.

    .. caution::

        This is provided as a preview. It may have issues and may change.

    Args:
        path_or_buff: Path or buffer
        aot: The name of the array of tables (i.e. ``[[ table ]]``)
        comment: Comment line(s) to add at the top of the document
        mode: 'w' (write) or 'a' (append)
        kwargs: Passed to :meth:`typeddfs.utils.Utils.write`
    """
    import tomlkit
    from tomlkit.toml_document import TOMLDocument

    # Normalize comment to a list: None -> [], str -> [str], else as given.
    comment = [] if comment is None else (
        [comment] if isinstance(comment, str) else comment)
    df = self.vanilla_reset()
    # One dict per row; serialized as an array-of-tables under key *aot*.
    data = [df.iloc[i].to_dict() for i in range(len(df))]
    aot_obj = ParseUtils.dicts_to_toml_aot(data)
    doc: TOMLDocument = tomlkit.document()
    for c in comment:
        doc.add(tomlkit.comment(c))
    doc[aot] = aot_obj
    txt = tomlkit.dumps(doc)
    return IoUtils.write(path_or_buff, txt, mode=mode, **kwargs)
def toml_file_header(doc):
    """Prepend the McuDB MIT license banner to *doc* as TOML comment lines."""
    license_lines = (
        "This file is part of the McuDB project, https://github.com/McuDB",
        "",
        "The MIT License (MIT)",
        "",
        "Copyright (c) 2020 Mark Olsson for McuDB",
        "",
        "Permission is hereby granted, free of charge, to any person obtaining a copy",
        'of this software and associated documentation files (the "Software"), to deal',
        "in the Software without restriction, including without limitation the rights",
        "to use, copy, modify, merge, publish, distribute, sublicense, and/or sell",
        "copies of the Software, and to permit persons to whom the Software is",
        "furnished to do so, subject to the following conditions:",
        "",
        "The above copyright notice and this permission notice shall be included in",
        "all copies or substantial portions of the Software.",
        "",
        'THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR',
        "IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,",
        "FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE",
        "AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER",
        "LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,",
        "OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN",
        "THE SOFTWARE.",
    )
    for line in license_lines:
        doc.add(comment(line))
def decompose(output, fill_descriptor_path, source, override):
    """Split an MCNP model into its filling universes.

    Each cell with a FILL option has its universe extracted to its own
    ``u<name>.i`` file; a TOML fill-descriptor records, per cell, the
    universe, optional transformation and file name. The emptied envelope
    model is saved as ``envelopes.i``.
    """
    logger = logging.getLogger(__name__)
    logger.debug("Loading model from %s", source)
    source = Path(source)
    if output is None:
        output = get_default_output_directory(source)
    else:
        output = Path(output)
    output.mkdir(parents=True, exist_ok=True)
    fill_descriptor = tk.document()
    fill_descriptor.add(
        tk.comment(f"This is a decomposition of \"{source.name}\" model"))
    parse_result: ParseResult = from_file(source)
    if parse_result.title:
        fill_descriptor.append("title", parse_result.title)
    model: Universe = parse_result.universe
    if model.comment:
        fill_descriptor.append("comment", model.comment)
    fill_descriptor.append("created", datetime.now())
    fill_descriptor.add(tk.nl())
    # Each filling universe is written to disk only once, even when
    # several cells reference it.
    already_processed_universes = set()
    for c in model:
        # Remove FILL from the cell; its content is preserved as a comment.
        fill = c.options.pop('FILL', None)
        if fill:
            universe = fill['universe']
            words = [f'FILL={universe.name()}']
            transform = fill.get('transform', None)
            if transform:
                # '*' prefix marks a transformation given in degrees.
                words[0] = '*' + words[0]
                words.append('(')
                words.extend(transform.get_words())
                words.append(')')
            comm = c.options.get('comment', [])
            comm.append(''.join(words))
            c.options['comment'] = comm
            descriptor = tk.table()
            universe_name = universe.name()
            fn = f'u{universe_name}.i'
            descriptor['universe'] = universe_name
            if transform:
                name = transform.name()
                if name is None:
                    # The transformation is anonymous, so, store it's specification
                    # omitting redundant '*', TR0 words, and interleaving space tokens
                    descriptor['transform'] = tk.array(
                        transform.mcnp_words()[2:][1::2])
                else:
                    descriptor['transform'] = name
            descriptor['file'] = fn
            fill_descriptor.append(str(c.name()), descriptor)
            fill_descriptor.add(tk.nl())
            if universe_name not in already_processed_universes:
                move_universe_attribute_to_comments(universe)
                save_mcnp(universe, output / fn, override)
                logger.debug("The universe %s has been saved to %s",
                             universe_name, fn)
                already_processed_universes.add(universe_name)
    with open(output / fill_descriptor_path, "w") as fid:
        res = tk.dumps(fill_descriptor)
        fid.write(res)
    envelopes_path = output / "envelopes.i"
    save_mcnp(model, envelopes_path, override)
    logger.debug("The envelopes are saved to %s", envelopes_path)
def write_toml(
    imccontainer,
    output_dirpath,
    output_tomlbase,
    progressfunc=None,
):
    """write an ImcContainer to a plaintext toml file and extracted .sub.imc files

    imccontainer: ImcContainer instance
    output_dirpath: new directory to create and to unpack IMC contents to
    output_tomlbase: base filename to which to write the .IMC.toml file
        (will be put in output_dirpath)
    progressfunc: function to run whenever a subsong is about to be extracted
        from the ImcContainer. It must accept three arguments: an int subsong
        index, an int total number of subsongs, and an
        imccontainer.ContainerSubsong instance
    """
    # for zero-padding the number in the filename of each extracted subsong, 03 vs 003
    if imccontainer.num_subsongs <= 100:
        ssidx_width = 2
    else:
        ssidx_width = len(str(imccontainer.num_subsongs - 1))

    # prepare toml dir and toml file. writing a bit early here, but if dir/file can't be
    # written, it's better to error before the time-consuming part instead of after
    tomldir = output_dirpath
    tomlpath = os.path.join(tomldir, output_tomlbase)
    os.makedirs(tomldir, exist_ok=True)
    with open(tomlpath, "wt", encoding="utf-8") as tomlfile:
        try:
            tomldoc = tomlkit.parse(_toml_header)
            tomldoc.add("Subsong", tomlkit.aot())
            for ssidx, csubsong in enumerate(imccontainer.csubsongs):
                if progressfunc is not None:
                    progressfunc(ssidx, imccontainer.num_subsongs, csubsong)

                # Extract subsong to file
                ss_basefilename = f"{ssidx:0{ssidx_width}}.{csubsong.name}{SUBIMC_EXT}"
                # sanitize dir separators out of filename so it doesn't screw up
                ss_basefilename = ss_basefilename.replace(os.path.sep, "_")
                with open(os.path.join(tomldir, ss_basefilename),
                          "wb") as subsongfile:
                    subsongfile.write(csubsong.get_imcdata())

                # Gather & add this subsong's info to toml document
                tomlsubsong = tomlkit.table()
                tomlsubsong["name"] = csubsong.name
                tomlsubsong["loadmode"] = csubsong.loadmode
                tomlsubsong["basefile"] = ss_basefilename
                # e.g. "123" for a 3-channel subsong; used in the example
                # replacement-audio mapping comment below.
                channel_nums = "".join(
                    str(x) for x in range(1, csubsong.num_channels + 1))
                comment = (f'channels-{channel_nums}-to-{channel_nums} = "'
                           f"replacement-audio{SUBSONG_FORMATS['wav']}"
                           '"')
                tomlsubsong.add(tomlkit.comment(comment))

                # Gather & add this subsong's diff-patch-info to toml document,
                # omitting anything with a None value
                tomldiffpinfo = tomlkit.table().indent(4)
                if csubsong.rawname is not None:
                    # bytes to ints
                    tomldiffpinfo["rawname"] = [x for x in csubsong.rawname]
                if not (csubsong.unk1, csubsong.unk2) == (None, None):
                    unk1 = 0 if csubsong.unk1 is None else csubsong.unk1
                    unk2 = 0 if csubsong.unk2 is None else csubsong.unk2
                    tomldiffpinfo["unk"] = [unk1, unk2]
                # saving original block layout
                if csubsong.original_block_layout is not None:
                    ofbp, obpc = csubsong.original_block_layout
                    # convert from possibly a tomlkit Integer (which retains indent) to
                    # a plain ol' int to prevent indent problems when rewritten to toml
                    ofbp, obpc = int(ofbp), int(obpc)
                    if ofbp is not None:
                        tomldiffpinfo["frames-per-block"] = ofbp
                    if obpc is not None:
                        tomldiffpinfo["blocks-per-channel"] = obpc
                if tomldiffpinfo:
                    # if tomldiffpinfo is empty, we won't bother
                    tomlsubsong.add("diff-patch-info", tomldiffpinfo)

                # noinspection PyArgumentList
                tomldoc["Subsong"].append(tomlsubsong)
        except Exception:
            # noinspection PyBroadException
            try:
                # For debug output, try to write current tomldoc + error traceback
                import traceback
                tb = traceback.format_exc()
                tomlfile.write(tomldoc.as_string())
                tomlfile.write(
                    "\n\n# == ERROR ENCOUNTERED DURING WRITING ==\n#")
                tomlfile.write("\n#".join(tb.split("\n")))
            except Exception:
                pass
            raise
        tomlfile.write(tomldoc.as_string())
def generate_yombo_toml(self,
                        display_extra_warning: Optional[bool] = None
                        ) -> List[str]:
    """
    Generates the output for yombo.toml in TOML format - like INI, but better.

    If display_extra_warning is True, will display an even more nasty message
    to not edit this file while its running.

    :param display_extra_warning:
    :return:
    """
    # NOTE(review): actually returns a 2-tuple (yombo.toml text, meta text);
    # the List[str] annotation looks off -- confirm before changing.

    def wrap_comment_lines(text,
                           trailing_comment: Optional[bool] = None,
                           contents: Optional = None):
        """ Creates a comment that wraps long lines. """
        output = wrap(text, 75, break_long_words=False)
        if contents is None:
            contents = ini_contents
        for line in output:
            if trailing_comment is True:
                contents.add(tk.comment(f"{line: <76} #"))
            else:
                contents.add(tk.comment(f"{line}"))

    # Two documents are built in parallel: the user-facing yombo.toml and a
    # meta document holding non-essential bookkeeping about its items.
    ini_contents = tk.document()
    meta_contents = tk.document()
    meta_contents.add(
        tk.comment(
            "################################################################################"
        ))
    meta_contents.add(
        tk.comment(
            " Stores additional information about yombo.toml, non-essential."
        ))
    meta_contents.add(
        tk.comment(
            "################################################################################"
        ))
    meta_contents.add(tk.nl())
    ini_contents.add(tk.comment(" "))
    ini_contents.add(tk.comment(_("lib.configs.yombo_toml.about")))
    ini_contents.add(tk.comment(" "))
    if display_extra_warning is True:
        # Gateway is still running: emit the loud do-not-edit banner.
        ini_contents.add(tk.comment(" "))
        ini_contents.add(
            tk.comment(
                "###############################################################################"
            ))
        ini_contents.add(
            tk.comment(f'{_("lib.configs.yombo_toml.warning"):^76}'))
        ini_contents.add(
            tk.comment(
                "###############################################################################"
            ))
        ini_contents.add(
            tk.comment(_("lib.configs.yombo_toml.still_running")))
        ini_contents.add(
            tk.comment(
                _("lib.configs.yombo_toml.still_running_pid",
                  number=str(self.pid))))
        ini_contents.add(
            tk.comment(
                "###############################################################################"
            ))
    else:
        wrap_comment_lines(_("lib.configs.yombo_toml.dont_edit"), False)
    ini_contents.add(tk.comment(" "))
    ini_contents.add(tk.comment(" "))

    def add_config_item(section, current_path, data):
        """ Recursive function to output config contents. """
        config_data = data.to_dict(include_meta=False)
        # Only emit a description comment when a translation exists
        # (the lookup returns the msgid itself when untranslated).
        item_msgid = f'lib.configs.item.{".".join(current_path)}'
        item_description = _(item_msgid)
        if item_msgid != item_description:
            wrap_comment_lines(item_description, contents=section)
        section.add(current_path[-1], config_data["value"])
        if item_msgid != item_description:
            section.add(tk.nl())
        # Everything except the value itself goes to the meta document.
        meta_data = {
            key: config_data[key]
            for key in config_data.keys() - {'value', 'config'}
        }
        access_dict_set(config_data["config"], meta_contents, meta_data)

    def add_dictionary(current_path, data):
        """ Recursive function to output config contents. """
        section = tk.table()
        if len(current_path):
            # Non-root section: boxed banner plus localized description.
            ini_contents.add(tk.nl())
            ini_contents.add(tk.nl())
            ini_contents.add(
                tk.comment(
                    "##############################################################################"
                ))
            ini_contents.add(
                tk.comment(f'{".".join(current_path): ^76} #'))
            ini_contents.add(tk.comment(f"{'': <76} #"))
            wrap_comment_lines(
                _(f'lib.configs.section.{".".join(current_path)}'),
                True,
                contents=ini_contents)
            ini_contents.add(
                tk.comment(
                    "##############################################################################"
                ))
        for config, items in data.items():
            temp_path = deepcopy(current_path)
            temp_path.append(config)
            if isinstance(items, ConfigItem):
                if items.value is None:
                    continue
                add_config_item(section, temp_path, items)
            else:
                add_dictionary(temp_path, items)
        if len(current_path):
            ini_contents[".".join(current_path)] = section

    add_dictionary([], self.configs)
    return tk.dumps(ini_contents), tk.dumps(meta_contents)
def test_build_example(example):
    """Recreate the "example" fixture document through the tomlkit builder
    API and verify that rendering it matches the fixture text exactly."""
    expected = example("example")

    root = document()
    root.add(comment("This is a TOML document. Boom."))
    root.add(nl())
    root.add("title", "TOML Example")

    # [owner] -- includes a first-class datetime with a trailing comment.
    owner_tbl = table()
    owner_tbl.add("name", "Tom Preston-Werner")
    owner_tbl.add("organization", "GitHub")
    owner_tbl.add("bio", "GitHub Cofounder & CEO\\nLikes tater tots and beer.")
    owner_tbl.add("dob", datetime.datetime(1979, 5, 27, 7, 32, tzinfo=_utc))
    owner_tbl["dob"].comment("First class dates? Why not?")
    root.add("owner", owner_tbl)

    # [database] -- built via item assignment.
    db_tbl = table()
    db_tbl["server"] = "192.168.1.1"
    db_tbl["ports"] = [8001, 8001, 8002]
    db_tbl["connection_max"] = 5000
    db_tbl["enabled"] = True
    root["database"] = db_tbl

    # [servers] -- a super-table holding two indented sub-tables.
    servers_tbl = table()
    servers_tbl.add(nl())
    tip = comment(
        "You can indent as you please. Tabs or spaces. TOML don't care."
    )
    tip.indent(2)
    tip.trivia.trail = ""
    servers_tbl.add(tip)

    alpha_tbl = table()
    servers_tbl.append("alpha", alpha_tbl)
    alpha_tbl.indent(2)
    alpha_tbl.add("ip", "10.0.0.1")
    alpha_tbl.add("dc", "eqdc10")

    beta_tbl = table()
    servers_tbl.append("beta", beta_tbl)
    beta_tbl.add("ip", "10.0.0.2")
    beta_tbl.add("dc", "eqdc10")
    beta_tbl.add("country", "中国")
    beta_tbl["country"].comment("This should be parsed as UTF-8")
    beta_tbl.indent(2)
    root["servers"] = servers_tbl

    # [clients] -- nested arrays plus a multi-line array literal.
    clients_tbl = table()
    root.add("clients", clients_tbl)
    data_item = item([["gamma", "delta"], [1, 2]])
    data_item.comment("just an update to make sure parsers support it")
    clients_tbl["data"] = data_item
    clients_tbl.add(nl())
    clients_tbl.add(comment("Line breaks are OK when inside arrays"))
    clients_tbl["hosts"] = array("""[
"alpha",
"omega"
]""")

    root.add(nl())
    root.add(comment("Products"))

    # [[products]] -- array-of-tables with two indented entries; appended
    # hammer-first to match the fixture order.
    products_aot = aot()
    root["products"] = products_aot

    nail_tbl = table()
    nail_tbl.indent(2)
    nail_tbl["name"] = "Nail"
    nail_tbl["sku"] = 284758393
    nail_tbl["color"] = "gray"

    hammer_tbl = table()
    hammer_tbl.indent(2)
    hammer_tbl["name"] = "Hammer"
    hammer_tbl["sku"] = 738594937

    products_aot.append(hammer_tbl)
    products_aot.append(nail_tbl)

    assert expected == root.as_string()
if pages is None: pages = data["pagination"]["pages"] for election in data["results"]: lower_name = states.by_postal_code[election["election_state"]] d = os.path.join("states", lower_name) elections_raw_fn = os.path.join(d, "elections_raw.toml") if os.path.exists(elections_raw_fn): with open(elections_raw_fn, "r") as f: elections_raw_file = tomlkit.loads(f.read()) else: elections_raw_file = tomlkit.document() elections_raw_file.add( tomlkit.comment( "This holds raw scraped data for reference and shouldn't be hand edited." )) key = election["election_date"].replace("-", "") if lower_name == "idaho" and key == "20200602": key = "20200519" if lower_name == "north_carolina" and key == "20200623": key = "20200512" if key not in elections_raw_file and election[ "election_type_full"] != "Convention" and election[ "election_type_id"] != "CAU": print(election) print() continue empty_keys = [] for k in election:
def _dump_v2_manifest_as_toml(manifest):
    """Serialize a packaging-format-2 manifest dict to a TOML string.

    The document is assembled section by section (top-level fields, upstream,
    integration, install, resources), FIXME placeholder comments are attached
    to the integration keys, and finally the dotted keys that tomlkit quotes
    are unquoted in the dumped text.
    """
    import re
    from tomlkit import document, nl, table, dumps, comment

    doc = document()
    doc.add("packaging_format", 2)
    doc.add(nl())
    doc.add("id", manifest["id"])
    doc.add("name", manifest["name"])
    for lang, text in manifest["description"].items():
        doc.add(f"description.{lang}", text)
    doc.add(nl())
    doc.add("version", manifest["version"])
    doc.add(nl())
    doc.add("maintainers", manifest["maintainers"])

    # [upstream] -- copied verbatim.
    upstream_tbl = table()
    for field, val in manifest["upstream"].items():
        upstream_tbl[field] = val
    doc["upstream"] = upstream_tbl

    # [integration] -- copied, then annotated with FIXME placeholders for
    # the packager to fill in (insertion order matters for the output).
    integration_tbl = table()
    for field, val in manifest["integration"].items():
        integration_tbl.add(field, val)
    fixme_notes = (
        ("architectures",
         'FIXME: can be replaced by a list of supported archs using the dpkg --print-architecture nomenclature (amd64/i386/armhf/arm64/armel), for example: ["amd64", "i386"]'),
        ("ldap", 'FIXME: replace with true, false, or "not_relevant"'),
        ("sso", 'FIXME: replace with true, false, or "not_relevant"'),
        ("disk",
         'FIXME: replace with an **estimate** minimum disk requirement. e.g. 20M, 400M, 1G, ...'),
        ("ram.build",
         'FIXME: replace with an **estimate** minimum ram requirement. e.g. 50M, 400M, 1G, ...'),
        ("ram.runtime",
         'FIXME: replace with an **estimate** minimum ram requirement. e.g. 50M, 400M, 1G, ...'),
    )
    for field, note in fixme_notes:
        integration_tbl[field].comment(note)
    doc["integration"] = integration_tbl

    # [install] -- one indented sub-table per question.
    install_tbl = table()
    for question, spec in manifest["install"].items():
        install_tbl[question] = table()
        entry = install_tbl[question]
        entry.indent(4)
        if question in ["domain", "path", "admin", "is_public", "password"]:
            entry.add(
                comment(
                    "this is a generic question - ask strings are automatically handled by Yunohost's core"
                ))
        # Dotted ask.*/help.* keys first, then every remaining field.
        for lang, text in spec.get("ask", {}).items():
            entry.add(f"ask.{lang}", text)
        for lang, text in spec.get("help", {}).items():
            entry.add(f"help.{lang}", text)
        for field, val in spec.items():
            if field in ["ask", "help"]:
                continue
            entry.add(field, val)
    doc["install"] = install_tbl

    # [resources] -- one indented sub-table per resource; entries of the apt
    # "extras" list get an extra indent level.
    resources_tbl = table()
    for resource, spec in manifest["resources"].items():
        resources_tbl[resource] = table()
        entry = resources_tbl[resource]
        entry.indent(4)
        for field, val in spec.items():
            entry.add(field, val)
            if resource == "apt" and field == "extras":
                for extra in entry[field]:
                    extra.indent(8)
    doc["resources"] = resources_tbl

    dump = dumps(doc)

    # tomlkit quotes dotted keys; strip the quotes so the output uses bare
    # dotted-key syntax (description.en, ask.fr, ram.build, ...).
    dump = re.sub(r'\"((description|ask|help)\.[a-z]{2})\"', r'\1', dump)
    for dotted in ("ram.build", "ram.runtime", "main.url", "main.default"):
        dump = dump.replace(f'"{dotted}"', dotted)

    return dump