def generate_toml_help(config_cls, *, parent=None):
    """Build a commented TOML table documenting *config_cls* and its defaults.

    The class docstring becomes a leading comment block; each attrs field is
    emitted either as a nested sub-table (for nested attrs classes) or as a
    value/commented-out placeholder, preceded by its help text when present.
    """
    if parent is None:
        parent = tomlkit.table()

    # Class docstring -> leading comment block.
    for doc_line in trim(config_cls.__doc__).split("\n"):
        parent.add(tomlkit.comment(doc_line))
    parent.add(tomlkit.nl())

    for field in attr.fields(config_cls):
        field_meta = field.metadata.get(CNF_KEY)
        if attr.has(field.type):
            # Nested attrs class: recurse into its own table.
            parent.add(field.name, generate_toml_help(field.type))
        else:
            if field_meta:
                parent.add(tomlkit.comment(field_meta.help))
            if field.default in (missing, attr.NOTHING):
                # No default available: emit a commented-out placeholder.
                parent.add(tomlkit.comment(f"{field.name} ="))
            else:
                value = field.default() if callable(field.default) else field.default
                parent.add(field.name, value)
        parent.add(tomlkit.nl())
    return parent
def add_dictionary(current_path, data):
    """
    Recursive function to output config contents.
    """
    section = tk.table()
    dotted = ".".join(current_path)
    banner = "##############################################################################"
    if len(current_path):
        # Non-root section: emit a framed banner naming the section before it.
        ini_contents.add(tk.nl())
        ini_contents.add(tk.nl())
        ini_contents.add(tk.comment(banner))
        ini_contents.add(tk.comment(f'{dotted: ^76} #'))
        ini_contents.add(tk.comment(f"{'': <76} #"))
        wrap_comment_lines(
            _(f'lib.configs.section.{dotted}'),
            True,
            contents=ini_contents)
        ini_contents.add(tk.comment(banner))
    for entry_name, entry in data.items():
        child_path = deepcopy(current_path)
        child_path.append(entry_name)
        if isinstance(entry, ConfigItem):
            # Unset items are omitted from the generated file.
            if entry.value is None:
                continue
            add_config_item(section, child_path, entry)
        else:
            add_dictionary(child_path, entry)
    if len(current_path):
        ini_contents[dotted] = section
def write_toml(self, filename: Union[str, Path]) -> PhantomConfig:
    """Write config to TOML file.

    Parameters
    ----------
    filename
        The name of the TOML output file.
    """
    # TODO: writing to TOML does not preserve the comments.
    document = tomlkit.document()

    if self.header is not None:
        for header_line in self.header:
            document.add(tomlkit.comment(header_line))
        document.add(tomlkit.nl())

    for block_key, block_val in self.to_dict().items():
        block = tomlkit.table()
        if isinstance(block_val, dict):
            # Each entry is a (value, comment) pair.
            for name, (value, comment) in block_val.items():
                if isinstance(value, datetime.timedelta):
                    # TOML has no timedelta type; serialize as a string.
                    value = _convert_timedelta_to_str(value)
                block.add(tomlkit.nl())
                if comment is not None:
                    block.add(tomlkit.comment(comment))
                block.add(name, value)
        document.add(block_key, block)

    with open(filename, 'w') as fp:
        fp.write(tomlkit.dumps(document))
    return self
def dump(self, configs, path):
    """Serialize *configs* to a TOML file at *path*, with comments.

    Walks ``configs.get_prop_paths()`` (dotted path, value, property triples)
    and writes each property into the right (possibly nested) table.
    ``None`` values are emitted as commented-out placeholders; property
    comments become block comments (long) or inline comments (short).
    """
    doc = tomlkit.document()
    root_comment = getattr(configs, '__doc__', '')

    def add_comment(sec, comment):
        # Wrap the comment text and emit one TOML comment line per wrapped line.
        for line in textwrap.wrap(comment.strip()):
            sec.add(tomlkit.comment(line))

    def add_value(sec, k, v):
        # None -> commented-out placeholder; returns True iff a real value was added.
        is_none = v is None
        if is_none:
            sec.add(tomlkit.comment(f'{k} = # Uncomment to use'))
        else:
            sec.add(k, v)
        return not is_none

    if root_comment:
        add_comment(doc, root_comment.strip())
        doc.add(tomlkit.nl())
    for p, value, prop in configs.get_prop_paths():
        # Resolve dotted paths into the already-created parent tables.
        section = doc
        key = p
        if '.' in p:
            parts = p.split('.')
            key = parts[-1]
            for part in parts[:-1]:
                section = section[part]
        if isinstance(value, Nestable):
            # Just add a table for those.
            table = tomlkit.table()
            section.add(key, table)
            if prop.comment is not None:
                add_comment(table, prop.comment)
                table.add(tomlkit.nl())
        else:
            if prop.comment is not None:
                if len(prop.comment) > 40:
                    # Only short comments are inlined.
                    section.add(tomlkit.nl())
                    add_comment(section, prop.comment)
                    add_value(section, key, value)
                else:
                    good = add_value(section, key, value)
                    if good:
                        # NOTE(review): booleans need `.item()` to reach the
                        # tomlkit item wrapper; plain indexing unwraps them —
                        # presumably a tomlkit quirk, confirm on upgrade.
                        if isinstance(value, bool):
                            item = section.item(key)
                        else:
                            item = section[key]
                        item.comment(prop.comment)
            else:
                add_value(section, key, value)
    with open(path, 'w') as file:
        file.write(tomlkit.dumps(doc))
def source_to_table(source: Source) -> Table:
    """Render *source* as a tomlkit table, one key per entry, ending with a blank line."""
    from tomlkit import nl, table

    result: Table = table()
    for field, field_value in source.to_dict().items():
        result.add(field, field_value)
    result.add(nl())
    return result
def init(self, title=None):
    """Record project settings in mudkip.toml or pyproject.toml and seed an index page.

    Prefers an existing mudkip.toml, then an existing pyproject.toml (under
    [tool.mudkip]), and otherwise creates a new mudkip.toml.
    """
    title = title or self.config.title or os.path.basename(os.path.abspath("."))

    settings = tomlkit.table()
    settings["title"] = title
    settings["preset"] = self.config.preset.name

    # Only persist directories that differ from the defaults.
    source_dir = str(self.config.source_dir)
    output_dir = str(self.config.output_dir)
    if source_dir != self.config.default_source_dir:
        settings["source_dir"] = source_dir
    if output_dir != self.config.default_output_dir:
        settings["output_dir"] = output_dir
    settings.add(tomlkit.nl())

    if self.mudkip.exists():
        doc = self.mudkip.read()
        if "mudkip" in doc:
            doc["mudkip"].update(settings)
        else:
            doc["mudkip"] = settings
        self.mudkip.write(doc)
    elif self.pyproject.exists():
        doc = self.pyproject.read()
        tool = None
        try:
            tool = doc["tool"]
            tool["mudkip"].update(settings)
        except KeyError:
            # Either [tool] or [tool.mudkip] is missing; create whichever is absent.
            if tool is None:
                doc["tool"] = {"mudkip": settings}
            else:
                tool["mudkip"] = settings
        self.pyproject.write(doc)
    else:
        self.mudkip.write(tomlkit.document().add("mudkip", settings))

    # Seed a title-only index page unless one already exists in either format.
    index_rst = self.config.source_dir / "index.rst"
    index_md = self.config.source_dir / "index.md"
    if not index_rst.is_file() and not index_md.is_file():
        index_rst.write_text(f"{title}\n{'=' * len(title)}\n")
def add_big_title(docu, title: str, width: int = 50):
    """Append three blank lines and a boxed, centered title banner (as comments) to *docu*."""
    border = "=" * width
    blank_row = "|" + " " * (width - 2) + "|"
    banner = [
        border,
        blank_row,
        f"|{title.center(width - 2)}|",
        blank_row,
        border,
    ]
    docu.add(nl())
    docu.add(nl())
    docu.add(nl())
    for row in banner:
        docu.add(comment(row))
def write_to_file(parameters: dataclass,
                  filename: Union[str, Path],
                  *,
                  header: str = None,
                  overwrite: bool = False) -> None:
    """
    Write the parameters to TOML file.

    Parameters
    ----------
    parameters : dataclass
        The parameters dataclass to write to file.
    filename : str or Path
        The name of the file to write. Should have extension '.toml'.
    header : str
        A header written as a TOML comment at the top of the file.
    overwrite : bool, default=False
        Whether to overwrite if the file exists.

    Raises
    ------
    ValueError
        If the file already exists and overwrite is False.
    """
    # Guard clause: refuse to clobber an existing file unless asked to
    # (was two nested ifs).
    if not overwrite and pathlib.Path(filename).exists():
        raise ValueError(
            'file already exists, add overwrite=True to overwrite')

    document = tomlkit.document()

    if header is not None:
        # Wrap the header at 70 columns, one comment line per wrapped line
        # (avoid rebinding the `header` parameter).
        for header_line in textwrap.wrap(header, 70):
            document.add(tomlkit.comment(header_line))

    for param in dataclasses.fields(parameters):
        description = textwrap.wrap(param.metadata['description'], 70)
        value = getattr(parameters, param.name)
        # TOML has no tuple type; serialize tuples as lists.
        if isinstance(value, tuple):
            value = list(value)
        document.add(tomlkit.nl())
        for desc in description:
            document.add(tomlkit.comment(desc))
        document.add(param.name, value)

    with open(filename, 'w') as fp:
        fp.write(tomlkit.dumps(document))
def _migrate_source(self):
    """Copy non-PyPI Pipenv package sources into the poetry ``source`` array."""
    if "source" not in self._pipenv:
        return
    poetry = self._pyproject["tool"]["poetry"]
    for entry in self._pipenv["source"]:
        # The default PyPI index needs no explicit source entry.
        if entry["name"] == "pypi":
            continue
        src = table()
        src.add("name", entry["name"])
        src.add("url", entry["url"])
        src.add(nl())
        if "source" not in poetry:
            poetry["source"] = aot()
        poetry["source"].append(src)
def add_config_item(section, current_path, data):
    """
    Recursive function to output config contents.
    """
    config_data = data.to_dict(include_meta=False)
    item_msgid = f'lib.configs.item.{".".join(current_path)}'
    item_description = _(item_msgid)
    # A real translation exists only when the lookup returns something new.
    has_description = item_msgid != item_description

    if has_description:
        wrap_comment_lines(item_description, contents=section)
    section.add(current_path[-1], config_data["value"])
    if has_description:
        section.add(tk.nl())

    # Everything except the value and config reference is metadata,
    # stored in the parallel meta document.
    meta_keys = config_data.keys() - {'value', 'config'}
    meta_data = {key: config_data[key] for key in meta_keys}
    access_dict_set(config_data["config"], meta_contents, meta_data)
def dicts_to_toml_aot(cls, dicts: Sequence[Mapping[str, Any]]):
    """
    Make a tomlkit Document consisting of an array of tables ("AOT").

    Args:
        dicts: A sequence of dictionaries

    Returns:
        A tomlkit`AoT<https://github.com/sdispater/tomlkit/blob/master/tomlkit/items.py>`_
        (i.e. ``[[array]]``)
    """
    import tomlkit

    result = tomlkit.aot()
    for mapping in dicts:
        entry = tomlkit.table()
        # Register the table first, then fill it in place.
        result.append(entry)
        for field, field_value in mapping.items():
            entry.add(field, field_value)
        entry.add(tomlkit.nl())
    return result
def build_table(header=None):
    """Build a tomlkit table from ``dic``, emitting comments for annotated entries.

    An annotated entry is a mapping carrying ``"value"`` plus either a single
    ``"comment"`` or a list of ``"comments"``; plain values are added directly.

    Raises
    ------
    _ERR_NON_VAILD_DEFAULT_CFG
        If an annotated entry lacks its comment keys or any tomlkit
        operation fails.
    """
    try:
        t = table()
        if header:
            t.add(comment(header))
        for k, v in dic.items():
            if isinstance(v, Dict):
                if v.get("comment"):
                    t.add(comment(v["comment"]))
                elif v.get("comments"):
                    for c in v["comments"]:
                        t.add(comment(c))
                else:
                    # Annotated entry without any comment key is malformed.
                    raise _ERR_NON_VAILD_DEFAULT_CFG
                t.add(k, v["value"])
                t.add(nl())
            else:
                t.add(k, v)
    except Exception as exc:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; chain the original cause for debuggability.
        raise _ERR_NON_VAILD_DEFAULT_CFG from exc
    return t
def write_toml(
    xgm: XgmContainer,
    output_dirpath: str,
    output_tomlbase: str,
    progressfunc: Callable = None,
) -> None:
    """write an XgmContainer to a plaintext toml file and extracted contents

    :param xgm: XgmContainer instance
    :param output_dirpath: new directory to create and to unpack XGM contents to
    :param output_tomlbase: base filename to which to write the .XGM.toml file (will
        be put in output_dirpath)
    :param progressfunc: function to run whenever an item of the XgmContainer is
        about to be processed. It must accept three arguments: an int item index,
        an int total number of items, and an xgmitem.XgmImageItem/XgmModelItem
        instance
    """
    # prepare toml dir and toml file. writing a bit early here, but if dir/file can't be
    # written, it's better to error before the time-consuming part instead of after
    tomldir = output_dirpath
    tomlpath = os.path.join(tomldir, output_tomlbase)
    os.makedirs(tomldir, exist_ok=True)
    num_imageitems, num_modelitems = len(xgm.imageitems), len(xgm.modelitems)
    with open(tomlpath, "wt", encoding="utf-8") as tomlfile:
        try:
            tomldoc = tomlkit.parse(_toml_header)
            tomldoc.add("ImageItem", tomlkit.aot())
            for idx, imageitem in enumerate(xgm.imageitems):
                if progressfunc is not None:
                    progressfunc(idx, num_imageitems, imageitem)

                # Extract image item to file
                imageitem_outname = imageitem.name16.replace(os.path.sep, "_")  # sanitize
                with open(os.path.join(tomldir, imageitem_outname), "wb") as itemfile:
                    itemfile.write(imageitem.filedata)

                # Gather & add this image item's info to toml document
                tomlimage = tomlkit.table()
                tomlimage["name16"] = imageitem.name16
                if imageitem_outname != imageitem.name16:
                    tomlimage["file-path"] = imageitem_outname
                # noinspection PyArgumentList
                tomldoc["ImageItem"].append(tomlimage)

            if xgm.modelitems:
                tomldoc.add(tomlkit.nl())
                tomldoc.add("ModelItem", tomlkit.aot())
            for idx, modelitem in enumerate(xgm.modelitems):
                if progressfunc is not None:
                    progressfunc(idx, num_modelitems, modelitem)

                # Extract model item to file
                modelitem_outname = modelitem.name16.replace(os.path.sep, "_")  # sanitize
                with open(os.path.join(tomldir, modelitem_outname), "wb") as itemfile:
                    itemfile.write(modelitem.filedata)

                # Extract animation entry data to file
                animsep_outname = replaceext(modelitem_outname, ANIMSEP_EXT)
                with open(os.path.join(tomldir, animsep_outname), "wb") as animsepfile:
                    animsepfile.write(modelitem.animdata)

                # Gather & add this model item's info to toml document
                tomlmodel = tomlkit.table()
                tomlmodel["name16"] = modelitem.name16
                if modelitem_outname != modelitem.name16:
                    # BUGFIX: was `tomlimage["file-path"] = ...`, which wrote the
                    # sanitized model path onto the last *image* table instead of
                    # this model's table.
                    tomlmodel["file-path"] = modelitem_outname
                # noinspection PyArgumentList
                tomldoc["ModelItem"].append(tomlmodel)

            tomlfile.write(tomldoc.as_string())
        except Exception:
            # noinspection PyBroadException
            try:
                # For debug output, try to write current tomldoc + error traceback
                import traceback

                tb = traceback.format_exc()
                tomlfile.write(tomldoc.as_string())
                tomlfile.write(
                    "\n\n# == ERROR ENCOUNTERED DURING WRITING ==\n#")
                tomlfile.write("\n#".join(tb.split("\n")))
            except Exception:
                pass
            raise
def source_to_table(source: "Source") -> "Table":
    """Build a tomlkit table from ``source.to_dict()`` and append a trailing blank line."""
    entries = source.to_dict()
    source_table: "Table" = table()
    for entry_key in entries:
        source_table.add(entry_key, entries[entry_key])
    source_table.add(nl())
    return source_table
def test_build_example(example):
    """Rebuild the canonical TOML example programmatically and compare byte-for-byte
    against the reference text supplied by the ``example`` fixture."""
    content = example("example")

    doc = document()
    doc.add(comment("This is a TOML document. Boom."))
    doc.add(nl())
    doc.add("title", "TOML Example")

    owner = table()
    owner.add("name", "Tom Preston-Werner")
    owner.add("organization", "GitHub")
    owner.add("bio", "GitHub Cofounder & CEO\\nLikes tater tots and beer.")
    owner.add("dob", datetime.datetime(1979, 5, 27, 7, 32, tzinfo=_utc))
    owner["dob"].comment("First class dates? Why not?")
    doc.add("owner", owner)

    database = table()
    database["server"] = "192.168.1.1"
    database["ports"] = [8001, 8001, 8002]
    database["connection_max"] = 5000
    database["enabled"] = True
    doc["database"] = database

    servers = table()
    # Free-standing comment inside the table, indented and with trailing
    # whitespace stripped.
    servers.add(nl())
    c = comment(
        "You can indent as you please. Tabs or spaces. TOML don't care."
    ).indent(2)
    c.trivia.trail = ""
    servers.add(c)
    alpha = table()
    servers.append("alpha", alpha)
    alpha.indent(2)
    alpha.add("ip", "10.0.0.1")
    alpha.add("dc", "eqdc10")
    beta = table()
    servers.append("beta", beta)
    beta.add("ip", "10.0.0.2")
    beta.add("dc", "eqdc10")
    beta.add("country", "中国")
    beta["country"].comment("This should be parsed as UTF-8")
    beta.indent(2)
    doc["servers"] = servers

    clients = table()
    doc.add("clients", clients)
    clients["data"] = item(
        [["gamma", "delta"],
         [1, 2]]).comment("just an update to make sure parsers support it")
    clients.add(nl())
    clients.add(comment("Line breaks are OK when inside arrays"))
    # NOTE(review): internal formatting of this multi-line array literal was
    # reconstructed — confirm against the reference example file.
    clients["hosts"] = array("""[
  "alpha",
  "omega"
]""")
    doc.add(nl())
    doc.add(comment("Products"))

    products = aot()
    doc["products"] = products

    hammer = table().indent(2)
    hammer["name"] = "Hammer"
    hammer["sku"] = 738594937

    nail = table().indent(2)
    nail["name"] = "Nail"
    nail["sku"] = 284758393
    nail["color"] = "gray"

    products.append(hammer)
    products.append(nail)

    assert content == doc.as_string()
toml_file = os.path.join( os.path.dirname(__file__), "..", "data", "mcu", "stm", path.stem.lower() + ".toml", ) toml = TOMLFile(toml_file) doc = document() toml_file_header(doc) doc.add(nl()) doc.add("name", xml_root.get("RefName")) doc.add("object_type", "mcu") if xml_root.get("Package"): doc.add("package", xml_root.get("Package")) if xml_root.get("Family"): doc.add("family", xml_root.get("Family")) if xml_root.get("Line"): doc.add("line", xml_root.get("Line")) if xml_root.get("HasPowerPad"): doc.add("has_power_pad", toml_boolean(xml_root.get("HasPowerPad"))) if xml_root.findtext("Core", None, xml_root.nsmap): doc.add("core", xml_root.findtext("./Core", None, xml_root.nsmap)) if xml_root.findtext("Frequency", None, xml_root.nsmap): doc.add(
def create_pyproject_from_package(cls, package: Package, path: Path | None = None
                                  ) -> TOMLDocument:
    """Build a pyproject.toml document describing *package*.

    Fills ``[tool.poetry]`` with metadata, extras, and dependencies (grouped
    under ``[tool.poetry.group.<name>.dependencies]`` for non-main groups).
    If *path* is given, also writes the document to ``<path>/pyproject.toml``.
    """
    import tomlkit

    from poetry.utils.dependency_specification import dependency_to_specification

    pyproject: dict[str, Any] = tomlkit.document()
    pyproject["tool"] = tomlkit.table(is_super_table=True)

    content: dict[str, Any] = tomlkit.table()
    pyproject["tool"]["poetry"] = content

    content["name"] = package.name
    content["version"] = package.version.text
    content["description"] = package.description
    content["authors"] = package.authors
    content["license"] = package.license.id if package.license else ""

    if package.classifiers:
        content["classifiers"] = package.classifiers

    # Optional metadata: copy each attribute only if set on the package.
    for key, attr in {
        ("documentation", "documentation_url"),
        ("repository", "repository_url"),
        ("homepage", "homepage"),
        ("maintainers", "maintainers"),
        ("keywords", "keywords"),
    }:
        value = getattr(package, attr, None)
        if value:
            content[key] = value

    readmes = []
    for readme in package.readmes:
        readme_posix_path = readme.as_posix()
        # Prefer a path relative to the project root; fall back to the
        # original path when relative_to() raises ValueError.
        with contextlib.suppress(ValueError):
            if package.root_dir:
                readme_posix_path = readme.relative_to(
                    package.root_dir).as_posix()
        readmes.append(readme_posix_path)
    if readmes:
        content["readme"] = readmes

    # Collect names of dependencies that belong to an extra; they must be
    # marked optional in the dependency section.
    optional_dependencies = set()
    extras_section = None
    if package.extras:
        extras_section = tomlkit.table()
        for extra in package.extras:
            _dependencies = []
            for dependency in package.extras[extra]:
                _dependencies.append(dependency.name)
                optional_dependencies.add(dependency.name)
            extras_section[extra] = _dependencies

    optional_dependencies = set(optional_dependencies)
    dependency_section = content["dependencies"] = tomlkit.table()
    dependency_section["python"] = package.python_versions

    for dep in package.all_requires:
        constraint: DependencySpec | str = dependency_to_specification(
            dep, tomlkit.inline_table())
        if not isinstance(constraint, str):
            if dep.name in optional_dependencies:
                constraint["optional"] = True
            # Collapse single-key {"version": ...} specs to a bare string.
            if len(constraint) == 1 and "version" in constraint:
                constraint = cast(str, constraint["version"])
            elif not constraint:
                constraint = "*"
        for group in dep.groups:
            if group == MAIN_GROUP:
                dependency_section[dep.name] = constraint
            else:
                # Lazily create [tool.poetry.group.<group>.dependencies].
                if "group" not in content:
                    content["group"] = tomlkit.table(is_super_table=True)
                if group not in content["group"]:
                    content["group"][group] = tomlkit.table(
                        is_super_table=True)
                if "dependencies" not in content["group"][group]:
                    content["group"][group][
                        "dependencies"] = tomlkit.table()
                content["group"][group]["dependencies"][
                    dep.name] = constraint

    if extras_section:
        content["extras"] = extras_section

    pyproject = cast(TOMLDocument, pyproject)
    pyproject.add(tomlkit.nl())

    if path:
        path.joinpath("pyproject.toml").write_text(pyproject.as_string(),
                                                   encoding="utf-8")

    return pyproject
# Persist the remote instance configuration; abort if the file is unwritable.
try:
    with open(filename, "w") as f:
        f.write(dumps(remote))
    log("Instance configuration stored.", lvl=debug)
except PermissionError:
    log(
        "PermissionError: Could not write instance management configuration file",
        lvl=error,
    )
    abort(EXIT_NO_PERMISSION)

# Build the default instance-management configuration template,
# headed by a creation-timestamp comment.
configuration_template = document()
configuration_template.add(comment("Isomer Instance Management Configuration"))
configuration_template.add(comment("Created on %s" % std_now()))
configuration_template.add(nl())

# [meta] section: platform/distribution/init defaults with inline hints.
meta = table()
meta.add("platform", "amd64")
meta["platform"].comment("Set to rpi for advanced Raspberry Pi support")
meta.add("distribution", "debian")
meta["distribution"].comment("Currently, only debian supported")
meta.add("init", "systemd")
meta["init"].comment("Currently, only systemd supported")
meta.add("prefix", "")
configuration_template.add("meta", meta)

# Per-instance template; fields are left blank for the user to fill in.
instance_template = table()
instance_template.add("name", "")
instance_template.add("contact", "")
def _create_checker_section(
    checker: str, options: list[OptionsData], linter: PyLinter
) -> str:
    """Build the rst documentation section for one checker.

    Emits a title, one subsection per option (help text and default), and a
    collapsible example ``[tool.pylint]`` TOML configuration reflecting the
    linter's current values.
    """
    checker_string = f".. _{checker}-options:\n\n"
    checker_string += get_rst_title(f"``{checker.capitalize()}`` **Checker**", "-")
    toml_doc = tomlkit.document()
    pylint_tool_table = tomlkit.table(is_super_table=True)
    toml_doc.add(tomlkit.key(["tool", "pylint"]), pylint_tool_table)
    checker_table = tomlkit.table()
    for option in sorted(options, key=lambda x: x.name):
        checker_string += get_rst_title(f"--{option.name}", '"')
        checker_string += f"*{option.optdict.get('help')}*\n\n"
        if option.optdict.get("default") == "":
            checker_string += '**Default:** ``""``\n\n\n'
        else:
            checker_string += f"**Default:** ``{option.optdict.get('default')}``\n\n\n"
        # Start adding the option to the toml example
        if option.optdict.get("hide_from_config_file"):
            continue
        # Get current value of option
        value = getattr(linter.config, option.name.replace("-", "_"))
        # Create a comment if the option has no value
        if value is None:
            checker_table.add(tomlkit.comment(f"{option.name} ="))
            checker_table.add(tomlkit.nl())
            continue
        # Tomlkit doesn't support regular expressions
        if isinstance(value, re.Pattern):
            value = value.pattern
        elif (
            isinstance(value, (list, tuple))
            and value
            and isinstance(value[0], re.Pattern)
        ):
            value = [i.pattern for i in value]
        # Add to table
        checker_table.add(option.name, value)
        checker_table.add(tomlkit.nl())
    pylint_tool_table.add(options[0].checker.name.lower(), checker_table)
    # Indent every non-empty dumped line so it nests under the
    # ``.. code-block:: toml`` directive below.
    toml_string = "\n".join(
        f"   {i}" if i else "" for i in tomlkit.dumps(toml_doc).split("\n")
    )
    # NOTE(review): internal line breaks of this template were reconstructed
    # from a flattened source — confirm rendering against the built docs.
    checker_string += f"""
.. raw:: html

   <details>
   <summary><a>Example configuration section</a></summary>

**Note:** Only ``pylint.tool`` is required, the section title is not. These are the default values.

.. code-block:: toml

{toml_string}

.. raw:: html

   </details>
"""
    return checker_string
def generate_yombo_toml(self, display_extra_warning: Optional[bool] = None
                        ) -> List[str]:
    """
    Generates the output for yombo.toml in TOML format - like INI, but better.

    If display_extra_warning is True, will display an even more nasty message to not
    edit this file while its running.

    :param display_extra_warning:
    :return:
    """
    # NOTE(review): annotated as List[str] but returns a 2-tuple of dumped
    # documents (config text, meta text) — confirm against callers.

    def wrap_comment_lines(text,
                           trailing_comment: Optional[bool] = None,
                           contents: Optional = None):
        """ Creates a comment that wraps long lines. """
        # Wrap at 75 columns; default target is the main config document.
        output = wrap(text, 75, break_long_words=False)
        if contents is None:
            contents = ini_contents
        for line in output:
            if trailing_comment is True:
                # Pad to 76 columns and close the framed banner with '#'.
                contents.add(tk.comment(f"{line: <76} #"))
            else:
                contents.add(tk.comment(f"{line}"))

    # Two parallel documents: the config file itself and a companion
    # meta document holding non-essential item metadata.
    ini_contents = tk.document()
    meta_contents = tk.document()
    meta_contents.add(
        tk.comment(
            "################################################################################"
        ))
    meta_contents.add(
        tk.comment(
            " Stores additional information about yombo.toml, non-essential."
        ))
    meta_contents.add(
        tk.comment(
            "################################################################################"
        ))
    meta_contents.add(tk.nl())
    ini_contents.add(tk.comment(" "))
    ini_contents.add(tk.comment(_("lib.configs.yombo_toml.about")))
    ini_contents.add(tk.comment(" "))
    if display_extra_warning is True:
        # Stronger banner warning that the gateway is still running.
        ini_contents.add(tk.comment(" "))
        ini_contents.add(
            tk.comment(
                "###############################################################################"
            ))
        ini_contents.add(
            tk.comment(f'{_("lib.configs.yombo_toml.warning"):^76}'))
        ini_contents.add(
            tk.comment(
                "###############################################################################"
            ))
        ini_contents.add(
            tk.comment(_("lib.configs.yombo_toml.still_running")))
        ini_contents.add(
            tk.comment(
                _("lib.configs.yombo_toml.still_running_pid",
                  number=str(self.pid))))
        ini_contents.add(
            tk.comment(
                "###############################################################################"
            ))
    else:
        wrap_comment_lines(_("lib.configs.yombo_toml.dont_edit"), False)
    ini_contents.add(tk.comment(" "))
    ini_contents.add(tk.comment(" "))

    def add_config_item(section, current_path, data):
        """
        Recursive function to output config contents.
        """
        config_data = data.to_dict(include_meta=False)
        item_msgid = f'lib.configs.item.{".".join(current_path)}'
        item_description = _(item_msgid)
        # Only emit a description comment when a translation exists.
        if item_msgid != item_description:
            wrap_comment_lines(item_description, contents=section)
        section.add(current_path[-1], config_data["value"])
        if item_msgid != item_description:
            section.add(tk.nl())
        # Everything except value/config goes to the meta document.
        meta_data = {
            key: config_data[key]
            for key in config_data.keys() - {'value', 'config'}
        }
        access_dict_set(config_data["config"], meta_contents, meta_data)

    def add_dictionary(current_path, data):
        """
        Recursive function to output config contents.
        """
        section = tk.table()
        if len(current_path):
            # Non-root section: framed banner naming the section.
            ini_contents.add(tk.nl())
            ini_contents.add(tk.nl())
            ini_contents.add(
                tk.comment(
                    "##############################################################################"
                ))
            ini_contents.add(
                tk.comment(f'{".".join(current_path): ^76} #'))
            ini_contents.add(tk.comment(f"{'': <76} #"))
            wrap_comment_lines(
                _(f'lib.configs.section.{".".join(current_path)}'),
                True,
                contents=ini_contents)
            ini_contents.add(
                tk.comment(
                    "##############################################################################"
                ))
        for config, items in data.items():
            temp_path = deepcopy(current_path)
            temp_path.append(config)
            if isinstance(items, ConfigItem):
                # Unset items are skipped.
                if items.value is None:
                    continue
                add_config_item(section, temp_path, items)
            else:
                add_dictionary(temp_path, items)
        if len(current_path):
            ini_contents[".".join(current_path)] = section

    add_dictionary([], self.configs)
    return tk.dumps(ini_contents), tk.dumps(meta_contents)
def _dump_v2_manifest_as_toml(manifest):
    """Render a packaging-format-2 app manifest dict as TOML text.

    Builds the document with tomlkit, then post-processes the dump to
    un-quote dotted keys (description.XX, ask.XX, help.XX, ram.*, main.*)
    so the output matches the expected v2 manifest style.
    """
    import re
    from tomlkit import document, nl, table, dumps, comment

    toml_manifest = document()
    toml_manifest.add("packaging_format", 2)
    toml_manifest.add(nl())
    toml_manifest.add("id", manifest["id"])
    toml_manifest.add("name", manifest["name"])
    # One dotted key per translation language.
    for lang, value in manifest["description"].items():
        toml_manifest.add(f"description.{lang}", value)
    toml_manifest.add(nl())
    toml_manifest.add("version", manifest["version"])
    toml_manifest.add(nl())
    toml_manifest.add("maintainers", manifest["maintainers"])

    upstream = table()
    for key, value in manifest["upstream"].items():
        upstream[key] = value
    toml_manifest["upstream"] = upstream

    integration = table()
    for key, value in manifest["integration"].items():
        integration.add(key, value)
    # Inline FIXME hints for the fields the packager must review by hand.
    integration["architectures"].comment(
        'FIXME: can be replaced by a list of supported archs using the dpkg --print-architecture nomenclature (amd64/i386/armhf/arm64/armel), for example: ["amd64", "i386"]'
    )
    integration["ldap"].comment(
        'FIXME: replace with true, false, or "not_relevant"')
    integration["sso"].comment(
        'FIXME: replace with true, false, or "not_relevant"')
    integration["disk"].comment(
        'FIXME: replace with an **estimate** minimum disk requirement. e.g. 20M, 400M, 1G, ...'
    )
    integration["ram.build"].comment(
        'FIXME: replace with an **estimate** minimum ram requirement. e.g. 50M, 400M, 1G, ...'
    )
    integration["ram.runtime"].comment(
        'FIXME: replace with an **estimate** minimum ram requirement. e.g. 50M, 400M, 1G, ...'
    )
    toml_manifest["integration"] = integration

    install = table()
    for key, value in manifest["install"].items():
        install[key] = table()
        install[key].indent(4)
        # Standard questions get a reminder that ask strings are built-in.
        if key in ["domain", "path", "admin", "is_public", "password"]:
            install[key].add(
                comment(
                    "this is a generic question - ask strings are automatically handled by Yunohost's core"
                ))
        for lang, value2 in value.get("ask", {}).items():
            install[key].add(f"ask.{lang}", value2)
        for lang, value2 in value.get("help", {}).items():
            install[key].add(f"help.{lang}", value2)
        for key2, value2 in value.items():
            # ask/help were already emitted as dotted keys above.
            if key2 in ["ask", "help"]:
                continue
            install[key].add(key2, value2)
    toml_manifest["install"] = install

    resources = table()
    for key, value in manifest["resources"].items():
        resources[key] = table()
        resources[key].indent(4)
        for key2, value2 in value.items():
            resources[key].add(key2, value2)
            if key == "apt" and key2 == "extras":
                for extra in resources[key][key2]:
                    extra.indent(8)
    toml_manifest["resources"] = resources

    toml_manifest_dump = dumps(toml_manifest)

    # Strip the quotes tomlkit puts around dotted keys like "description.en".
    regex = re.compile(r'\"((description|ask|help)\.[a-z]{2})\"')
    toml_manifest_dump = regex.sub(r'\1', toml_manifest_dump)
    toml_manifest_dump = toml_manifest_dump.replace('"ram.build"', "ram.build")
    toml_manifest_dump = toml_manifest_dump.replace('"ram.runtime"',
                                                    "ram.runtime")
    toml_manifest_dump = toml_manifest_dump.replace('"main.url"', "main.url")
    toml_manifest_dump = toml_manifest_dump.replace('"main.default"',
                                                    "main.default")
    return toml_manifest_dump
def decompose(output, fill_descriptor_path, source, override):
    """Decompose an MCNP model: save each FILL universe to its own file plus
    an envelopes file, and write a TOML fill-descriptor indexing them.

    :param output: output directory (defaults from *source* when None)
    :param fill_descriptor_path: name of the TOML descriptor file to write
    :param source: path of the MCNP model to load
    :param override: whether save_mcnp may overwrite existing files
    """
    logger = logging.getLogger(__name__)
    logger.debug("Loading model from %s", source)
    source = Path(source)
    if output is None:
        output = get_default_output_directory(source)
    else:
        output = Path(output)
    output.mkdir(parents=True, exist_ok=True)
    fill_descriptor = tk.document()
    fill_descriptor.add(
        tk.comment(f"This is a decomposition of \"{source.name}\" model"))
    parse_result: ParseResult = from_file(source)
    if parse_result.title:
        fill_descriptor.append("title", parse_result.title)
    model: Universe = parse_result.universe
    if model.comment:
        fill_descriptor.append("comment", model.comment)
    fill_descriptor.append("created", datetime.now())
    fill_descriptor.add(tk.nl())
    already_processed_universes = set()
    for c in model:
        # Remove the FILL option from the cell; it is recorded as a comment
        # and in the descriptor instead.
        fill = c.options.pop('FILL', None)
        if fill:
            universe = fill['universe']
            words = [f'FILL={universe.name()}']
            transform = fill.get('transform', None)
            if transform:
                # '*' prefix marks a transformation given in degrees.
                words[0] = '*' + words[0]
                words.append('(')
                words.extend(transform.get_words())
                words.append(')')
            # Preserve the original FILL spec as a cell comment.
            comm = c.options.get('comment', [])
            comm.append(''.join(words))
            c.options['comment'] = comm
            descriptor = tk.table()
            universe_name = universe.name()
            fn = f'u{universe_name}.i'
            descriptor['universe'] = universe_name
            if transform:
                name = transform.name()
                if name is None:
                    # The transformation is anonymous, so, store it's specification
                    # omitting redundant '*', TR0 words, and interleaving space tokens
                    descriptor['transform'] = tk.array(
                        transform.mcnp_words()[2:][1::2])
                else:
                    descriptor['transform'] = name
            descriptor['file'] = fn
            fill_descriptor.append(str(c.name()), descriptor)
            fill_descriptor.add(tk.nl())
            # Save each distinct universe only once.
            if universe_name not in already_processed_universes:
                move_universe_attribute_to_comments(universe)
                save_mcnp(universe, output / fn, override)
                logger.debug("The universe %s has been saved to %s",
                             universe_name, fn)
                already_processed_universes.add(universe_name)
    with open(output / fill_descriptor_path, "w") as fid:
        res = tk.dumps(fill_descriptor)
        fid.write(res)
    envelopes_path = output / "envelopes.i"
    save_mcnp(model, envelopes_path, override)
    logger.debug("The envelopes are saved to %s", envelopes_path)
def source_to_table(source: Source) -> Table:
    """Convert *source* into a tomlkit table and terminate it with a blank line."""
    data = source.to_dict()
    result: Table = table()
    for name in data:
        result.add(name, data[name])
    result.add(nl())
    return result
def decompose(output, fill_descriptor_path, source, override):
    """Decompose an MCNP model into per-universe files plus an envelopes file,
    writing a TOML fill-descriptor that also records named transformations.

    :param output: output directory (defaults from *source* when None)
    :param fill_descriptor_path: name of the TOML descriptor file to write
    :param source: path of the MCNP model to load
    :param override: whether save_mcnp may overwrite existing files
    """
    logger.debug("Loading model from {}", source)
    source = Path(source)
    if output is None:
        output = get_default_output_directory(source)
    else:
        output = Path(output)
    output.mkdir(parents=True, exist_ok=True)
    fill_descriptor = tk.document()
    fill_descriptor.add(
        tk.comment(f'This is a decomposition of "{source.name}" model'))
    parse_result: ParseResult = from_file(source)
    if parse_result.title:
        fill_descriptor.append("title", parse_result.title)
    model: Universe = parse_result.universe
    if model.comment:
        fill_descriptor.append("comment", model.comment)
    named_transformations = list(collect_transformations(model))
    fill_descriptor.append("created", item(datetime.now()))
    fill_descriptor.add(tk.nl())
    already_processed_universes = set()
    for c in model:
        # Remove the FILL option from the cell; it is recorded as a comment
        # and in the descriptor instead.
        fill = c.options.pop("FILL", None)
        if fill:
            universe = fill["universe"]
            words = [f"FILL={universe.name()}"]
            transform = fill.get("transform", None)
            if transform:
                # '*' prefix marks a transformation given in degrees.
                words[0] = "*" + words[0]
                words.append("(")
                words.extend(transform.get_words())
                words.append(")")
            # Preserve the original FILL spec as a cell comment.
            comm = c.options.get("comment", [])
            comm.append("".join(words))
            c.options["comment"] = comm
            descriptor = tk.table()
            universe_name = universe.name()
            fn = f"u{universe_name}.i"
            descriptor["universe"] = universe_name
            if transform:
                name = transform.name()
                if name is None:
                    # The transformation is anonymous, so, store it's specification
                    # omitting redundant '*', TR0 words, and interleaving space tokens
                    descriptor["transform"] = tk.array(
                        transform.mcnp_words()[2:][1::2])
                else:
                    descriptor["transform"] = name
            descriptor["file"] = fn
            fill_descriptor.append(str(c.name()), descriptor)
            fill_descriptor.add(tk.nl())
            # Save each distinct universe only once.
            if universe_name not in already_processed_universes:
                move_universe_attribute_to_comments(universe)
                save_mcnp(universe, output / fn, override)
                logger.debug("The universe {} has been saved to {}",
                             universe_name, fn)
                already_processed_universes.add(universe_name)
    # Record all named transformations, sorted by name, as TOML arrays.
    named_transformations_descriptor = tk.table()
    named_transformations = sorted(named_transformations,
                                   key=lambda x: x.name())
    for t in named_transformations:
        named_transformations_descriptor[f"tr{t.name()}"] = tk.array(
            t.mcnp_words()[2:][1::2])
    fill_descriptor.append("named_transformations",
                           named_transformations_descriptor)
    fill_descriptor.add(tk.nl())
    fdp = output / fill_descriptor_path
    with open(fdp, "w") as fid:
        res = tk.dumps(fill_descriptor)
        fid.write(res)
    logger.debug("Fill descriptor is saved in {}", fdp)
    envelopes_path = output / "envelopes.i"
    save_mcnp(model, envelopes_path, override)
    logger.debug("The envelopes are saved to {}", envelopes_path)