def test_array():
    a = tomlkit.array()
    assert isinstance(a, Array)

    a = tomlkit.array("[1,2, 3]")
    assert isinstance(a, Array)

def _validate(self, data: TOMLDocument) -> None:
    nonebot_data = data.setdefault("nonebot", tomlkit.table())
    if not isinstance(nonebot_data, Table):
        raise ValueError("'nonebot' in toml file is not a Table!")
    plugin_data = nonebot_data.setdefault("plugins", tomlkit.table())
    if not isinstance(plugin_data, Table):
        raise ValueError("'nonebot.plugins' in toml file is not a Table!")
    plugins = plugin_data.setdefault("plugins", tomlkit.array())
    if not isinstance(plugins, Array):
        raise ValueError("'nonebot.plugins.plugins' in toml file is not an Array!")
    plugin_dirs = plugin_data.setdefault("plugin_dirs", tomlkit.array())
    if not isinstance(plugin_dirs, Array):
        raise ValueError("'nonebot.plugins.plugin_dirs' in toml file is not an Array!")

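# For reference: a minimal sketch of a document that passes _validate above.
# The key layout is inferred from the setdefault calls; the values are hypothetical.
import tomlkit

SAMPLE = """\
[nonebot.plugins]
plugins = ["foo"]
plugin_dirs = ["src/plugins"]
"""
doc = tomlkit.parse(SAMPLE)  # parses into a TOMLDocument with the expected tables/arrays
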
def format_lockfile(
    mapping: Dict[str, Candidate],
    fetched_dependencies: Dict[str, List[Requirement]],
    summary_collection: Dict[str, str],
) -> Dict:
    """Format lock file from a dict of resolved candidates, a mapping of
    dependencies and a collection of package summaries.
    """
    packages = tomlkit.aot()
    file_hashes = tomlkit.table()
    for k, v in sorted(mapping.items()):
        base = tomlkit.table()
        base.update(v.as_lockfile_entry())
        base.add("summary", summary_collection[strip_extras(k)[0]])
        deps = make_array([r.as_line() for r in fetched_dependencies[k]], True)
        if len(deps) > 0:
            base.add("dependencies", deps)
        packages.append(base)
        if v.hashes:
            key = f"{k} {v.version}"
            array = tomlkit.array()
            array.multiline(True)
            for filename, hash_value in v.hashes.items():
                inline = make_inline_table({"file": filename, "hash": hash_value})
                array.append(inline)
            if array:
                file_hashes.add(key, array)
    doc = tomlkit.document()
    doc.add("package", packages)
    metadata = tomlkit.table()
    metadata.add("files", file_hashes)
    doc.add("metadata", metadata)
    return doc

def create_configuration(self, config: Configuration, *style_urls: str) -> None:
    """Create a configuration file."""
    from nitpick.style import StyleManager  # pylint: disable=import-outside-toplevel

    if config.file:
        doc: TOMLDocument = tomlkit.parse(config.file.read_text())
    else:
        doc = tomlkit.document()
        config.file = self.root / DOT_NITPICK_TOML
    if not style_urls:
        style_urls = (str(StyleManager.get_default_style_url()),)
    tool_nitpick = tomlkit.table()
    tool_nitpick.add(tomlkit.comment("Generated by the 'nitpick init' command"))
    tool_nitpick.add(tomlkit.comment(f"More info at {READ_THE_DOCS_URL}configuration.html"))
    tool_nitpick.add("style", tomlkit.array([tomlkit.string(url) for url in style_urls]))
    doc.add(SingleKey(TOOL_NITPICK_KEY, KeyType.Bare), tool_nitpick)
    # config.file will always have a value at this point, but mypy can't see it.
    config.file.write_text(tomlkit.dumps(doc, sort_keys=True))  # type: ignore

def test_replace_with_comment():
    content = 'a = "1"'
    doc = parse(content)
    a = tomlkit.item(int(doc["a"]))
    a.comment("`a` should be an int")
    doc["a"] = a
    expected = "a = 1 # `a` should be an int"
    assert doc.as_string() == expected

    content = 'a = "1, 2, 3"'
    doc = parse(content)
    a = tomlkit.array()
    a.comment("`a` should be an array")
    for x in doc["a"].split(","):
        a.append(int(x.strip()))
    doc["a"] = a
    expected = "a = [1, 2, 3] # `a` should be an array"
    assert doc.as_string() == expected

    doc = parse(content)
    a = tomlkit.inline_table()
    a.comment("`a` should be an inline-table")
    for x in doc["a"].split(","):
        i = int(x.strip())
        a.append(chr(ord("a") + i - 1), i)
    doc["a"] = a
    expected = "a = {a = 1, b = 2, c = 3} # `a` should be an inline-table"
    assert doc.as_string() == expected

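# The comment-attachment pattern from the test above, in isolation. A minimal
# sketch of tomlkit's public API; the rendered line shown is the typical output.
import tomlkit

doc = tomlkit.document()
answer = tomlkit.item(42)
answer.comment("the answer")
doc["value"] = answer
print(tomlkit.dumps(doc))  # value = 42 # the answer
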
def format_lockfile(mapping, fetched_dependencies, summary_collection):
    """Format lock file from a dict of resolved candidates, a mapping of
    dependencies and a collection of package summaries.
    """
    packages = tomlkit.aot()
    metadata = tomlkit.table()
    for k, v in sorted(mapping.items()):
        base = tomlkit.table()
        base.update(v.as_lockfile_entry())
        base.add("summary", summary_collection[strip_extras(k)[0]])
        deps = tomlkit.table()
        for r in fetched_dependencies[k].values():
            name, req = r.as_req_dict()
            if getattr(req, "items", None) is not None:
                inline = tomlkit.inline_table()
                inline.update(req)
                deps.add(name, inline)
            else:
                deps.add(name, req)
        if len(deps) > 0:
            base.add("dependencies", deps)
        packages.append(base)
        if v.hashes:
            key = f"{k} {v.version}"
            array = tomlkit.array()
            array.multiline(True)
            for filename, hash_value in v.hashes.items():
                inline = tomlkit.inline_table()
                inline.update({"file": filename, "hash": hash_value})
                array.append(inline)
            if array:
                metadata.add(key, array)
    doc = tomlkit.document()
    doc.update({"package": packages, "metadata": metadata})
    return doc

def make_array(data, multiline=False):
    if not data:
        return []
    array = tomlkit.array()
    array.multiline(multiline)
    for item in data:
        array.append(item)
    return array

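# A quick usage sketch for make_array (hypothetical values). With
# multiline=True, tomlkit typically renders one element per line:
import tomlkit

doc = tomlkit.document()
doc["dependencies"] = make_array(["requests>=2.28", "tomlkit"], multiline=True)
print(tomlkit.dumps(doc))
# dependencies = [
#     "requests>=2.28",
#     "tomlkit",
# ]
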
def make_array(data: list, multiline: bool = False) -> Array:
    if not data:
        return []
    array = tomlkit.array()
    array.multiline(multiline)
    for item in data:
        array.append(item)
    return array

def format_lockfile(
    project: Project,
    mapping: dict[str, Candidate],
    fetched_dependencies: dict[str, list[Requirement]],
) -> dict:
    """Format lock file from a dict of resolved candidates, a mapping of
    dependencies and a collection of package summaries.
    """
    packages = tomlkit.aot()
    file_hashes = tomlkit.table()
    for k, v in sorted(mapping.items()):
        base = tomlkit.table()
        base.update(v.as_lockfile_entry(project.root))  # type: ignore
        base.add("summary", v.summary or "")
        deps = make_array(sorted(r.as_line() for r in fetched_dependencies[k]), True)
        if len(deps) > 0:
            base.add("dependencies", deps)
        packages.append(base)  # type: ignore
        if v.hashes:
            key = f"{strip_extras(k)[0]} {v.version}"
            if key in file_hashes:
                continue
            array = tomlkit.array().multiline(True)
            for filename, hash_value in v.hashes.items():
                inline = make_inline_table({"file": filename, "hash": hash_value})
                array.append(inline)  # type: ignore
            if array:
                file_hashes.add(key, array)
    doc = tomlkit.document()
    doc.add("package", packages)  # type: ignore
    metadata = tomlkit.table()
    metadata.add("files", file_hashes)
    doc.add("metadata", metadata)  # type: ignore
    return cast(dict, doc)

def test_build_example(example):
    content = example("example")

    doc = document()
    doc.add(comment("This is a TOML document. Boom."))
    doc.add(nl())
    doc.add("title", "TOML Example")

    owner = table()
    owner.add("name", "Tom Preston-Werner")
    owner.add("organization", "GitHub")
    owner.add("bio", "GitHub Cofounder & CEO\nLikes tater tots and beer.")
    owner.add("dob", datetime.datetime(1979, 5, 27, 7, 32, tzinfo=_utc))
    owner["dob"].comment("First class dates? Why not?")
    doc.add("owner", owner)

    database = table()
    database["server"] = "192.168.1.1"
    database["ports"] = [8001, 8001, 8002]
    database["connection_max"] = 5000
    database["enabled"] = True
    doc["database"] = database

    servers = table()
    servers.add(nl())
    c = comment("You can indent as you please. Tabs or spaces. TOML don't care.").indent(2)
    c.trivia.trail = ""
    servers.add(c)
    alpha = table()
    servers.append("alpha", alpha)
    alpha.indent(2)
    alpha.add("ip", "10.0.0.1")
    alpha.add("dc", "eqdc10")
    beta = table()
    servers.append("beta", beta)
    beta.add("ip", "10.0.0.2")
    beta.add("dc", "eqdc10")
    beta.add("country", "中国")
    beta["country"].comment("This should be parsed as UTF-8")
    beta.indent(2)
    doc["servers"] = servers

    clients = table()
    doc.add("clients", clients)
    clients["data"] = item([["gamma", "delta"], [1, 2]]).comment(
        "just an update to make sure parsers support it"
    )
    clients.add(nl())
    clients.add(comment("Line breaks are OK when inside arrays"))
    clients["hosts"] = array(
        """[
"alpha",
"omega"
]"""
    )

    doc.add(nl())
    doc.add(comment("Products"))

    products = aot()
    doc["products"] = products

    hammer = table().indent(2)
    hammer["name"] = "Hammer"
    hammer["sku"] = 738594937

    nail = table().indent(2)
    nail["name"] = "Nail"
    nail["sku"] = 284758393
    nail["color"] = "gray"

    products.append(hammer)
    products.append(nail)

    assert content == doc.as_string()

def dumps(self, reqs, project: RootDependency, content=None) -> str:
    # read config
    if content:
        doc = tomlkit.parse(content)
    else:
        doc = tomlkit.document()

    # get tool section from config
    if 'tool' not in doc:
        doc['tool'] = {'flit': {'metadata': tomlkit.table()}}
    elif 'flit' not in doc['tool']:
        doc['tool']['flit'] = {'metadata': tomlkit.table()}
    elif 'metadata' not in doc['tool']['flit']:
        doc['tool']['flit']['metadata'] = tomlkit.table()
    section = doc['tool']['flit']['metadata']

    # project and module names
    module = project.package.packages[0].module
    section['module'] = module
    if project.raw_name != module:
        section['dist-name'] = project.raw_name
    elif 'dist-name' in section:
        del section['dist-name']

    # author and maintainer
    for field, author in zip(('author', 'maintainer'), project.authors):
        # add name
        section[field] = author.name
        # add new or remove old mail
        field = field + '-email'
        if author.mail:
            section[field] = author.mail
        elif field in section:
            del section[field]
    if not project.authors:
        # remove old author
        if 'author' in section:
            del section['author']
        if 'author-email' in section:
            del section['author-email']
    if len(project.authors) < 2:
        # remove old maintainer
        if 'maintainer' in section:
            del section['maintainer']
        if 'maintainer-email' in section:
            del section['maintainer-email']

    # metainfo
    for field in ('license', 'keywords', 'classifiers'):
        value = getattr(project, field)
        if field == 'keywords':
            value = ' '.join(value)
        if isinstance(value, tuple):
            value = list(value)
        if not value:
            # delete
            if field in section:
                del section[field]
        elif field not in section:
            # insert
            section[field] = value
        elif section[field].value != value:
            # update
            section[field] = value

    # write links
    if 'homepage' in project.links:
        section['home-page'] = project.links['homepage']
    if set(project.links) - {'homepage'}:
        if 'urls' in section:
            # remove old
            for name in section['urls']:
                if name not in project.links:
                    del section['urls'][name]
        else:
            section['urls'] = tomlkit.table()
        # add and update
        for name, url in project.links.items():
            if name == 'homepage':
                continue
            section['urls'][name] = url
    elif 'urls' in section:
        del section['urls']

    # readme
    if project.readme:
        section['description-file'] = project.readme.path.name
    elif 'description-file' in section:
        del section['description-file']

    # python constraint
    python = str(project.python)
    if python not in ('', '*'):
        section['requires-python'] = python
    elif 'requires-python' in section:
        del section['requires-python']

    # dependencies
    for section_name, is_dev in [('requires', False), ('dev-requires', True)]:
        section[section_name] = tomlkit.array()
        for req in sorted(reqs):
            if req.main_envs:
                continue
            if is_dev is req.is_dev:
                section[section_name].append(self._format_req(req=req))
        if not section[section_name].value:
            del section[section_name]

    # extras
    envs = set(chain(*[req.main_envs for req in reqs]))
    if 'requires-extra' in section:
        for env in section['requires-extra']:
            if env in envs:
                # clean env from old packages
                section['requires-extra'][env] = tomlkit.array()
            else:
                # remove old env
                del section['requires-extra'][env]
    else:
        section['requires-extra'] = tomlkit.table()
    for env in envs:
        if env not in section['requires-extra']:
            # create new env
            section['requires-extra'][env] = tomlkit.array()
    # write new extra packages
    for req in sorted(reqs):
        for env in req.main_envs:
            section['requires-extra'][env].append(self._format_req(req=req))

    # scripts
    if 'scripts' in doc['tool']['flit']:
        # remove old scripts
        names = [e.name for e in project.entrypoints if e.group == 'console_scripts']
        for name in tuple(doc['tool']['flit']['scripts']):
            if name not in names:
                del doc['tool']['flit']['scripts'][name]
    else:
        doc['tool']['flit']['scripts'] = tomlkit.table()
    for entrypoint in project.entrypoints:
        if entrypoint.group != 'console_scripts':
            continue
        doc['tool']['flit']['scripts'][entrypoint.name] = entrypoint.path

    # entrypoints
    if 'entrypoints' in doc['tool']['flit']:
        groups = [e.group for e in project.entrypoints]
        for group, entrypoints in list(doc['tool']['flit']['entrypoints'].items()):
            # remove old group
            if group not in groups:
                del doc['tool']['flit']['entrypoints'][group]
                continue
            # remove old entrypoints in group
            names = [e.name for e in project.entrypoints if e.group == group]
            for name in tuple(entrypoints):
                if name not in names:
                    del doc['tool']['flit']['entrypoints'][group][name]
    else:
        doc['tool']['flit']['entrypoints'] = tomlkit.table()
    for entrypoint in project.entrypoints:
        if entrypoint.group == 'console_scripts':
            continue
        if entrypoint.group not in doc['tool']['flit']['entrypoints']:
            doc['tool']['flit']['entrypoints'][entrypoint.group] = tomlkit.table()
        doc['tool']['flit']['entrypoints'][entrypoint.group][entrypoint.name] = entrypoint.path

    return tomlkit.dumps(doc)

def make_array(data: list, multiline: bool = False) -> list:
    if not data:
        return []
    array = cast(list, tomlkit.array().multiline(multiline))
    array.extend(data)
    return array

def _dump_package(self, package):  # type: (Package) -> dict
    dependencies = {}
    for dependency in sorted(package.requires, key=lambda d: d.name):
        if dependency.pretty_name not in dependencies:
            dependencies[dependency.pretty_name] = []

        constraint = inline_table()
        constraint["version"] = str(dependency.pretty_constraint)

        if dependency.extras:
            constraint["extras"] = sorted(dependency.extras)

        if dependency.is_optional():
            constraint["optional"] = True

        if not dependency.marker.is_any():
            constraint["markers"] = str(dependency.marker)

        dependencies[dependency.pretty_name].append(constraint)

    # All the constraints should have the same type,
    # but we want to simplify them if it's possible
    for dependency, constraints in tuple(dependencies.items()):
        if all(len(constraint) == 1 for constraint in constraints):
            dependencies[dependency] = [
                constraint["version"] for constraint in constraints
            ]

    data = OrderedDict([
        ("name", package.pretty_name),
        ("version", package.pretty_version),
        ("description", package.description or ""),
        ("category", package.category),
        ("optional", package.optional),
        ("python-versions", package.python_versions),
        ("files", sorted(package.files, key=lambda x: x["file"])),
    ])

    if dependencies:
        data["dependencies"] = table()
        for k, constraints in dependencies.items():
            if len(constraints) == 1:
                data["dependencies"][k] = constraints[0]
            else:
                data["dependencies"][k] = array().multiline(True)
                for constraint in constraints:
                    data["dependencies"][k].append(constraint)

    if package.extras:
        extras = {}
        for name, deps in package.extras.items():
            extras[name] = [
                str(dep) if not dep.constraint.is_any() else dep.name
                for dep in deps
            ]
        data["extras"] = extras

    if package.source_url:
        url = package.source_url
        if package.source_type in ["file", "directory"]:
            # The lock file should only store paths relative to the root project
            url = Path(
                os.path.relpath(
                    Path(url).as_posix(), self._lock.path.parent.as_posix()
                )
            ).as_posix()

        data["source"] = OrderedDict()

        if package.source_type:
            data["source"]["type"] = package.source_type

        data["source"]["url"] = url

        if package.source_reference:
            data["source"]["reference"] = package.source_reference

        if package.source_resolved_reference:
            data["source"]["resolved_reference"] = package.source_resolved_reference

        if package.source_type == "directory":
            data["develop"] = package.develop

    return data

for sound in soundboard["sound"]:
    sound["source"] = tomlkit.inline_table()
    if "<speak>" in sound["path"]:
        sound["source"]["tts"] = tomlkit.inline_table()
        sound["source"]["tts"]["ssml"] = sound["path"]
        sound["source"]["tts"]["lang"] = sound["tts_language"]
    elif "youtube.com" in sound["path"] or "youtu.be" in sound["path"]:
        sound["source"]["youtube"] = tomlkit.inline_table()
        sound["source"]["youtube"]["id"] = youtube_pattern.match(sound["path"]).group(1)
    elif sound["path"].startswith("http"):
        if "header" in sound:
            sound["source"]["http"] = tomlkit.inline_table()
            sound["source"]["http"]["url"] = sound["path"]
            header = tomlkit.array()
            for val in sound["header"]:
                inline = tomlkit.inline_table()
                inline.append("name", val["name"])
                inline.append("value", val["value"])
                header.append(inline)
            sound["source"]["http"]["headers"] = header
        else:
            sound["source"]["http"] = tomlkit.inline_table()
            sound["source"]["http"]["url"] = sound["path"]
    else:
        sound["source"]["local"] = tomlkit.inline_table()
        sound["source"]["local"]["path"] = sound["path"]
    del sound["path"]
    if "header" in sound:
        # The original snippet breaks off here; deleting the now-migrated key,
        # as done for "path" above, is the natural continuation.
        del sound["header"]

def _dump_package(self, package: Package) -> dict:
    dependencies = {}
    for dependency in sorted(package.requires, key=lambda d: d.name):
        if dependency.pretty_name not in dependencies:
            dependencies[dependency.pretty_name] = []

        constraint = inline_table()

        if dependency.is_directory() or dependency.is_file():
            constraint["path"] = dependency.path.as_posix()

            if dependency.is_directory() and dependency.develop:
                constraint["develop"] = True
        elif dependency.is_url():
            constraint["url"] = dependency.url
        elif dependency.is_vcs():
            constraint[dependency.vcs] = dependency.source

            if dependency.branch:
                constraint["branch"] = dependency.branch
            elif dependency.tag:
                constraint["tag"] = dependency.tag
            elif dependency.rev:
                constraint["rev"] = dependency.rev
        else:
            constraint["version"] = str(dependency.pretty_constraint)

        if dependency.extras:
            constraint["extras"] = sorted(dependency.extras)

        if dependency.is_optional():
            constraint["optional"] = True

        if not dependency.marker.is_any():
            constraint["markers"] = str(dependency.marker)

        dependencies[dependency.pretty_name].append(constraint)

    # All the constraints should have the same type,
    # but we want to simplify them if it's possible
    for dependency, constraints in tuple(dependencies.items()):
        if all(
            len(constraint) == 1 and "version" in constraint
            for constraint in constraints
        ):
            dependencies[dependency] = [
                constraint["version"] for constraint in constraints
            ]

    data = dict([
        ("name", package.pretty_name),
        ("version", package.pretty_version),
        ("description", package.description or ""),
        ("category", package.category),
        ("optional", package.optional),
        ("python-versions", package.python_versions),
        ("files", sorted(package.files, key=lambda x: x["file"])),
    ])

    if dependencies:
        data["dependencies"] = table()
        for k, constraints in dependencies.items():
            if len(constraints) == 1:
                data["dependencies"][k] = constraints[0]
            else:
                data["dependencies"][k] = array().multiline(True)
                for constraint in constraints:
                    data["dependencies"][k].append(constraint)

    if package.extras:
        extras = {}
        for name, deps in package.extras.items():
            # TODO: This should use dep.to_pep_508() once this is fixed
            # https://github.com/python-poetry/poetry-core/pull/102
            extras[name] = [
                dep.base_pep_508_name if not dep.constraint.is_any() else dep.name
                for dep in deps
            ]
        data["extras"] = extras

    if package.source_url:
        url = package.source_url
        if package.source_type in ["file", "directory"]:
            # The lock file should only store paths relative to the root project
            url = Path(
                os.path.relpath(
                    Path(url).as_posix(), self._lock.path.parent.as_posix()
                )
            ).as_posix()

        data["source"] = dict()

        if package.source_type:
            data["source"]["type"] = package.source_type

        data["source"]["url"] = url

        if package.source_reference:
            data["source"]["reference"] = package.source_reference

        if package.source_resolved_reference:
            data["source"]["resolved_reference"] = package.source_resolved_reference

        if package.source_type in ["directory", "git"]:
            data["develop"] = package.develop

    return data

def decompose(output, fill_descriptor_path, source, override):
    logger = logging.getLogger(__name__)
    logger.debug("Loading model from %s", source)
    source = Path(source)
    if output is None:
        output = get_default_output_directory(source)
    else:
        output = Path(output)
    output.mkdir(parents=True, exist_ok=True)
    fill_descriptor = tk.document()
    fill_descriptor.add(tk.comment(f"This is a decomposition of \"{source.name}\" model"))
    parse_result: ParseResult = from_file(source)
    if parse_result.title:
        fill_descriptor.append("title", parse_result.title)
    model: Universe = parse_result.universe
    if model.comment:
        fill_descriptor.append("comment", model.comment)
    fill_descriptor.append("created", datetime.now())
    fill_descriptor.add(tk.nl())
    already_processed_universes = set()
    for c in model:
        fill = c.options.pop('FILL', None)
        if fill:
            universe = fill['universe']
            words = [f'FILL={universe.name()}']
            transform = fill.get('transform', None)
            if transform:
                words[0] = '*' + words[0]
                words.append('(')
                words.extend(transform.get_words())
                words.append(')')
            comm = c.options.get('comment', [])
            comm.append(''.join(words))
            c.options['comment'] = comm
            descriptor = tk.table()
            universe_name = universe.name()
            fn = f'u{universe_name}.i'
            descriptor['universe'] = universe_name
            if transform:
                name = transform.name()
                if name is None:
                    # The transformation is anonymous, so store its specification,
                    # omitting the redundant '*' and TR0 words and the interleaving space tokens
                    descriptor['transform'] = tk.array(transform.mcnp_words()[2:][1::2])
                else:
                    descriptor['transform'] = name
            descriptor['file'] = fn
            fill_descriptor.append(str(c.name()), descriptor)
            fill_descriptor.add(tk.nl())
            if universe_name not in already_processed_universes:
                move_universe_attribute_to_comments(universe)
                save_mcnp(universe, output / fn, override)
                logger.debug("The universe %s has been saved to %s", universe_name, fn)
                already_processed_universes.add(universe_name)
    with open(output / fill_descriptor_path, "w") as fid:
        res = tk.dumps(fill_descriptor)
        fid.write(res)
    envelopes_path = output / "envelopes.i"
    save_mcnp(model, envelopes_path, override)
    logger.debug("The envelopes are saved to %s", envelopes_path)

def decompose(output, fill_descriptor_path, source, override):
    logger.debug("Loading model from {}", source)
    source = Path(source)
    if output is None:
        output = get_default_output_directory(source)
    else:
        output = Path(output)
    output.mkdir(parents=True, exist_ok=True)
    fill_descriptor = tk.document()
    fill_descriptor.add(tk.comment(f'This is a decomposition of "{source.name}" model'))
    parse_result: ParseResult = from_file(source)
    if parse_result.title:
        fill_descriptor.append("title", parse_result.title)
    model: Universe = parse_result.universe
    if model.comment:
        fill_descriptor.append("comment", model.comment)
    named_transformations = list(collect_transformations(model))
    fill_descriptor.append("created", item(datetime.now()))
    fill_descriptor.add(tk.nl())
    already_processed_universes = set()
    for c in model:
        fill = c.options.pop("FILL", None)
        if fill:
            universe = fill["universe"]
            words = [f"FILL={universe.name()}"]
            transform = fill.get("transform", None)
            if transform:
                words[0] = "*" + words[0]
                words.append("(")
                words.extend(transform.get_words())
                words.append(")")
            comm = c.options.get("comment", [])
            comm.append("".join(words))
            c.options["comment"] = comm
            descriptor = tk.table()
            universe_name = universe.name()
            fn = f"u{universe_name}.i"
            descriptor["universe"] = universe_name
            if transform:
                name = transform.name()
                if name is None:
                    # The transformation is anonymous, so store its specification,
                    # omitting the redundant '*' and TR0 words and the interleaving space tokens
                    descriptor["transform"] = tk.array(transform.mcnp_words()[2:][1::2])
                else:
                    descriptor["transform"] = name
            descriptor["file"] = fn
            fill_descriptor.append(str(c.name()), descriptor)
            fill_descriptor.add(tk.nl())
            if universe_name not in already_processed_universes:
                move_universe_attribute_to_comments(universe)
                save_mcnp(universe, output / fn, override)
                logger.debug("The universe {} has been saved to {}", universe_name, fn)
                already_processed_universes.add(universe_name)
    named_transformations_descriptor = tk.table()
    named_transformations = sorted(named_transformations, key=lambda x: x.name())
    for t in named_transformations:
        named_transformations_descriptor[f"tr{t.name()}"] = tk.array(t.mcnp_words()[2:][1::2])
    fill_descriptor.append("named_transformations", named_transformations_descriptor)
    fill_descriptor.add(tk.nl())
    fdp = output / fill_descriptor_path
    with open(fdp, "w") as fid:
        res = tk.dumps(fill_descriptor)
        fid.write(res)
    logger.debug("Fill descriptor is saved in {}", fdp)
    envelopes_path = output / "envelopes.i"
    save_mcnp(model, envelopes_path, override)
    logger.debug("The envelopes are saved to {}", envelopes_path)