コード例 #1
0
def toml_headers():
    """Generate ``_netlify.toml`` with build settings and cache-control headers."""
    out_path = Path("_netlify.toml")

    doc = document()
    doc.add(comment("netlify.toml"))
    doc.add(comment("Generated: " + datetime.now().isoformat()))

    # [build] section with an indented [build.environment] sub-table
    build_tbl = table()
    env_tbl = table().indent(2)
    env_tbl["YARN_VERSION"] = "1.21.0"
    build_tbl["publish"] = "_site/"
    build_tbl["command"] = "make build"
    build_tbl["environment"] = env_tbl
    doc["build"] = build_tbl

    # [[headers]] array of tables: service worker, manifest, then
    # one forever-cached entry per pattern
    header_aot = aot()
    header_aot.append(make_headers("sw.js", {
        "service-worker-allowed": "/",
        "cache-control": NO_CACHE
    }))
    header_aot.append(
        make_headers("**/manifest.json", {"cache-control": NO_CACHE}))
    for glob_pattern in FOREVER_PATTERNS:
        header_aot.append(
            make_headers(glob_pattern, {"cache-control": CACHE_FOREVER}))
    doc["headers"] = header_aot

    rendered = dumps(doc)
    print(rendered)
    written = out_path.write_text(rendered)
    print(written)
コード例 #2
0
ファイル: pipfile.py プロジェクト: lucidshift/pipenv
def reorder_source_keys(data):
    # type: (tomlkit.toml_document.TOMLDocument) -> tomlkit.toml_document.TOMLDocument
    """Merge ``source`` and ``sources`` entries into a single ``source`` AoT.

    Each entry is normalized through ``PipfileLoader.populate_source`` and
    reduced to the name/url/verify_ssl keys; the legacy ``sources`` key is
    removed afterwards.
    """
    collected = []  # type: sources_type
    for source_key in ("source", "sources"):
        collected.extend(data.get(source_key, tomlkit.aot()).value)
    merged = tomlkit.aot()
    for raw_entry in collected:
        tbl = tomlkit.table()  # type: tomlkit.items.Table
        normalized = PipfileLoader.populate_source(raw_entry.copy())
        for field in ("name", "url", "verify_ssl"):
            tbl.update({field: normalized[field]})
        merged.append(tbl)
    data["source"] = merged
    if data.get("sources", None):
        del data["sources"]
    return data
コード例 #3
0
def add_hook(test_repo: Path,
             name: str,
             cmd: str,
             after_push: bool = False) -> None:
    """Patch the repo's pyproject.toml so that we can also test hooks.

    Appends a hook table with *cmd* and *name* to the ``after_push`` or
    ``before_commit`` array under ``[tool.tbump]``, then commits the change.
    """
    cfg_path = test_repo / "pyproject.toml"
    # Fix: pathlib.Path has no .text() method — read_text() is the API.
    parsed = tomlkit.loads(cfg_path.read_text())
    key = "after_push" if after_push else "before_commit"
    tbump_cfg = parsed["tool"]["tbump"]
    if key not in tbump_cfg:
        tbump_cfg[key] = tomlkit.aot()
    hook_config = tomlkit.table()
    hook_config.add("cmd", cmd)
    hook_config.add("name", name)
    tbump_cfg[key].append(hook_config)

    cfg_path.write_text(tomlkit.dumps(parsed))
    tbump.git.run_git(test_repo, "add", ".")
    tbump.git.run_git(test_repo, "commit", "--message", "update hooks")
コード例 #4
0
def format_lockfile(
    mapping: Dict[str, Candidate],
    fetched_dependencies: Dict[str, List[Requirement]],
    summary_collection: Dict[str, str],
) -> Dict:
    """Build the lock-file document from resolved candidates, their
    dependencies and their summaries.
    """
    package_aot = tomlkit.aot()
    hashes_table = tomlkit.table()
    for name, candidate in sorted(mapping.items()):
        entry = tomlkit.table()
        entry.update(candidate.as_lockfile_entry())
        entry.add("summary", summary_collection[strip_extras(name)[0]])
        dep_lines = make_array(
            [req.as_line() for req in fetched_dependencies[name]], True)
        if len(dep_lines) > 0:
            entry.add("dependencies", dep_lines)
        package_aot.append(entry)
        if not candidate.hashes:
            continue
        # "<name> <version>" -> multiline array of {file, hash} inline tables
        hash_key = f"{name} {candidate.version}"
        hash_array = tomlkit.array()
        hash_array.multiline(True)
        for fname, digest in candidate.hashes.items():
            hash_array.append(make_inline_table({"file": fname, "hash": digest}))
        if hash_array:
            hashes_table.add(hash_key, hash_array)
    doc = tomlkit.document()
    doc.add("package", package_aot)
    meta = tomlkit.table()
    meta.add("files", hashes_table)
    doc.add("metadata", meta)
    return doc
コード例 #5
0
ファイル: actions.py プロジェクト: stacklens/pdm
def format_lockfile(mapping, fetched_dependencies, summary_collection):
    """Build the lock-file document from resolved candidates, their
    dependencies and their summaries.
    """
    package_aot = tomlkit.aot()
    meta_table = tomlkit.table()
    for name, candidate in sorted(mapping.items()):
        entry = tomlkit.table()
        entry.update(candidate.as_lockfile_entry())
        entry.add("summary", summary_collection[strip_extras(name)[0]])
        dep_table = tomlkit.table()
        for dep in fetched_dependencies[name].values():
            dep_name, dep_req = dep.as_req_dict()
            # dict-like requirements are rendered as inline tables,
            # plain strings go in as-is
            if getattr(dep_req, "items", None) is not None:
                inline_req = tomlkit.inline_table()
                inline_req.update(dep_req)
                dep_table.add(dep_name, inline_req)
            else:
                dep_table.add(dep_name, dep_req)
        if len(dep_table) > 0:
            entry.add("dependencies", dep_table)
        package_aot.append(entry)
        if not candidate.hashes:
            continue
        # "<name> <version>" -> multiline array of {file, hash} inline tables
        hash_key = f"{name} {candidate.version}"
        hash_array = tomlkit.array()
        hash_array.multiline(True)
        for fname, digest in candidate.hashes.items():
            inline_hash = tomlkit.inline_table()
            inline_hash.update({"file": fname, "hash": digest})
            hash_array.append(inline_hash)
        if hash_array:
            meta_table.add(hash_key, hash_array)
    doc = tomlkit.document()
    doc.update({"package": package_aot, "metadata": meta_table})
    return doc
コード例 #6
0
    def dumps(self, reqs, project: RootDependency, content=None) -> str:
        """Serialize requirements into Pipfile-style TOML text.

        When *content* is given it is re-parsed so unrelated user content is
        preserved. Returns the rendered document with one trailing newline.
        """
        if content:
            doc = tomlkit.parse(content)
        else:
            doc = tomlkit.document()

        # [[source]] -- add registries used by reqs, skipping duplicates
        sources = doc['source'] if 'source' in doc else tomlkit.aot()
        added_repos = {repo['name'] for repo in sources}
        updated = False
        for req in reqs:
            if not isinstance(req.dep.repo, RepositoriesRegistry):
                continue
            for repo in req.dep.repo.repos:
                if repo.name in added_repos:
                    continue
                # https://github.com/pypa/pipenv/issues/2231
                if isinstance(repo, WarehouseLocalRepo):
                    continue
                added_repos.add(repo.name)
                source = tomlkit.table()
                source['name'] = repo.name
                source['url'] = repo.pretty_url
                source['verify_ssl'] = repo.pretty_url.startswith('https://')
                sources.append(source)
                updated = True
        if updated:
            doc['source'] = sources

        # [requires] -- pin python version when the project declares one
        if project.python:
            python = Pythons(abstract=True).get_by_spec(project.python)
            if 'requires' not in doc:
                doc['requires'] = tomlkit.table()
            doc['requires']['python_version'] = str(python.get_short_version())

        for section_name, is_dev in [('packages', False), ('dev-packages', True)]:
            # create section if doesn't exist
            if section_name not in doc:
                doc[section_name] = tomlkit.table()
                continue

            # Fix: iterate a snapshot of the keys -- deleting from the
            # container while iterating it skips entries or raises.
            names = {req.name for req in reqs if is_dev is req.is_dev}
            for name in list(doc[section_name]):
                if name not in names:
                    del doc[section_name][name]

        # write new packages
        for section_name, is_dev in [('packages', False), ('dev-packages', True)]:
            for req in reqs:
                if is_dev is req.is_dev:
                    doc[section_name][req.raw_name] = self._format_req(req=req)

        return tomlkit.dumps(doc).rstrip() + '\n'
コード例 #7
0
ファイル: poetry.py プロジェクト: bkhanale/dephell
    def _add_repositories(section, reqs):
        """Sync ``section['source']`` with the repositories used by *reqs*.

        Existing sources still in use are kept (with their URL refreshed),
        stale ones are dropped, and new ones are appended in name order.
        Local warehouse repos and PyPI itself are never recorded.
        """
        # collect name -> url for every remote, non-PyPI repository
        urls = dict()
        for req in reqs:
            if not isinstance(req.dep.repo, RepositoriesRegistry):
                continue
            for repo in req.dep.repo.repos:
                if isinstance(repo, WarehouseLocalRepo):
                    continue
                if urlparse(repo.pretty_url).hostname in ('pypi.org',
                                                          'pypi.python.org'):
                    continue
                urls[repo.name] = repo.pretty_url

        # keep still-used old repositories, updating URLs that changed
        # (fix: drop the dead duplicate `added = []` and build the new AoT
        # locally instead of mutating section['source'] twice)
        added = []
        sources = tomlkit.aot()
        for source in list(section.get('source') or []):
            if source['name'] in urls:
                if source['url'] != urls[source['name']]:
                    source['url'] = urls[source['name']]
                sources.append(source)
                added.append(source['name'])

        # add new repositories
        for name, url in sorted(urls.items()):
            if name not in added:
                source = tomlkit.table()
                source['name'] = name
                source['url'] = url
                sources.append(source)

        section['source'] = sources
コード例 #8
0
ファイル: poetrylock.py プロジェクト: bkhanale/dephell
    def dumps(self, reqs, project: RootDependency, content=None) -> str:
        """Serialize requirements into poetry.lock-style TOML text."""
        doc = tomlkit.parse(content) if content else tomlkit.document()
        doc['package'] = [self._format_req(req=req) for req in reqs]

        # [extras] -- map each extra name to the packages that provide it
        extras = defaultdict(list)
        for req in reqs:
            if req.is_main:
                for extra in req.main_envs:
                    extras[extra].append(req.name)
            if req.is_dev:
                for extra in req.dev_envs:
                    extras[extra].append(req.name)
        if extras:
            doc['extras'] = dict(extras)

        # [[source]] -- each remote non-PyPI repository, recorded once
        sources = tomlkit.aot()
        seen = set()
        for req in reqs:
            if not isinstance(req.dep.repo, RepositoriesRegistry):
                continue
            for repo in req.dep.repo.repos:
                if repo.name in seen:
                    continue
                if isinstance(repo, WarehouseLocalRepo):
                    continue
                if urlparse(repo.pretty_url).hostname in ('pypi.org', 'pypi.python.org'):
                    continue
                seen.add(repo.name)

                entry = tomlkit.table()
                entry['name'] = repo.name
                entry['url'] = repo.pretty_url
                sources.append(entry)
        if sources:
            doc['source'] = sources

        doc['metadata'] = {
            # sha256 of tool.poetry section from pyproject.toml
            # 'content-hash': ...,
            # 'platform': '*',
            'python-versions': str(project.python),
        }

        doc['metadata']['hashes'] = tomlkit.table()
        for req in reqs:
            doc['metadata']['hashes'][req.name] = list(req.hashes or [])

        return tomlkit.dumps(doc)
コード例 #9
0
    def migrate_source(self):
        """Copy non-PyPI package sources from the Pipfile into the
        ``[tool.poetry]`` section of pyproject.toml."""
        if "source" not in self._pipenv:
            return

        poetry = self._pyproject["tool"]["poetry"]
        for entry in self._pipenv["source"]:
            # PyPI is poetry's implicit default; don't carry it over
            if entry["name"] == "pypi":
                continue

            src = table()
            src.add("name", entry["name"])
            src.add("url", entry["url"])

            if "source" not in poetry:
                poetry["source"] = aot()
            poetry["source"].append(src)
コード例 #10
0
ファイル: pipfile.py プロジェクト: hauwenc/dephell
    def dumps(self, reqs, project: RootDependency, content=None) -> str:
        """Serialize requirements into Pipfile-style TOML text.

        When *content* is given it is re-parsed so unrelated user content is
        preserved.
        """
        if content:
            doc = tomlkit.parse(content)
        else:
            doc = tomlkit.document()

        # [[source]] -- one entry per warehouse repo, skipping duplicates
        if 'source' not in doc:
            doc['source'] = tomlkit.aot()

        added_repos = {repo['name'] for repo in doc['source']}
        for req in reqs:
            if not isinstance(req.dep.repo, WareHouseRepo):
                continue
            if req.dep.repo.name in added_repos:
                continue
            added_repos.add(req.dep.repo.name)
            doc['source'].append(
                OrderedDict([
                    ('name', req.dep.repo.name),
                    ('url', req.dep.repo.pretty_url),
                    ('verify_ssl', True),
                ]))

        # [requires] -- pin python version when the project declares one
        if project.python:
            python = Pythons(abstract=True).get_by_spec(project.python)
            if 'requires' not in doc:
                doc['requires'] = tomlkit.table()
            doc['requires']['python_version'] = str(python.get_short_version())

        for section, is_dev in [('packages', False), ('dev-packages', True)]:
            # create section if doesn't exist
            if section not in doc:
                doc[section] = tomlkit.table()
                continue

            # Fix: iterate a snapshot of the keys -- deleting from the
            # container while iterating it skips entries or raises.
            names = {req.name for req in reqs if is_dev is req.is_dev}
            for name in list(doc[section]):
                if name not in names:
                    del doc[section][name]

        # write new packages
        for section, is_dev in [('packages', False), ('dev-packages', True)]:
            for req in reqs:
                if is_dev is req.is_dev:
                    doc[section][req.name] = self._format_req(req=req)

        return tomlkit.dumps(doc)
コード例 #11
0
def main(source, destination):
    '''Run main script.'''

    with open(source, 'rb') as fin:
        data = json.load(fin)

    # Each top-level JSON key becomes an array-of-tables of its tests.
    doc = tomlkit.document()
    for key, tests in data.items():
        tests_aot = tomlkit.aot()
        for test in tests:
            tests_aot.append(create_test(test))
        doc.add(key, tests_aot)

    # Emit to stdout when no destination was given, else to the file.
    if destination is None:
        print(tomlkit.dumps(doc), file=sys.stdout)
    else:
        with open(destination, 'w') as fout:
            print(tomlkit.dumps(doc), file=fout)
コード例 #12
0
    def dicts_to_toml_aot(cls, dicts: Sequence[Mapping[str, Any]]):
        """
        Make a tomlkit array of tables ("AoT"), one table per input mapping.

        Args:
            dicts: A sequence of dictionaries

        Returns:
            A tomlkit`AoT<https://github.com/sdispater/tomlkit/blob/master/tomlkit/items.py>`_
            (i.e. ``[[array]]``), not a full Document
        """
        import tomlkit

        aot = tomlkit.aot()
        for ser in dicts:
            tab = tomlkit.table()
            # append first so each table is attached to the AoT before filling
            aot.append(tab)
            for k, v in ser.items():
                tab.add(k, v)
            # trailing newline separates consecutive [[...]] tables in output
            tab.add(tomlkit.nl())
        return aot
コード例 #13
0
def format_lockfile(
    project: Project,
    mapping: dict[str, Candidate],
    fetched_dependencies: dict[str, list[Requirement]],
) -> dict:
    """Build the lock-file document from resolved candidates, their
    dependencies and their summaries.
    """
    package_aot = tomlkit.aot()
    hashes_table = tomlkit.table()
    for name, candidate in sorted(mapping.items()):
        entry = tomlkit.table()
        entry.update(candidate.as_lockfile_entry(project.root))  # type: ignore
        entry.add("summary", candidate.summary or "")
        dep_lines = make_array(
            sorted(req.as_line() for req in fetched_dependencies[name]), True)
        if len(dep_lines) > 0:
            entry.add("dependencies", dep_lines)
        package_aot.append(entry)  # type: ignore
        if not candidate.hashes:
            continue
        # keyed by "<base name> <version>"; first candidate wins
        hash_key = f"{strip_extras(name)[0]} {candidate.version}"
        if hash_key in hashes_table:
            continue
        hash_array = tomlkit.array().multiline(True)
        for fname, digest in candidate.hashes.items():
            hash_array.append(  # type: ignore
                make_inline_table({"file": fname, "hash": digest}))
        if hash_array:
            hashes_table.add(hash_key, hash_array)
    doc = tomlkit.document()
    doc.add("package", package_aot)  # type: ignore
    meta = tomlkit.table()
    meta.add("files", hashes_table)
    doc.add("metadata", meta)  # type: ignore
    return cast(dict, doc)
コード例 #14
0
    def _add_repositories(section, root: RootDependency):
        """Rewrite ``section['source']`` from the root's warehouses, dropping
        the section entirely when no remote repository remains."""
        # name -> url for every remote (non-local) warehouse
        urls = dict()
        for repo in root.warehouses:
            if isinstance(repo, WarehouseLocalRepo):
                continue
            urls[repo.name] = repo.pretty_url

        # carry over still-used old repositories, refreshing changed URLs
        kept = []
        sources = tomlkit.aot()
        if section.get('source'):
            # tomlkit containers expose .item(); plain mappings don't
            if hasattr(section, 'item'):
                previous = section.item('source')
            else:
                previous = section['source']
            for source in previous:
                if source['name'] not in urls:
                    continue
                if source['url'] != urls[source['name']]:
                    source['url'] = urls[source['name']]
                sources.append(source)
                kept.append(source['name'])

        # append unseen repositories in name order
        for name, url in sorted(urls.items()):
            if name in kept:
                continue
            fresh = tomlkit.table()
            fresh['name'] = name
            fresh['url'] = url
            sources.append(fresh)

        section['source'] = sources

        # remove section if empty
        if not section['source']:
            del section['source']
コード例 #15
0
def test_aot():
    """tomlkit.aot() must produce an AoT instance."""
    created = tomlkit.aot()

    assert isinstance(created, AoT)
コード例 #16
0
def write_toml(
    xgm: XgmContainer,
    output_dirpath: str,
    output_tomlbase: str,
    progressfunc: Callable = None,
) -> None:
    """write an XgmContainer to a plaintext toml file and extracted contents

    :param xgm: XgmContainer instance
    :param output_dirpath: new directory to create and to unpack XGM contents to
    :param output_tomlbase: base filename to which to write the .XGM.toml file (will be
        put in output_dirpath)
    :param progressfunc: function to run whenever an item of the XgmContainer is about
      to be processed. It must accept three arguments: an int item index, an int total
      number of items, and an xgmitem.XgmImageItem/XgmModelItem instance
    """

    # prepare toml dir and toml file. writing a bit early here, but if dir/file can't be
    # written, it's better to error before the time-consuming part instead of after
    tomldir = output_dirpath
    tomlpath = os.path.join(tomldir, output_tomlbase)
    os.makedirs(tomldir, exist_ok=True)

    num_imageitems, num_modelitems = len(xgm.imageitems), len(xgm.modelitems)
    with open(tomlpath, "wt", encoding="utf-8") as tomlfile:
        try:
            tomldoc = tomlkit.parse(_toml_header)

            # [[ImageItem]]: extract each image to a file, record it in the toml
            tomldoc.add("ImageItem", tomlkit.aot())
            for idx, imageitem in enumerate(xgm.imageitems):
                if progressfunc is not None:
                    progressfunc(idx, num_imageitems, imageitem)

                # Extract image item to file
                imageitem_outname = imageitem.name16.replace(os.path.sep,
                                                             "_")  # sanitize
                with open(os.path.join(tomldir, imageitem_outname),
                          "wb") as itemfile:
                    itemfile.write(imageitem.filedata)

                # Gather & add this image item's info to toml document;
                # file-path is only recorded when sanitizing changed the name
                tomlimage = tomlkit.table()
                tomlimage["name16"] = imageitem.name16
                if imageitem_outname != imageitem.name16:
                    tomlimage["file-path"] = imageitem_outname
                # noinspection PyArgumentList
                tomldoc["ImageItem"].append(tomlimage)

            # [[ModelItem]]: extract each model + its animation data, record it
            if xgm.modelitems:
                tomldoc.add(tomlkit.nl())
            tomldoc.add("ModelItem", tomlkit.aot())
            for idx, modelitem in enumerate(xgm.modelitems):
                if progressfunc is not None:
                    progressfunc(idx, num_modelitems, modelitem)

                # Extract model item to file
                modelitem_outname = modelitem.name16.replace(os.path.sep,
                                                             "_")  # sanitize
                with open(os.path.join(tomldir, modelitem_outname),
                          "wb") as itemfile:
                    itemfile.write(modelitem.filedata)
                # Extract animation entry data to file
                animsep_outname = replaceext(modelitem_outname, ANIMSEP_EXT)
                with open(os.path.join(tomldir, animsep_outname),
                          "wb") as animsepfile:
                    animsepfile.write(modelitem.animdata)

                # Gather & add this model item's info to toml document
                tomlmodel = tomlkit.table()
                tomlmodel["name16"] = modelitem.name16
                if modelitem_outname != modelitem.name16:
                    # Fix: was tomlimage["file-path"] (copy-paste from the
                    # image loop), which wrote to the wrong table
                    tomlmodel["file-path"] = modelitem_outname
                # noinspection PyArgumentList
                tomldoc["ModelItem"].append(tomlmodel)

            tomlfile.write(tomldoc.as_string())

        except Exception:
            # noinspection PyBroadException
            try:
                # For debug output, try to write current tomldoc + error traceback
                import traceback

                tb = traceback.format_exc()
                tomlfile.write(tomldoc.as_string())
                tomlfile.write(
                    "\n\n# == ERROR ENCOUNTERED DURING WRITING ==\n#")
                tomlfile.write("\n#".join(tb.split("\n")))
            except Exception:
                pass
            raise
コード例 #17
0
    def __call__(self) -> bool:
        """Add, update, or remove stored credentials in the global config.

        Dispatches on which of hostname/username/password were supplied:
        no username -> remove all credentials for the host; no password ->
        remove one host+user credential; otherwise update an existing
        entry or append a new one. Returns True on success, False when a
        requested removal matched nothing.
        """
        path = self._global_config_path
        if path.exists():
            doc = tomlkit.parse(path.read_text(encoding='utf8'))
        else:
            doc = tomlkit.document()
        # `or` (not a default arg) so an existing-but-empty 'auth' also
        # becomes a fresh AoT
        creds = doc.get('auth') or tomlkit.aot()

        hst = self.args.hostname
        usr = self.args.username
        pwd = self.args.password

        # delete all credentials for hostname
        if not usr:
            new = [cred for cred in creds if cred['hostname'] != hst]
            diff = len(creds) - len(new)
            if diff == 0:
                self.logger.error('cannot find credentials to remove', extra=dict(
                    hostname=hst,
                ))
                return False
            doc['auth'] = new
            path.write_text(tomlkit.dumps(doc), encoding='utf8')
            self.logger.info('credentials removed', extra=dict(
                hostname=hst,
                count=diff,
            ))
            return True

        # delete one credential (hostname + username, no password given)
        if not pwd:
            new = [cred for cred in creds if cred['hostname'] != hst or cred['username'] != usr]
            diff = len(creds) - len(new)
            if diff == 0:
                self.logger.error('cannot find credentials to remove', extra=dict(
                    hostname=hst,
                    username=usr,
                ))
                return False
            doc['auth'] = new
            path.write_text(tomlkit.dumps(doc), encoding='utf8')
            self.logger.info('credentials removed', extra=dict(
                hostname=hst,
                username=usr,
            ))
            return True

        # update password of an existing hostname+username entry in place
        updated = False
        for cred in creds:
            if cred['hostname'] == hst and cred['username'] == usr:
                cred['password'] = pwd
                updated = True
        if updated:
            doc['auth'] = creds
            path.write_text(tomlkit.dumps(doc), encoding='utf8')
            self.logger.info('credentials updated', extra=dict(
                hostname=hst,
                username=usr,
            ))
            return True

        # add a brand-new credential entry
        cred = tomlkit.table()
        cred['hostname'] = hst
        cred['username'] = usr
        cred['password'] = pwd
        creds.append(cred)
        doc['auth'] = creds
        path.write_text(tomlkit.dumps(doc), encoding='utf8')
        self.logger.info('credentials added', extra=dict(
            hostname=hst,
            username=usr,
        ))
        return True
コード例 #18
0
ファイル: test_build.py プロジェクト: pombredanne/tomlkit
def test_build_example(example):
    """Rebuild the reference 'example' TOML fixture programmatically and
    assert the rendered string matches the fixture byte-for-byte.

    Every add/append and indent below is deliberate: insertion order and
    whitespace determine the serialized output.
    """
    content = example("example")

    doc = document()
    doc.add(comment("This is a TOML document. Boom."))
    doc.add(nl())
    doc.add("title", "TOML Example")

    owner = table()
    owner.add("name", "Tom Preston-Werner")
    owner.add("organization", "GitHub")
    owner.add("bio", "GitHub Cofounder & CEO\\nLikes tater tots and beer.")
    owner.add("dob", datetime.datetime(1979, 5, 27, 7, 32, tzinfo=_utc))
    owner["dob"].comment("First class dates? Why not?")

    doc.add("owner", owner)

    database = table()
    database["server"] = "192.168.1.1"
    database["ports"] = [8001, 8001, 8002]
    database["connection_max"] = 5000
    database["enabled"] = True

    doc["database"] = database

    # sub-tables alpha/beta indented 2 spaces, with an indented comment
    servers = table()
    servers.add(nl())
    c = comment(
        "You can indent as you please. Tabs or spaces. TOML don't care."
    ).indent(2)
    c.trivia.trail = ""
    servers.add(c)
    alpha = table()
    servers.append("alpha", alpha)
    alpha.indent(2)
    alpha.add("ip", "10.0.0.1")
    alpha.add("dc", "eqdc10")

    beta = table()
    servers.append("beta", beta)
    beta.add("ip", "10.0.0.2")
    beta.add("dc", "eqdc10")
    beta.add("country", "中国")
    beta["country"].comment("This should be parsed as UTF-8")
    beta.indent(2)

    doc["servers"] = servers

    clients = table()
    doc.add("clients", clients)
    clients["data"] = item(
        [["gamma", "delta"],
         [1, 2]]).comment("just an update to make sure parsers support it")

    clients.add(nl())
    clients.add(comment("Line breaks are OK when inside arrays"))
    # array built from literal TOML text to preserve its line breaks
    clients["hosts"] = array("""[
  "alpha",
  "omega"
]""")

    doc.add(nl())
    doc.add(comment("Products"))

    # [[products]] array of tables
    products = aot()
    doc["products"] = products

    hammer = table().indent(2)
    hammer["name"] = "Hammer"
    hammer["sku"] = 738594937

    nail = table().indent(2)
    nail["name"] = "Nail"
    nail["sku"] = 284758393
    nail["color"] = "gray"

    products.append(hammer)
    products.append(nail)

    assert content == doc.as_string()
コード例 #19
0
ファイル: imctoml.py プロジェクト: boringhexi/gitarootools
def write_toml(
    imccontainer,
    output_dirpath,
    output_tomlbase,
    progressfunc=None,
):
    """write an ImcContainer to a plaintext toml file and extracted .sub.imc files

    imccontainer: ImcContainer instance
    output_dirpath: new directory to create and to unpack IMC contents to
    output_tomlbase: base filename to which to write the .IMC.toml file (will be put in
      output_dirpath)
    progressfunc: function to run whenever a subsong is about to be extracted from
      the ImcContainer. It must accept three arguments: an int subsong index,
      an int total number of subsongs, and an imccontainer.ContainerSubsong instance
    """

    # for zero-padding the number in the filename of each extracted subsong, 03 vs 003
    if imccontainer.num_subsongs <= 100:
        ssidx_width = 2
    else:
        ssidx_width = len(str(imccontainer.num_subsongs - 1))

    # prepare toml dir and toml file. writing a bit early here, but if dir/file can't be
    # written, it's better to error before the time-consuming part instead of after
    tomldir = output_dirpath
    tomlpath = os.path.join(tomldir, output_tomlbase)
    os.makedirs(tomldir, exist_ok=True)
    with open(tomlpath, "wt", encoding="utf-8") as tomlfile:
        try:

            # [[Subsong]] array of tables, one entry per extracted subsong
            tomldoc = tomlkit.parse(_toml_header)
            tomldoc.add("Subsong", tomlkit.aot())

            for ssidx, csubsong in enumerate(imccontainer.csubsongs):
                if progressfunc is not None:
                    progressfunc(ssidx, imccontainer.num_subsongs, csubsong)

                # Extract subsong to file
                ss_basefilename = f"{ssidx:0{ssidx_width}}.{csubsong.name}{SUBIMC_EXT}"
                # sanitize dir separators out of filename so it doesn't screw up
                ss_basefilename = ss_basefilename.replace(os.path.sep, "_")
                with open(os.path.join(tomldir, ss_basefilename),
                          "wb") as subsongfile:
                    subsongfile.write(csubsong.get_imcdata())

                # Gather & add this subsong's info to toml document
                tomlsubsong = tomlkit.table()
                tomlsubsong["name"] = csubsong.name
                tomlsubsong["loadmode"] = csubsong.loadmode
                tomlsubsong["basefile"] = ss_basefilename
                # example replacement-audio line, emitted as a toml comment
                # for the user to uncomment and edit
                channel_nums = "".join(
                    str(x) for x in range(1, csubsong.num_channels + 1))
                comment = (f'channels-{channel_nums}-to-{channel_nums} = "'
                           f"replacement-audio{SUBSONG_FORMATS['wav']}"
                           '"')
                tomlsubsong.add(tomlkit.comment(comment))

                # Gather & add this subsong's diff-patch-info to toml document,
                # omitting anything with a None value
                tomldiffpinfo = tomlkit.table().indent(4)
                if csubsong.rawname is not None:
                    # bytes to ints
                    tomldiffpinfo["rawname"] = [x for x in csubsong.rawname]
                if not (csubsong.unk1, csubsong.unk2) == (None, None):
                    unk1 = 0 if csubsong.unk1 is None else csubsong.unk1
                    unk2 = 0 if csubsong.unk2 is None else csubsong.unk2
                    tomldiffpinfo["unk"] = [unk1, unk2]
                # saving original block layout
                if csubsong.original_block_layout is not None:
                    ofbp, obpc = csubsong.original_block_layout
                    # convert from possibly a tomlkit Integer (which retains indent) to
                    # a plain ol' int to prevent indent problems when rewritten to toml
                    ofbp, obpc = int(ofbp), int(obpc)
                    if ofbp is not None:
                        tomldiffpinfo["frames-per-block"] = ofbp
                    if obpc is not None:
                        tomldiffpinfo["blocks-per-channel"] = obpc
                if tomldiffpinfo:  # if tomldiffpinfo is empty, we won't bother
                    tomlsubsong.add("diff-patch-info", tomldiffpinfo)

                # noinspection PyArgumentList
                tomldoc["Subsong"].append(tomlsubsong)

        except Exception:
            # noinspection PyBroadException
            try:
                # For debug output, try to write current tomldoc + error traceback
                import traceback

                tb = traceback.format_exc()
                tomlfile.write(tomldoc.as_string())
                tomlfile.write(
                    "\n\n# == ERROR ENCOUNTERED DURING WRITING ==\n#")
                tomlfile.write("\n#".join(tb.split("\n")))
            except Exception:
                pass
            raise

        tomlfile.write(tomldoc.as_string())
コード例 #20
0
ファイル: matterbridge.py プロジェクト: rcos/rcos-automation
def run():
    """Interactively generate a Matterbridge config bridging Discord and Mattermost.

    Reads credentials from environment variables (falling back to interactive
    prompts via ``get_from_env_or_input``), collects Discord/Mattermost channel
    pairs from stdin (one ``discord,mattermost`` pair per line, blank line to
    finish), and writes the resulting TOML configuration to ``matterbridge.toml``.
    """
    # The toml document
    doc = document()

    # General settings
    doc['general'] = table()
    doc['general']['IgnoreFailureOnStart'] = False

    # DISCORD
    print('Let\'s setup Discord...')
    doc['discord'] = table()
    doc['discord'].comment('Discord server connection settings')
    doc['discord']['rcos'] = table()

    doc['discord']['rcos']['Token'] = get_from_env_or_input(
        'DISCORD_BOT_TOKEN', 'Bot Token: ')
    doc['discord']['rcos']['Token'].comment(
        'SECRET bot token found on https://discord.com/developers')

    doc['discord']['rcos']['Server'] = get_from_env_or_input(
        'RCOS_SERVER_ID', 'Server: ')
    doc['discord']['rcos']['Server'].comment(
        'The ID of the Discord server. Can be found in URL when on Discord or if Developer Mode is turned on and right-clicking the server icon.'
    )

    doc['discord']['rcos']['RemoteNickFormat'] = get_from_env_or_input(
        'MATTERBRIDGE_DISCORD_PREFIX',
        'Message prefix: ',
        default=DEFAULT_REMOTE_NICKNAME_FORMAT)
    doc['discord']['rcos']['RemoteNickFormat'].comment(
        'The prefix to apply to messages.')

    # MATTERMOST
    print('\n\nNow Mattermost...')
    doc['mattermost'] = table()
    doc['mattermost'].comment('Mattermost server connection settings')
    doc['mattermost']['rcos'] = table()

    doc['mattermost']['rcos']['Server'] = 'chat.rcos.io:443'
    doc['mattermost']['rcos']['Server'].comment(
        'URL of the Mattermost server with no http:// or https:// prepended')

    doc['mattermost']['rcos']['Team'] = 'rcos'
    doc['mattermost']['rcos']['Team'].comment(
        'The "team", found as the first part of URL when on Mattermost server')

    # NOTE(review): the original file had these two statements fused into a
    # censored '******' run; reconstructed from the parallel structure of the
    # surrounding assignments — confirm against the upstream repository.
    doc['mattermost']['rcos']['Login'] = get_from_env_or_input(
        'MATTERMOST_USERNAME', 'Username: ')
    doc['mattermost']['rcos']['Login'].comment(
        'Mattermost needs a user account to send/receive messages. This is the account username.'
    )

    doc['mattermost']['rcos']['Password'] = get_from_env_or_input(
        'MATTERMOST_PASSWORD', 'Password: ')
    doc['mattermost']['rcos']['Password'].comment(
        'The password of the Mattermost account to use.')

    doc['mattermost']['rcos']['RemoteNickFormat'] = get_from_env_or_input(
        'MATTERBRIDGE_MATTERMOST_PREFIX',
        'Message prefix: ',
        default=DEFAULT_REMOTE_NICKNAME_FORMAT)
    doc['mattermost']['rcos']['RemoteNickFormat'].comment(
        'The prefix to apply to messages.')

    # The channels to pair
    # (Discord channel, Mattermost channel)
    channel_pairs = []
    print(
        "Enter the channels you want to pair on each line and enter an empty line to finish.\nDiscord,Mattermost"
    )

    line = input()
    while len(line) > 0:
        if ',' not in line:
            # Robustness: a line without a comma would previously crash later
            # with an IndexError; reject it and keep prompting instead.
            print('Expected "discord_channel,mattermost_channel" - try again.')
        else:
            # maxsplit=1 guarantees exactly two parts; strip stray whitespace
            # so "general, town-square" also works.
            channel_pairs.append([part.strip() for part in line.split(',', 1)])
        line = input()

    gateways = aot()

    # Create the gateways in the document.
    # BUG FIX: the original iterated `enumerate(channel_pairs)`, so `pair` was
    # an (index, [discord, mattermost]) tuple and the channel fields were set
    # to the index / the whole list rather than the channel names.
    for discord_channel, mattermost_channel in channel_pairs:
        gateway = table()
        gateway['name'] = f'gateway-{mattermost_channel}'
        gateway['enable'] = True

        # inout means that messages are sent/received both ways
        gateway['inout'] = aot()
        gateway_discord = table()
        gateway_discord['account'] = 'discord.rcos'
        gateway_discord['channel'] = discord_channel
        gateway['inout'].append(gateway_discord)

        gateway_mattermost = table()
        gateway_mattermost['account'] = 'mattermost.rcos'
        gateway_mattermost['channel'] = mattermost_channel
        gateway['inout'].append(gateway_mattermost)

        gateways.append(gateway)

    doc.add('gateway', gateways)

    # Write the output to a file
    with open('matterbridge.toml', 'w') as outfile:
        outfile.write(dumps(doc))
        print(
            'Wrote output to matterbridge.toml. Now place it where Matterbridge wants it!'
        )
コード例 #21
0
ファイル: pipfile.py プロジェクト: yyolk/dephell
    def dumps(self, reqs, project: RootDependency, content=None) -> str:
        """Serialize *reqs* into Pipfile-format TOML.

        When *content* (an existing Pipfile text) is given, it is parsed and
        updated in place so that unrelated entries and formatting survive;
        otherwise a fresh document is built.  Returns the TOML text with a
        single trailing newline.
        """
        doc = tomlkit.parse(content) if content else tomlkit.document()

        # repositories
        sources = doc['source'] if 'source' in doc else tomlkit.aot()
        known_repos = {repo['name'] for repo in sources}
        sources_dirty = False
        for req in reqs:
            if not isinstance(req.dep.repo, WarehouseBaseRepo):
                continue
            for repo in req.dep.repo.repos:
                if repo.from_config or repo.name in known_repos:
                    continue
                # local repos break pipenv: https://github.com/pypa/pipenv/issues/2231
                if isinstance(repo, WarehouseLocalRepo):
                    continue
                known_repos.add(repo.name)
                entry = tomlkit.table()
                entry['name'] = repo.name
                entry['url'] = repo.pretty_url
                entry['verify_ssl'] = repo.pretty_url.startswith('https://')
                sources.append(entry)
                sources_dirty = True
        # pipenv doesn't work without explicit repo
        if not known_repos:
            entry = tomlkit.table()
            entry['name'] = 'pypi'
            entry['url'] = 'https://pypi.org/simple/'
            entry['verify_ssl'] = True
            sources.append(entry)
            sources_dirty = True
        if sources_dirty:
            doc['source'] = sources

        # python version
        if project.python:
            python = Pythons(abstract=True).get_by_spec(project.python)
            if 'requires' not in doc:
                doc['requires'] = tomlkit.table()
            doc['requires']['python_version'] = str(python.get_short_version())

        # dependencies: first pass creates missing sections and drops
        # entries no longer requested, remembering the original (raw)
        # spelling of each name that survives.
        raw_spelling = dict()
        for section_name, dev_flag in (('packages', False), ('dev-packages', True)):
            if section_name not in doc:
                doc[section_name] = tomlkit.table()
                continue

            wanted = {req.name for req in reqs if req.is_dev is dev_flag}
            for name in dict(doc[section_name]):
                normalized = canonicalize_name(name)
                raw_spelling[normalized] = name
                if normalized not in wanted:
                    del doc[section_name][name]

        # second pass writes the requested packages
        for section_name, dev_flag in (('packages', False), ('dev-packages', True)):
            for req in reqs:
                if req.is_dev is not dev_flag:
                    continue
                raw_name = raw_spelling.get(req.name, req.raw_name)
                previous = doc[section_name].get(raw_name)

                # do not overwrite dep if nothing is changed
                if previous:
                    old_dep = self._make_deps(
                        root=RootDependency(),
                        name=raw_name,
                        content=previous,
                    )[0]
                    if req.same_dep(old_dep):
                        continue

                # overwrite
                doc[section_name][raw_name] = self._format_req(req=req)

        return tomlkit.dumps(doc).rstrip() + '\n'