def add_configuration(configurations: list[MypyDistConf], distribution: str) -> None:
    """Append mypy test configurations declared in a distribution's METADATA.toml.

    Reads the [mypy-tests] table and appends one MypyDistConf per section
    to *configurations* (mutated in place).  No-op when the table is absent.
    """
    metadata_path = os.path.join("stubs", distribution, "METADATA.toml")
    with open(metadata_path) as handle:
        metadata = dict(tomli.loads(handle.read()))

    tests_table = metadata.get("mypy-tests")
    if not tests_table:
        # No [mypy-tests] table: nothing to add for this distribution.
        return
    assert isinstance(tests_table, dict), "mypy-tests should be a section"

    for name, section in tests_table.items():
        assert isinstance(section, dict), "{} should be a section".format(name)
        module_name = section.get("module_name")
        assert module_name is not None, "{} should have a module_name key".format(name)
        assert isinstance(module_name, str), "{} should be a key-value pair".format(name)
        values = section.get("values")
        assert values is not None, "{} should have a values section".format(name)
        assert isinstance(values, dict), "values should be a section"
        # Copy so later mutation of the parsed TOML cannot affect the config.
        configurations.append(MypyDistConf(module_name, values.copy()))
def read(self, filenames):
    """Read a (single) config file, storing parsed TOML in ``self.data``.

    Returns a list of the files actually read (configparser convention):
    ``[filename]`` on success, ``[]`` if the file is missing or unreadable.
    Raises TomlDecodeError for invalid TOML, or CoverageException when the
    file looks like coverage TOML config but TOML support is not installed.
    """
    # RawConfigParser takes a filename or list of filenames, but we only
    # ever call this with a single filename.
    assert isinstance(filenames, (bytes, str, os.PathLike))
    filename = os.fspath(filenames)

    try:
        with open(filename, encoding='utf-8') as fp:
            toml_text = fp.read()
    except OSError:
        # Missing/unreadable file is not an error: report "nothing read".
        return []
    if tomli is not None:
        # Expand ${ENV_VAR} style references before parsing.
        toml_text = substitute_variables(toml_text, os.environ)
        try:
            self.data = tomli.loads(toml_text)
        except tomli.TOMLDecodeError as err:
            raise TomlDecodeError(str(err)) from err
        return [filename]
    else:
        # tomli isn't installed.  Cheap regex check for a coverage section.
        has_toml = re.search(r"^\[tool\.coverage\.", toml_text, flags=re.MULTILINE)
        if self.our_file or has_toml:
            # Looks like they meant to read TOML, but we can't read it.
            msg = "Can't read {!r} without TOML support. Install with [toml] extra"
            raise CoverageException(msg.format(filename))
        return []
def check_metadata():
    """Validate every stubs/*/METADATA.toml file.

    Checks the distribution version string, that only known keys appear,
    the types of the python2/requires values, and the version constraint
    of every dependency in "requires".
    """
    for distribution in os.listdir("stubs"):
        with open(os.path.join("stubs", distribution, "METADATA.toml")) as f:
            data = tomli.loads(f.read())
        assert "version" in data, f"Missing version for {distribution}"
        version = data["version"]
        msg = f"Unsupported Python version {version}"
        assert isinstance(version, str), msg
        # Either a concrete version (1.2, 1.2.3) or a wildcard (1.*, 1.2.*).
        assert re.fullmatch(r"\d+(\.\d+)+|\d+(\.\d+)*\.\*", version), msg
        for key in data:
            assert key in metadata_keys, f"Unexpected key {key} for {distribution}"
        assert isinstance(data.get("python2", False), bool), f"Invalid python2 value for {distribution}"
        assert isinstance(data.get("requires", []), list), f"Invalid requires value for {distribution}"
        for dep in data.get("requires", []):
            assert isinstance(dep, str), f"Invalid dependency {dep} for {distribution}"
            for space in " \t\n":
                assert space not in dep, f"For consistency dependency should not have whitespace: {dep}"
            assert ";" not in dep, f"Semicolons in dependencies are not supported, got {dep}"
            stripped, relation, dep_version = _strip_dep_version(dep)
            if relation:
                msg = f"Bad version in dependency {dep}"
                assert relation in {"==", ">", ">=", "<", "<="}, msg
                # BUG FIX: these checks previously validated the outer
                # distribution `version` instead of the dependency's own
                # `dep_version`, so malformed dependency versions passed.
                assert dep_version.count(".") <= 2, msg
                for part in dep_version.split("."):
                    assert part.isnumeric(), msg
def read_one_toml(self, tomlfile: str) -> None:
    """
    Read one .toml file if it exists, adding values to `self`.
    """
    path = Path(tomlfile)
    if not path.exists():
        return

    text = path.read_text()

    if tomli is None:
        # Toml support isn't installed.  Only complain if the config file
        # actually seems to have settings for us.
        if re.search(r"(?m)^\[tool\.scriv\]", text):
            msg = (
                "Can't read {!r} without TOML support. " +
                "Install with [toml] extra"
            ).format(tomlfile)
            raise Exception(msg)
        return

    # We have toml installed, parse the file and look for our settings.
    parsed = tomli.loads(text)
    try:
        our_settings = parsed["tool"]["scriv"]
    except KeyError:
        # No settings for us.
        return

    for attrdef in attr.fields(_Options):
        if attrdef.name in our_settings:
            setattr(self._options, attrdef.name, our_settings[attrdef.name])
def check_metadata() -> None:
    """Validate every stubs/*/METADATA.toml: version, keys, requires, tool tables."""
    for distribution in os.listdir("stubs"):
        metadata_file = os.path.join("stubs", distribution, "METADATA.toml")
        with open(metadata_file) as fh:
            data = tomli.loads(fh.read())

        assert "version" in data, f"Missing version for {distribution}"
        version = data["version"]
        msg = f"Unsupported version {repr(version)}"
        assert isinstance(version, str), msg
        # Check that the version parses
        Version(version.removesuffix(".*"))

        for key in data:
            assert key in metadata_keys, f"Unexpected key {key} for {distribution}"

        assert isinstance(data.get("requires", []), list), f"Invalid requires value for {distribution}"
        for dep in data.get("requires", []):
            assert isinstance(dep, str), f"Invalid requirement {repr(dep)} for {distribution}"
            for space in " \t\n":
                assert space not in dep, f"For consistency, requirement should not have whitespace: {dep}"
            # Check that the requirement parses
            Requirement(dep)

        declared_tools = set(data.get("tool", []))
        assert declared_tools.issubset(tool_keys.keys()), f"Unrecognised tool for {distribution}"
        for tool, tk in tool_keys.items():
            for key in data.get("tool", {}).get(tool, {}):
                assert key in tk, f"Unrecognised {tool} key {key} for {distribution}"
def get_backends():
    """Return (name, module) pairs for every registered ibis backend except spark."""
    config = tomli.loads(Path("pyproject.toml").read_text())
    plugins = config["tool"]["poetry"]["plugins"]["ibis.backends"]
    # Spark is deliberately excluded from the generated list.
    del plugins["spark"]
    pairs = []
    for name in sorted(plugins):
        pairs.append((name, getattr(ibis, name)))
    return pairs
def generate_mod(mod_file, url_base, flags, writer, modcount):
    """Inspect a mod jar, extract its id/version, and write one manifest line.

    Metadata is read from mcmod.info (Forge <= 1.12) or META-INF/mods.toml
    (Forge >= 1.13).  Missing or unusable data is guessed from the filename
    and the jar's sha256.
    """
    try:
        # NOTE: renamed from `zip`, which shadowed the builtin.
        archive = zipfile.ZipFile(mod_file)
    except Exception as e:
        print(mod_file + " failed to read")
        raise e
    mod_sha = sha256(mod_file)
    name = None
    version = None
    if 'mcmod.info' in archive.namelist():
        try:
            f = archive.open('mcmod.info')
            data = json.load(f)
            if 'modListVersion' in data and data['modListVersion'] == 2:
                data = data['modList']
            name = data[0]['modid']
            if 'version' in data[0]:
                version = data[0]['version']
            else:
                print("Warning: Mod {} is apparently incapable of specifying a version number in their mcmod.info. Using 'unknown', this may have weird side effects".format(name))
                version = 'unknown'
        # BUG FIX: JSONDecodeError is a subclass of ValueError, so it must be
        # caught first -- previously this branch was unreachable.
        except json.decoder.JSONDecodeError as e:
            print("Warning: Author of mod {} is apparently incapable of writing correctly formatted json. Guessing information, this may have weird side effects ({})".format(mod_file, e))
        except ValueError as e:
            print("Warning: Mod {} does not contain mcmod.info (or it does not follow correct format). Guessing information, this may have weird side effects".format(mod_file))
        except Exception as e:
            print("Irgendwas kaputt: {}".format(e))
    elif 'META-INF/mods.toml' in archive.namelist():
        try:
            # Only use tomli if we have to parse TOML
            import tomli
            with archive.open('META-INF/mods.toml', 'r') as f:
                toml_text = f.read().decode('utf-8')
            data = tomli.loads(toml_text)
            mod = data['mods'][0]
            name = mod['modId']
            if 'version' in mod and mod['version'] != "${file.jarVersion}":
                version = mod['version']
            else:
                print("{} failed to actually have gradle replace '${{file.jarVersion}}'. Good job.".format(mod_file))
        except tomli.TOMLDecodeError as e:
            print("{} does appear to be contain a mods.toml with invalid TOML. ({})".format(mod_file, e))
    else:
        print("Warning: Mod {} does not contain mcmod.info (or it does not follow correct format). Guessing information, this may have weird side effects".format(mod_file))
    if name is None:
        name = guess_mod_name(path.basename(mod_file))
    if version is None or not version[0].isdigit():
        # Default the version to the first 8 bytes of the sha if it's unknown,
        # or doesn't look version-like (digits).  This keeps the update logic
        # from getting stuck on mods whose version string never changes.
        version = mod_sha[0:8]
    name = apply_mod_count(modcount, name)
    our_flags = flags[name] if name in flags else ''
    writer.write("{},{},{}/mods/{},mod,{},{}\n".format(name, version, url_base, urllib.parse.quote(path.basename(mod_file)), mod_sha, our_flags))
def is_supported(distribution, major):
    """Return whether *distribution*'s stubs support the given Python major version."""
    dist_path = Path("stubs", distribution)
    with open(dist_path / "METADATA.toml") as metadata_file:
        metadata = dict(tomli.loads(metadata_file.read()))
    if major == 2:
        # Python 2 is not supported by default.
        flagged = bool(metadata.get("python2", False))
        return flagged or (dist_path / "@python2").exists()
    # Python 3 is supported by default.
    return has_py3_stubs(dist_path)
def main() -> None:
    """Print restructuredText table of dependencies."""
    pyproject_path = PROJECT / "pyproject.toml"
    raw = pyproject_path.read_text()
    # Strip/neutralize Jinja templating so the file parses as plain TOML.
    raw = JINJA_PATTERN.sub("", raw)
    raw = JINJA_PATTERN2.sub("x", raw)
    pyproject = tomli.loads(raw)

    dependencies = {
        canonicalize_name(name)
        for section in ["dependencies", "dev-dependencies"]
        for name in pyproject["tool"]["poetry"][section].keys()
        if name != "python"
    }

    # Descriptions come from the lock file, keyed by canonical name.
    lock_path = PROJECT / "poetry.lock"
    lock_data = tomli.loads(lock_path.read_text())
    descriptions = {}
    for package in lock_data["package"]:
        if package["name"] in dependencies:
            descriptions[canonicalize_name(package["name"])] = truncate_description(
                package["description"]
            )

    table = {
        format_dependency(name): descriptions[name]
        for name in sorted(dependencies)
    }

    width = max(len(name) for name in table)
    width2 = max(len(description) for description in table.values())
    separator = LINE_FORMAT.format(
        name="=" * width, width=width, description="=" * width2
    )

    print(separator)
    for name, description in table.items():
        print(LINE_FORMAT.format(name=name, width=width, description=description))
    print(separator)
def test_invalid_config(self):
    """An unknown key in the config must be rejected by the Settings model."""
    config = tomli.loads(self.sample_config)
    # Inject a key the model does not declare.
    config["foo"] = "bar"
    with pytest.raises(ValidationError) as exc_info:
        models.Settings(**config)
    # pydantic reports unknown fields as "extra fields not permitted".
    assert exc_info.value.errors() == [{
        'loc': ('foo', ),
        'msg': 'extra fields not permitted',
        'type': 'value_error.extra'
    }]
def load_project_config_data(path: Path) -> Optional[Tuple[Path, DictStrAny]]:
    """Return (config_path, tool table) from the first config file under *path*.

    Prefers ".<app>.toml" over "pyproject.toml"; returns None when neither
    file exists.  The tool table is {} when the file has no entry for us.
    """
    for candidate in (f".{__app__}.toml", "pyproject.toml"):
        config_path = path / candidate
        if config_path.exists():
            parsed = tomli.loads(config_path.read_text())
            tool_table = parsed.get("tool", {}).get(__app__, {}) or {}
            return (config_path, tool_table)
    return None
def run(run_count=5000):
    """Benchmark several TOML parsers against the pytomlpp baseline on data.toml."""
    with open('data.toml', 'r', encoding='utf-8') as f:
        test_data = f.read()
    print(f'Parsing data.toml {run_count} times:')
    baseline = benchmark('pytomlpp', run_count, lambda: pytomlpp.loads(test_data))
    contenders = [
        ('tomli', lambda: tomli.loads(test_data)),
        ('toml', lambda: toml.loads(test_data)),
        ('qtoml', lambda: qtoml.loads(test_data)),
        ('tomlkit', lambda: tomlkit.parse(test_data)),
    ]
    for parser_name, parse in contenders:
        benchmark(parser_name, run_count, parse, compare_to=baseline)
def get_config():
    """Load config.toml from the current directory into a Settings model."""
    config_path = os.path.join(os.getcwd(), "config.toml")
    with open(config_path) as handle:
        raw = handle.read()
    parsed = tomli.loads(raw)
    return models.Settings(**parsed)
def read_dependencies(distribution: str) -> list[str]:
    """Return *distribution*'s dependencies with their "types-" prefix stripped."""
    with open(os.path.join("stubs", distribution, "METADATA.toml")) as handle:
        metadata = dict(tomli.loads(handle.read()))
    requires = metadata.get("requires", [])
    assert isinstance(requires, list)
    prefix = "types-"
    stripped = []
    for requirement in requires:
        assert isinstance(requirement, str)
        # Every dependency must itself be a stubs distribution.
        assert requirement.startswith(prefix)
        stripped.append(requirement[len(prefix):])
    return stripped
def validate_pyproject_toml_generated(path: Path):
    """Parse *path* as TOML and run the validate-pyproject validator on it."""
    from validate_pyproject import api

    # validate-pyproject does not parse TOML itself: it expects a plain dict,
    # so tomli does the parsing here.
    with path.open() as fr:
        pyproject_as_dict = tomli.loads(fr.read())

    validator = api.Validator()
    validator(pyproject_as_dict)
def parse_pyproject_toml(path_config: str) -> Dict[str, Any]:
    """Parse a pyproject toml file, pulling out relevant parts for Black

    If parsing fails, will raise a tomli.TOMLDecodeError
    """
    with open(path_config, encoding="utf8") as handle:
        document = tomli.loads(handle.read())
    black_table = document.get("tool", {}).get("black", {})
    normalized = {}
    for key, value in black_table.items():
        # CLI-style keys ("--fast", "line-length") become attribute names.
        normalized[key.replace("--", "").replace("-", "_")] = value
    return normalized
def load_config_dict_from_file( filepath: Path, ) -> Optional[Dict[str, Union[str, List[str]]]]: """Load pytest configuration from the given file path, if supported. Return None if the file does not contain valid pytest configuration. """ # Configuration from ini files are obtained from the [pytest] section, if present. if filepath.suffix == ".ini": iniconfig = _parse_ini_config(filepath) if "pytest" in iniconfig: return dict(iniconfig["pytest"].items()) else: # "pytest.ini" files are always the source of configuration, even if empty. if filepath.name == "pytest.ini": return {} # '.cfg' files are considered if they contain a "[tool:pytest]" section. elif filepath.suffix == ".cfg": iniconfig = _parse_ini_config(filepath) if "tool:pytest" in iniconfig.sections: return dict(iniconfig["tool:pytest"].items()) elif "pytest" in iniconfig.sections: # If a setup.cfg contains a "[pytest]" section, we raise a failure to indicate users that # plain "[pytest]" sections in setup.cfg files is no longer supported (#3086). fail(CFG_PYTEST_SECTION.format(filename="setup.cfg"), pytrace=False) # '.toml' files are considered if they contain a [tool.pytest.ini_options] table. elif filepath.suffix == ".toml": import tomli toml_text = filepath.read_text(encoding="utf-8") try: config = tomli.loads(toml_text) except tomli.TOMLDecodeError as exc: raise UsageError(str(exc)) from exc result = config.get("tool", {}).get("pytest", {}).get("ini_options", None) if result is not None: # TOML supports richer data types than ini files (strings, arrays, floats, ints, etc), # however we need to convert all scalar values to str for compatibility with the rest # of the configuration system, which expects strings only. def make_scalar(v: object) -> Union[str, List[str]]: return v if isinstance(v, list) else str(v) return {k: make_scalar(v) for k, v in result.items()} return None
def _is_compatible_stub_package(self, stub_dir: str) -> bool: """Does a stub package support the target Python version? Stub packages may contain a metadata file which specifies whether the stubs are compatible with Python 2 and 3. """ metadata_fnam = os.path.join(stub_dir, 'METADATA.toml') if os.path.isfile(metadata_fnam): # Delay import for a possible minor performance win. import tomli with open(metadata_fnam, encoding="utf-8") as f: metadata = tomli.loads(f.read()) if self.python_major_ver == 2: return bool(metadata.get('python2', False)) else: return bool(metadata.get('python3', True)) return True
def mkfunc(compiler, genfiles, execute, plot, build):
    """Generate tests

    For each module/function listed in gen.toml, render a Fortran test and a
    CMakeLists.txt from Jinja templates, configure with cmake, and optionally
    build, execute (capturing output to a .dat file), and plot the results.
    """
    template_loader = FileSystemLoader('./')
    env = Environment(loader=template_loader)
    buildtemplate = env.get_template("cmake.jinja")
    tests_dict = tomli.loads(Path("gen.toml").read_bytes().decode())
    execenv = os.environ.copy()
    # The generated CMake projects pick up the Fortran compiler from FC.
    execenv["FC"] = compiler
    for mod, ftests in tests_dict.items():
        testtemplate = env.get_template(f"{mod}.f90.jinja")
        lfmn = f"lfortran_intrinsic_{mod}.f90"
        lfmod = Path(Path.cwd().parent / lfmn).absolute()
        moddirname = Path(Path.cwd() / f"gentests/{mod}").absolute()
        Path.mkdir(moddirname, parents=True, exist_ok=True)
        for func in ftests:
            funcdirname = moddirname / func['fname']
            Path.mkdir(funcdirname, exist_ok=True)
            # Each test dir gets its own copy of the intrinsic module source.
            shutil.copy(lfmod, funcdirname)
            fn = f"{func['fname']}_test.f90"
            test_data = {
                'test_name': f"{func['fname']}_test",
                'test_files': [lfmn, fn]
            }
            func['compiler'] = compiler
            Path.write_text(funcdirname / fn, testtemplate.render(func))
            Path.write_text(funcdirname / 'CMakeLists.txt', buildtemplate.render(test_data))
            subprocess.Popen(['cmake', '.'], env=execenv, cwd=funcdirname).wait()
            if build:
                subprocess.Popen(['cmake', '--build', '.'], env=execenv, cwd=funcdirname).wait()
            output_path = str(funcdirname.absolute()) + f"/{compiler}_{func['fname']}_output.dat"
            if execute:
                with open(output_path, "w") as res:
                    subprocess.Popen([f"./{test_data['test_name']}"], env=execenv,
                                     cwd=funcdirname, stdout=res).wait()
            if plot:
                # BUG FIX: previously passed Path + str to mkplot, which raises
                # TypeError; build the path string once and reuse it.
                mkplot(output_path)
def read_config_toml(project_root: Path, config_file: str) -> _ConfigDict:
    """Load the [tool.ward] table from *config_file* under *project_root*.

    Returns {} when the file is missing; raises click.FileError when it
    cannot be read or parsed.
    """
    path = project_root / config_file
    if not path.is_file():
        return {}

    try:
        document = tomli.loads(path.read_text(encoding="utf-8"))
    except (tomli.TOMLDecodeError, OSError) as e:
        raise click.FileError(filename=config_file, hint=f"Error reading {config_file}:\n{e}")

    raw_config = document.get("tool", {}).get("ward", {})
    normalized = {}
    for key, value in raw_config.items():
        # CLI-style keys ("--path", "fail-limit") become attribute names.
        normalized[key.replace("--", "").replace("-", "_")] = value
    return normalized
def get_project_name() -> str:
    """Find project name, which is prefix for info distributions.

    Returns:
        str: the name of package specified in pyproject.toml

    Raises:
        EnvironmentError: if file pyproject.toml is not found.
    """
    pyproject_path = search_upwards_for_file("pyproject.toml")
    if pyproject_path is None:
        message = (
            "Illegal directory: cannot find file pyproject.toml "
            f"from current directory: {Path.cwd()}"
        )
        raise EnvironmentError(message)
    parsed = tomli.loads(pyproject_path.read_text())
    # Normalize to the importable module name.
    name = parsed["tool"]["poetry"]["name"].replace("-", "_")
    print(f"Package {name} is found in {pyproject_path.absolute()}")
    return name
def build_deps(package, sdist_file):
    """Find out what are the build dependencies for a package.

    We need to "manually" install them, since pip will not install build
    deps with `--no-build-isolation`.
    """
    # delay importing, since pytest discovery phase may hit this file from a
    # testenv without tomli
    import tomli as toml

    archive = Archive(sdist_file)
    pyproject = _read_pyproject(archive)
    info = toml.loads(pyproject)

    deps = list(info.get("build-system", {}).get("requires", []))
    deps.extend(EXTRA_BUILD_DEPS.get(package, []))

    # Remove setuptools from requirements (and deduplicate)
    requirements = {Requirement(d).name: d for d in deps}
    return [spec for name, spec in requirements.items() if name != "setuptools"]
def main(args: argparse.Namespace) -> None:
    """Regenerate setup.py from a poetry project and print it black-formatted.

    Reads the project in ``args.input_directory``, builds the setup.py code
    via poetry's sdist builder, formats it with the project's own black
    configuration, and writes the result to ``args.output_file``.
    """
    input_dir = args.input_directory

    # create poetry things
    poetry = Factory().create_poetry(input_dir)
    sdist_builder = SdistBuilder(poetry)

    # generate setup.py code
    code = sdist_builder.build_setup().decode("UTF-8")

    # pull out black config
    config = tomli.loads(input_dir.joinpath("pyproject.toml").read_text())
    black_config = config["tool"]["black"]
    # BUG FIX: black.Mode takes `string_normalization`, which is the *inverse*
    # of pyproject's `skip_string_normalization`; previously the skip flag was
    # passed through unnegated, inverting the intended behavior.
    black_config["string_normalization"] = not black_config.pop(
        "skip_string_normalization", False
    )
    # `exclude` is a CLI/file-discovery option, not a Mode parameter.
    black_config.pop("exclude", None)

    out = black.format_file_contents(
        code, fast=False, mode=black.Mode(**black_config)
    )
    print(DOUBLE_PIPE_REGEX.sub("|", out), file=args.output_file, end="")
def main():
    """Entry point: load the custom-steps config and run it in a shell."""
    argv = sys.argv[1:]
    if argv and argv[0] in ('-v', '--version'):
        print('readthedocs-custom-steps', __version__)
        return

    # Prefer pyproject.toml; fall back to a standalone YAML config file.
    config = None
    filename = None
    if PYPROJECT_TOML.exists():
        filename = PYPROJECT_TOML
        config = tomli.loads(PYPROJECT_TOML.read_text()).get(
            'tool', {}).get('readthedocs-custom-steps')

    if config is None:
        filename = find_config_file()
        config = yaml.safe_load(filename.read_text())

    env = os.environ.copy()
    shims = find_pyenv_shims()
    if shims:
        env.update({f'PYTHON{x}{y}': p for (x, y), p in shims.items()})
        # PYTHON points at the newest available interpreter.
        env['PYTHON'] = shims[max(shims)]

    shell = os.getenv('SHELL', '/bin/sh')
    bash_script = 'set -e\n'
    if 'steps' in config:
        assert isinstance(config['steps'], list)
        bash_script += '\n'.join(config['steps'])
    elif 'script' in config:
        bash_script += textwrap.dedent(config['script'])
    else:
        raise RuntimeError(
            f'configuration "(unknown)" contains no "script" or "steps" key')

    command = [shell, '-c', bash_script] + sys.argv
    print('[readthedocs-custom-steps dispatch]: running', command, file=sys.stderr)
    sys.exit(subprocess.call(command, env=env))
def _read_toml_config(
        path: Path, sections: str = "tool.pytask.ini_options") -> dict[str, Any]:
    """Read the configuration from a ``*.toml`` file.

    Raises
    ------
    tomli.TOMLDecodeError
        Raised if ``*.toml`` could not be read.
    KeyError
        Raised if the specified sections do not exist.
    """
    document = tomli.loads(path.read_text(encoding="utf-8"))
    # Walk down the dotted path, e.g. "tool" -> "pytask" -> "ini_options".
    node = document
    for key in sections.split("."):
        node = node[key]
    return node
def guess_version_files(config: ProjectConfig) -> Tuple[str, ...]:
    """Return candidate files that may carry the project's version string."""
    if config.project_type == ProjectTypeEnum.javascript:
        return (FILE_PACKAGE_JSON,)

    root = config.path
    found = []

    pyproject_path = root / FILE_PYPROJECT_TOML
    if pyproject_path.exists():
        found.append(FILE_PYPROJECT_TOML)

        # The poetry project name tells us where package sources may live.
        project_name = (
            tomli.loads(pyproject_path.read_text())
            .get("tool", {})
            .get("poetry", {})
            .get("name")
        )
        if project_name:
            # Check flat and src layouts for a package dir or single module.
            for package in (".", "./src"):
                package_path = root / package
                if (package_path / project_name / "__init__.py").exists():
                    found.append(f"{package}/{project_name}/__init__.py")
                if (package_path / project_name / "__version__.py").exists():
                    found.append(f"{package}/{project_name}/__version__.py")
                if (package_path / f"{project_name}.py").exists():
                    found.append(f"{package}/{project_name}.py")

    return tuple(found)
def find_project_version(config: ProjectConfig) -> Optional[str]:
    """Return the declared project version, or None when it cannot be read."""
    if config.project_type == ProjectTypeEnum.javascript:
        package_json_path = config.path / "package.json"
        if package_json_path.exists():
            try:
                return cast(
                    str, json.loads(package_json_path.read_text())["version"]
                )
            except (KeyError, ValueError):
                # Missing key or unparseable JSON: fall through to None.
                ...
        return None

    pyproject_toml_path = config.path / "pyproject.toml"
    if pyproject_toml_path.exists():
        try:
            return cast(
                str,
                tomli.loads(pyproject_toml_path.read_text())["tool"][
                    "poetry"
                ]["version"],
            )
        except (KeyError, ValueError):
            # Missing table/key or unparseable TOML: fall through to None.
            ...
    return None
def find_literal(file_name: str, literal_name: str) -> Optional[str]:
    """
    Look inside a file for a literal value, and return it.

    Returns:
        The string value found, or None if not found.
    """
    ext = os.path.splitext(file_name)[-1]
    if ext == ".py":
        # Parse the Python source and search its AST for the assignment.
        with open(file_name, encoding="utf-8") as f:
            node = ast.parse(f.read())
        return PythonLiteralFinder().find(node, literal_name)
    if ext == ".toml":
        if tomli is None:
            msg = ("Can't read {!r} without TOML support. " +
                   "Install with [toml] extra").format(file_name)
            raise Exception(msg)
        with open(file_name, encoding="utf-8") as f:
            data = tomli.loads(f.read())
        return find_toml_value(data, literal_name)
    raise Exception(
        "Can't read literals from files like {!r}".format(file_name))
def prepare():
    """Fetch the Rust release manifest and render the PKGBUILD template.

    On success, stores the rendered template in the module-level PKGBUILD
    global.  Returns a human-readable string describing why preparation was
    skipped (missing rustfmt/clippy), or None on success.
    """
    # The manifest at config_url is a TOML document describing the release.
    toml = s.get(config_url).text
    toml = tomli.loads(toml)
    version_date = toml['date']
    # Versions look like "1.2.3-beta.4"; keep only the part before the dash.
    version = toml['pkg']['rust']['version'].split('-', 1)[0]
    cargo_version = toml['pkg']['cargo']['version'].split('-', 1)[0]
    rustfmt_version = toml['pkg']['rustfmt-preview']['version'].split('-', 1)[0]
    if not rustfmt_version:
        return 'no rustfmt available'
    clippy_version = toml['pkg']['clippy-preview']['version'].split('-', 1)[0]
    try:
        clippy_url = toml['pkg']['clippy-preview']['target'] \
            ['x86_64-unknown-linux-gnu']['xz_url']
    except KeyError:
        return 'no clippy available?'
    # One Std entry per target architecture we package.
    stds = [
        Std(target, toml['pkg']['rust-std']['target'][target])
        for target in STDS
    ]
    loader = tornado.template.Loader('.')
    global PKGBUILD
    PKGBUILD = loader.load('PKGBUILD.tmpl').generate(
        stds=stds,
        version=version,
        version_date=version_date.replace('-', ''),
        version_date_raw=version_date,
        cargo_version=cargo_version,
        rustfmt_version=rustfmt_version,
        clippy_version=clippy_version,
        clippy_url=clippy_url,
    )
def parse_config_file(options: Options, set_strict_flags: Callable[[], None],
                      filename: Optional[str],
                      stdout: Optional[TextIO] = None,
                      stderr: Optional[TextIO] = None) -> None:
    """Parse a config file into an Options object.

    Errors are written to stderr but are not fatal.

    If filename is None, fall back to default config files.
    """
    stdout = stdout or sys.stdout
    stderr = stderr or sys.stderr

    if filename is not None:
        config_files: Tuple[str, ...] = (filename,)
    else:
        config_files = tuple(map(os.path.expanduser, defaults.CONFIG_FILES))

    config_parser = configparser.RawConfigParser()

    # Use the first config file that exists and parses; for shared files
    # (setup.cfg, pyproject.toml) also require a mypy section to be present.
    for config_file in config_files:
        if not os.path.exists(config_file):
            continue
        try:
            if is_toml(config_file):
                with open(config_file, encoding="utf-8") as f:
                    toml_data = tomli.loads(f.read())
                # Filter down to just mypy relevant toml keys
                toml_data = toml_data.get('tool', {})
                if 'mypy' not in toml_data:
                    continue
                toml_data = {'mypy': toml_data['mypy']}
                parser: MutableMapping[str, Any] = destructure_overrides(toml_data)
                config_types = toml_config_types
            else:
                config_parser.read(config_file)
                parser = config_parser
                config_types = ini_config_types
        except (tomli.TOMLDecodeError, configparser.Error, ConfigTOMLValueError) as err:
            print("%s: %s" % (config_file, err), file=stderr)
        else:
            if config_file in defaults.SHARED_CONFIG_FILES and 'mypy' not in parser:
                continue
            file_read = config_file
            options.config_file = file_read
            break
    else:
        # No usable config file found.
        return

    # Let config values reference the directory the config file lives in.
    os.environ['MYPY_CONFIG_FILE_DIR'] = os.path.dirname(
        os.path.abspath(config_file))

    if 'mypy' not in parser:
        if filename or file_read not in defaults.SHARED_CONFIG_FILES:
            print("%s: No [mypy] section in config file" % file_read, file=stderr)
    else:
        # Apply global [mypy] settings directly onto the Options object.
        section = parser['mypy']
        prefix = '%s: [%s]: ' % (file_read, 'mypy')
        updates, report_dirs = parse_section(
            prefix, options, set_strict_flags, section, config_types, stderr)
        for k, v in updates.items():
            setattr(options, k, v)
        options.report_dirs.update(report_dirs)

    # Per-module sections: [mypy-<glob>,<glob>,...]
    for name, section in parser.items():
        if name.startswith('mypy-'):
            prefix = get_prefix(file_read, name)
            updates, report_dirs = parse_section(
                prefix, options, set_strict_flags, section, config_types, stderr)
            if report_dirs:
                print("%sPer-module sections should not specify reports (%s)" %
                      (prefix, ', '.join(s + '_report'
                                         for s in sorted(report_dirs))),
                      file=stderr)
            if set(updates) - PER_MODULE_OPTIONS:
                print("%sPer-module sections should only specify per-module flags (%s)" %
                      (prefix, ', '.join(sorted(set(updates) - PER_MODULE_OPTIONS))),
                      file=stderr)
                # Drop any flags that are not valid per-module.
                updates = {k: v for k, v in updates.items() if k in PER_MODULE_OPTIONS}
            globs = name[5:]
            for glob in globs.split(','):
                # For backwards compatibility, replace (back)slashes with dots.
                glob = glob.replace(os.sep, '.')
                if os.altsep:
                    glob = glob.replace(os.altsep, '.')
                # Only '*' wildcards for whole components are allowed.
                if (any(c in glob for c in '?[]!') or
                        any('*' in x and x != '*' for x in glob.split('.'))):
                    print("%sPatterns must be fully-qualified module names, optionally "
                          "with '*' in some components (e.g spam.*.eggs.*)"
                          % prefix, file=stderr)
                else:
                    options.per_module_options[glob] = updates