def remote_initconfig(option_dict, args):
    """Create the pytest ``Config`` used inside a remote worker process.

    The terminal plugin is disabled because output is handled by the
    controlling process.

    Args:
        option_dict: Mapping of pre-parsed option values; must contain a
            ``"plugins"`` list. The caller's mapping is left unmodified.
        args: Command-line arguments to record on the config.

    Returns:
        A ``Config`` with ``option.appliances`` reset and ``args`` attached.
    """
    from _pytest.config import Config

    # Fix: work on copies instead of appending to the caller's list in
    # place — the original mutated option_dict["plugins"], so reusing the
    # same dict for several workers accumulated "no:terminal" entries.
    option_dict = dict(option_dict)
    option_dict["plugins"] = list(option_dict["plugins"]) + ["no:terminal"]
    config = Config.fromdictargs(option_dict, args)
    config.option.appliances = []
    config.args = args
    return config
def test_inifilename(self, tmpdir):
    """An explicitly passed ``inifilename`` must win over a pytest.ini
    discovered in the current working directory."""
    from _pytest.config import Config

    tmpdir.join("foo/bar.ini").ensure().write(
        _pytest._code.Source(""" [pytest] name = value """)
    )
    inifile = '../../foo/bar.ini'
    cwd = tmpdir.join('a/b')
    cwd.join('pytest.ini').ensure().write(
        _pytest._code.Source(""" [pytest] name = wrong-value should_not_be_set = true """)
    )
    with cwd.ensure(dir=True).as_cwd():
        config = Config.fromdictargs({'inifilename': inifile, 'capture': 'no'}, ())

        assert config.args == [str(cwd)]
        assert config.option.inifilename == inifile
        assert config.option.capture == 'no'

        # this indicates this is the file used for getting configuration values
        assert config.inifile == inifile
        assert config.inicfg.get('name') == 'value'
        assert config.inicfg.get('should_not_be_set') is None
def test_origargs(self):
    """Show that fromdictargs can handle args in their "orig" format"""
    from _pytest.config import Config

    args = ['-vvvv', '-s', 'a', 'b']
    config = Config.fromdictargs({}, args)

    # positional args survive, flags are parsed into options
    assert config.args == ['a', 'b']
    assert config._origargs == args
    assert config.option.verbose == 4
    assert config.option.capture == 'no'
def remote_initconfig(option_dict, args):
    """Build the pytest ``Config`` used inside an xdist worker process.

    The terminal plugin is disabled (output belongs to the controller) and
    options that would recurse or block a worker are forced off.

    Args:
        option_dict: Mapping of pre-parsed option values; must contain a
            ``"plugins"`` list. The caller's mapping is left unmodified.
        args: Command-line arguments to record on the config.

    Returns:
        A worker-safe ``Config``.
    """
    from _pytest.config import Config

    # Fix: copy instead of appending to the caller's list in place — the
    # original mutated option_dict["plugins"], so reusing the same dict
    # for several workers accumulated "no:terminal" entries.
    option_dict = dict(option_dict)
    option_dict["plugins"] = list(option_dict["plugins"]) + ["no:terminal"]
    config = Config.fromdictargs(option_dict, args)
    # Workers must never loop on failure, drop into pdb, or distribute
    # work themselves.
    config.option.looponfail = False
    config.option.usepdb = False
    config.option.dist = "no"
    config.option.distload = False
    config.option.numprocesses = None
    config.args = args
    return config
def test_origargs(self):
    """Show that fromdictargs can handle args in their "orig" format"""
    from _pytest.config import Config

    original = ["-vvvv", "-s", "a", "b"]
    cfg = Config.fromdictargs({}, original)

    # flags were consumed, positional args kept, originals remembered
    assert cfg.args == ["a", "b"]
    assert cfg._origargs == original
    assert cfg.option.verbose == 4
    assert cfg.option.capture == "no"
def get_log_level_for_setting(config: Config, *setting_names: str) -> Optional[int]:
    """Resolve the first configured log level among *setting_names*.

    Each name is looked up first as a command-line option, then as an ini
    value. The first truthy hit is converted to a numeric logging level;
    ``None`` is returned when nothing is configured.
    """
    setting_name = None
    log_level = None
    for name in setting_names:
        candidate = config.getoption(name)
        if candidate is None:
            candidate = config.getini(name)
        if candidate:
            setting_name, log_level = name, candidate
            break
    if setting_name is None:
        return None

    # Accept level names ("debug") case-insensitively as well as numbers.
    if isinstance(log_level, str):
        log_level = log_level.upper()
    try:
        return int(getattr(logging, log_level, log_level))
    except ValueError as e:
        # Python logging does not recognise this as a logging level
        raise pytest.UsageError(
            "'{}' is not recognized as a logging level name for "
            "'{}'. Please consider passing the "
            "logging level num instead.".format(log_level, setting_name)
        ) from e
def remote_initconfig(option_dict, args):
    """Build the pytest ``Config`` used inside an xdist worker process.

    Disables the terminal plugin (output belongs to the controller) and
    forces off every option that would recurse, block, or re-distribute
    work from inside a worker.

    Args:
        option_dict: Mapping of pre-parsed option values; must contain a
            ``"plugins"`` list. The caller's mapping is left unmodified.
        args: Command-line arguments to record on the config.

    Returns:
        A worker-safe ``Config``.
    """
    from _pytest.config import Config

    # Fix: copy instead of appending to the caller's list in place — the
    # original mutated option_dict["plugins"], so reusing the same dict
    # for several workers accumulated "no:terminal" entries.
    option_dict = dict(option_dict)
    option_dict["plugins"] = list(option_dict["plugins"]) + ["no:terminal"]
    config = Config.fromdictargs(option_dict, args)
    config.option.looponfail = False
    config.option.usepdb = False
    config.option.dist = "no"
    config.option.distload = False
    config.option.numprocesses = None
    config.option.maxprocesses = None
    config.args = args
    return config
def test_basic_behavior(self):
    """fromdictargs seeds option values verbatim and refuses a second parse()."""
    from _pytest.config import Config

    args = ["a", "b"]
    config = Config.fromdictargs(
        {"verbose": 444, "foo": "bar", "capture": "no"}, args
    )

    with pytest.raises(AssertionError):
        config.parse(["should refuse to parse again"])

    assert config.args == args
    assert config.option.verbose == 444
    assert config.option.foo == "bar"
    assert config.option.capture == "no"
def wrap_session(
    config: Config,
    doit: Callable[[Config, "Session"], Optional[Union[int, ExitCode]]],
) -> Union[int, ExitCode]:
    """Skeleton command line program.

    Runs *doit* inside a fully configured session, translating exceptions
    into an ``ExitCode`` and guaranteeing configure/unconfigure symmetry.

    ``initstate`` tracks how far setup got so teardown only undoes what
    actually happened: 0 = nothing, 1 = configured, 2 = sessionstart fired.
    """
    session = Session.from_config(config)
    session.exitstatus = ExitCode.OK
    initstate = 0
    try:
        try:
            config._do_configure()
            initstate = 1
            config.hook.pytest_sessionstart(session=session)
            initstate = 2
            # A falsy return from doit (None/0) means a clean run.
            session.exitstatus = doit(config, session) or 0
        except UsageError:
            session.exitstatus = ExitCode.USAGE_ERROR
            # Re-raised so the caller can print the usage message.
            raise
        except Failed:
            session.exitstatus = ExitCode.TESTS_FAILED
        except (KeyboardInterrupt, exit.Exception):
            excinfo = _pytest._code.ExceptionInfo.from_current()
            exitstatus: Union[int, ExitCode] = ExitCode.INTERRUPTED
            if isinstance(excinfo.value, exit.Exception):
                # pytest.exit() may carry an explicit return code.
                if excinfo.value.returncode is not None:
                    exitstatus = excinfo.value.returncode
                if initstate < 2:
                    # Terminal reporter isn't active yet, so report directly.
                    sys.stderr.write(
                        f"{excinfo.typename}: {excinfo.value.msg}\n")
            config.hook.pytest_keyboard_interrupt(excinfo=excinfo)
            session.exitstatus = exitstatus
        except BaseException:
            session.exitstatus = ExitCode.INTERNAL_ERROR
            excinfo = _pytest._code.ExceptionInfo.from_current()
            try:
                config.notify_exception(excinfo, config.option)
            except exit.Exception as exc:
                if exc.returncode is not None:
                    session.exitstatus = exc.returncode
                sys.stderr.write("{}: {}\n".format(type(exc).__name__, exc))
            else:
                if isinstance(excinfo.value, SystemExit):
                    sys.stderr.write(
                        "mainloop: caught unexpected SystemExit!\n")
    finally:
        # Explicitly break reference cycle.
        excinfo = None  # type: ignore
        os.chdir(session.startpath)
        if initstate >= 2:
            # Only fire sessionfinish if sessionstart fired.
            try:
                config.hook.pytest_sessionfinish(session=session,
                                                 exitstatus=session.exitstatus)
            except exit.Exception as exc:
                if exc.returncode is not None:
                    session.exitstatus = exc.returncode
                sys.stderr.write("{}: {}\n".format(type(exc).__name__, exc))
        config._ensure_unconfigure()
    return session.exitstatus
def test_basic_behavior(self, _sys_snapshot):
    """fromdictargs copies option values onto the config and a second
    parse() must raise."""
    from _pytest.config import Config

    args = ["a", "b"]
    config = Config.fromdictargs(
        {"verbose": 444, "foo": "bar", "capture": "no"}, args
    )

    with pytest.raises(AssertionError):
        config.parse(["should refuse to parse again"])

    assert config.args == args
    assert config.option.verbose == 444
    assert config.option.foo == "bar"
    assert config.option.capture == "no"
def catch_warnings_for_item(
    config: Config,
    ihook,
    when: "Literal['config', 'collect', 'runtest']",
    item: Optional[Item],
) -> Generator[None, None, None]:
    """Context manager that catches warnings generated in the contained execution block.

    ``item`` can be None if we are not in the context of an item execution.

    Each warning captured triggers the ``pytest_warning_recorded`` hook.
    """
    # Filters from the ini file and from -W on the command line.
    config_filters = config.getini("filterwarnings")
    cmdline_filters = config.known_args_namespace.pythonwarnings or []
    with warnings.catch_warnings(record=True) as log:
        # mypy can't infer that record=True means log is not None; help it.
        assert log is not None

        if not sys.warnoptions:
            # If user is not explicitly configuring warning filters, show deprecation warnings by default (#2908).
            warnings.filterwarnings("always", category=DeprecationWarning)
            warnings.filterwarnings("always", category=PendingDeprecationWarning)

        apply_warning_filters(config_filters, cmdline_filters)

        # apply filters from "filterwarnings" marks
        # (applied last, so per-item marks take precedence over ini/cmdline)
        nodeid = "" if item is None else item.nodeid
        if item is not None:
            for mark in item.iter_markers(name="filterwarnings"):
                for arg in mark.args:
                    warnings.filterwarnings(*parse_warning_filter(arg, escape=False))

        yield

        # Replay everything captured during the block; both hooks are fired
        # for each warning (call_historic delivers them to late subscribers).
        for warning_message in log:
            ihook.pytest_warning_captured.call_historic(
                kwargs=dict(
                    warning_message=warning_message,
                    when=when,
                    item=item,
                    location=None,
                )
            )
            ihook.pytest_warning_recorded.call_historic(
                kwargs=dict(
                    warning_message=warning_message,
                    nodeid=nodeid,
                    when=when,
                    location=None,
                )
            )
def pytest_collection_modifyitems(config: Config, items: List[Function]) -> None:
    """Sort collected items by path/config and filter them by target options."""
    target = config.getoption('target', None)  # use the `build` dir
    if not target:
        return

    # sort by file path and callspec.config
    # implement like this since this is a limitation of pytest, couldn't get fixture values while collecting
    # https://github.com/pytest-dev/pytest/discussions/9689
    def _param_config(func: Function) -> str:
        if hasattr(func, 'callspec'):
            return func.callspec.params.get('config', DEFAULT_SDKCONFIG)  # type: ignore
        return DEFAULT_SDKCONFIG

    items.sort(key=lambda it: (os.path.dirname(it.path), _param_config(it)))

    # add markers for special markers
    for item in items:
        if 'supported_targets' in item_marker_names(item):
            for tgt in SUPPORTED_TARGETS:
                item.add_marker(tgt)
        if 'preview_targets' in item_marker_names(item):
            for tgt in PREVIEW_TARGETS:
                item.add_marker(tgt)
        if 'all_targets' in item_marker_names(item):
            for tgt in [*SUPPORTED_TARGETS, *PREVIEW_TARGETS]:
                item.add_marker(tgt)

    # filter all the test cases with "--target"
    items[:] = [it for it in items if target in item_marker_names(it)]

    # filter all the test cases with cli option "config"
    wanted_config = config.getoption('sdkconfig')
    if wanted_config:
        items[:] = [it for it in items if _param_config(it) == wanted_config]
def save_cli_coverage(cluster_obj: clusterlib.ClusterLib, pytest_config: Config) -> Optional[Path]:
    """Save CLI coverage info.

    Returns the path of the written JSON file, or None when no coverage
    directory is configured or there is no coverage data.
    """
    cli_coverage_dir = pytest_config.getoption("--cli-coverage-dir")
    if not cli_coverage_dir:
        return None
    if not cluster_obj.cli_coverage:
        return None

    out_path = Path(cli_coverage_dir) / f"cli_coverage_{helpers.get_timestamped_rand_str()}.json"
    with open(out_path, "w", encoding="utf-8") as out_json:
        json.dump(cluster_obj.cli_coverage, out_json, indent=4)

    LOGGER.info(f"Coverage file saved to '{cli_coverage_dir}'.")
    return out_path
def retrieve_report_results_color(config: Config, color_option: Option, default: str) -> str:
    """Resolve the results-color setting.

    Precedence: command-line option, then environment variable, then ini
    value; *default* is returned when none of them is set.
    """
    value = getattr(config.option, color_option.inioption_str)
    if value:
        return value
    value = os.environ.get(color_option.envvar_str)
    if value:
        return value
    value = config.getini(color_option.inioption_str)
    if value:
        return value
    return default
def pytest_report_header(config: Config) -> Optional[str]:
    """Display cachedir with --cache-show and if non-default."""
    # Same condition as the original, guard-clause form: skip unless
    # verbose or the cache dir was customized.
    if config.option.verbose <= 0 and config.getini("cache_dir") == ".pytest_cache":
        return None
    assert config.cache is not None
    cachedir = config.cache._cachedir
    # TODO: evaluate generating upward relative paths
    # starting with .., ../.. if sensible
    try:
        displaypath = cachedir.relative_to(config.rootpath)
    except ValueError:
        displaypath = cachedir
    return f"cachedir: {displaypath}"
def pytest_configure(config: Config) -> None:
    """Register pytest-recording markers and reject coexistence with pytest-vcr."""
    if config.pluginmanager.has_plugin("vcr"):
        raise RuntimeError(
            "`pytest-recording` is incompatible with `pytest-vcr`. "
            "Please, uninstall `pytest-vcr` in order to use `pytest-recording`."
        )
    marker_lines = (
        "vcr: Mark the test as using VCR.py.",
        "block_network: Block network access except for VCR recording.",
        "default_cassette: Override the default cassette name..",
        "allowed_hosts: List of regexes to match hosts to where connection must be allowed",
    )
    for line in marker_lines:
        config.addinivalue_line("markers", line)
    network.install_pycurl_wrapper()
def __init__(self, config: Config) -> None:
    """Initialize the assertion-rewriting hook state for *config*."""
    self.config = config
    try:
        self.fnpats = config.getini("python_files")
    except ValueError:
        # getini can fail when used outside a fully set-up pytest run;
        # fall back to the default test-file name patterns.
        self.fnpats = ["test_*.py", "*_test.py"]
    self.session = None  # type: Optional[Session]
    # Names of modules already rewritten / that must be rewritten on import.
    self._rewritten_names = set()  # type: Set[str]
    self._must_rewrite = set()  # type: Set[str]
    # flag to guard against trying to rewrite a pyc file while we are already writing another pyc file,
    # which might result in infinite recursion (#3506)
    self._writing_pyc = False
    self._basenames_to_check_rewrite = {"conftest"}
    # Cache of name -> "should this module be rewritten?" decisions.
    self._marked_for_rewrite_cache = {}  # type: Dict[str, bool]
    self._session_paths_checked = False
def save_env_for_allure(pytest_config: Config) -> None:
    """Save environment info in a format for Allure.

    Writes ``environment.properties`` (one ``key=value`` line per scalar
    metadata entry) into the Allure results directory. No-op when
    ``--alluredir`` is not given.

    Args:
        pytest_config: The pytest config carrying ``_metadata``.
    """
    alluredir = pytest_config.getoption("--alluredir")
    if not alluredir:
        return

    # Fix: don't rebind `alluredir` to a different type; and open the file
    # with an explicit encoding in plain "w" mode (the original used "w+"
    # without an encoding, making the output locale-dependent).
    allure_path = LAUNCH_PATH / alluredir
    metadata: Dict[str, Any] = pytest_config._metadata  # type: ignore
    with open(allure_path / "environment.properties", "w", encoding="utf-8") as infile:
        for k, v in metadata.items():
            # Nested structures can't be represented in a .properties file.
            if isinstance(v, dict):
                continue
            name = k.replace(" ", ".")
            infile.write(f"{name}={v}\n")
def pytest_terminal_summary(terminalreporter: TerminalReporter, config: PytestConfig) -> None:
    """Append a SnappierShot status summary to the pytest terminal output."""
    tracker: SnapshotTracker = config.getoption(PACKAGE_TRACKER_OPTION)
    status_report = tracker.get_status_report()
    if not status_report.any():
        return
    terminalreporter.write_sep("=", "SnappierShot summary")
    terminalreporter.line(f"{status_report.passed: 3d} Snapshots Passed", green=True)
    terminalreporter.line(f"{status_report.failed: 3d} Snapshots Failed", red=True)
    terminalreporter.line(f"{status_report.written: 3d} Snapshots Written", cyan=True)
    terminalreporter.line(f"{status_report.unchecked: 3d} Snapshots Unchecked", yellow=True)
def main(connection):
    """Remote entry point: receive a command over *connection*, build a
    pytest Config for it, and run a Session bound to the connection."""
    command, args, option_dict, *rest = connection.recv()

    config = Config.fromdictargs(option_dict, list(args))
    config.args = args

    if command == "collect":
        # Collection only: no test execution, quiet output.
        config.option.collectonly = True
        config.option.verbose = 0
    elif command == "run":
        extra = rest[0] if rest else None
        if extra:
            # An explicit file/dir list overrides the original args.
            config.option.file_or_dir = extra
            config.args = extra

    Session(config, connection).main()
def pytest_collection_modifyitems(session: Session, config: Config, items: list[Item]):
    """Deselect items that use none of the fixtures named in --filter-fixtures."""
    raw = config.getoption("--filter-fixtures")
    if not raw:
        return
    # TODO: add more sophisticated combinations (=> like pytest -m and -k)
    # any overlap between the requested and used fixture names selects the item
    wanted = set(raw.split(","))
    kept = []
    dropped = []
    for item in items:
        if hasattr(item, "fixturenames") and wanted.isdisjoint(set(item.fixturenames)):
            dropped.append(item)
        else:
            kept.append(item)
    items[:] = kept
    config.hook.pytest_deselected(items=dropped)
def pytest_configure(config: Config) -> None:
    """ Patch pytest_bdd objects in current hook. """
    injection_enabled: bool = config.getoption(_OptionName.ENABLE_INJECTION.as_variable)
    tw = _pytest.config.create_terminal_writer(config)
    if not injection_enabled:
        return
    logger.debug("Got %s flag.", _OptionName.ENABLE_INJECTION)
    try:
        logger.debug("Try to patch pytest objects...")
        get_proxy_manager().patch_pytest()
        logger.debug("Successfully patched pytest objects.")
        tw.line("Overhave injector successfully initialized.", green=True)
    except ValidationError as e:
        # Report failure on the terminal but do not abort the run.
        tw.line(f"Could not initialize Overhave injector!\n{str(e)}", red=True)
def pytest_configure(config: Config) -> None:
    """Register the pdb plugins and monkeypatch ``pdb.set_trace``.

    The previous set_trace/state triple is pushed onto ``pytestPDB._saved``
    and restored by a cleanup callback on config teardown.
    """
    import pdb

    if config.getvalue("trace"):
        config.pluginmanager.register(PdbTrace(), "pdbtrace")
    if config.getvalue("usepdb"):
        config.pluginmanager.register(PdbInvoke(), "pdbinvoke")

    pytestPDB._saved.append(
        (pdb.set_trace, pytestPDB._pluginmanager, pytestPDB._config)
    )
    pdb.set_trace = pytestPDB.set_trace
    pytestPDB._pluginmanager = config.pluginmanager
    pytestPDB._config = config

    # NOTE: not using pytest_unconfigure, since it might get called although
    # pytest_configure was not (if another plugin raises UsageError).
    def _restore() -> None:
        saved_trace, saved_pm, saved_config = pytestPDB._saved.pop()
        pdb.set_trace = saved_trace
        pytestPDB._pluginmanager = saved_pm
        pytestPDB._config = saved_config

    config._cleanup.append(_restore)
def pytest_configure(config: Config) -> None:
    """Open the --resultlog file and register the ResultLog plugin."""
    resultlog = config.option.resultlog
    # Prevent opening resultlog on worker nodes (xdist).
    if not resultlog or hasattr(config, "workerinput"):
        return

    dirname = os.path.dirname(os.path.abspath(resultlog))
    if not os.path.isdir(dirname):
        os.makedirs(dirname)
    logfile = open(resultlog, "w", 1)  # line buffered
    plugin = ResultLog(config, logfile)
    config._store[resultlog_key] = plugin
    config.pluginmanager.register(plugin)

    from _pytest.deprecated import RESULT_LOG
    from _pytest.warnings import _issue_warning_captured

    _issue_warning_captured(RESULT_LOG, config.hook, stacklevel=2)
def retrieve_report_zeros(config: Config) -> str:
    """Resolve how zero counts are rendered.

    Precedence: CLI option, environment variable, ini value, a CI-specific
    default, then the package default.
    """
    value = config.option.md_report_zeros
    if not value:
        value = os.environ.get(Option.MD_REPORT_ZEROS.envvar_str)
    if not value:
        value = config.getini(Option.MD_REPORT_ZEROS.inioption_str)
    if not value and _is_ci():
        value = ZerosRender.EMPTY
    if not value:
        value = Default.ZEROS
    return value
def test_basic_behavior(self):
    """fromdictargs copies option values verbatim and a re-parse must fail."""
    from _pytest.config import Config

    args = ['a', 'b']
    config = Config.fromdictargs(
        {'verbose': 444, 'foo': 'bar', 'capture': 'no'}, args
    )

    with pytest.raises(AssertionError):
        config.parse(['should refuse to parse again'])

    assert config.args == args
    assert config.option.verbose == 444
    assert config.option.foo == 'bar'
    assert config.option.capture == 'no'
def pytest_collection_modifyitems(items: List[nodes.Item], config: Config) -> None:
    """Deselect every item whose node id starts with a --deselect prefix."""
    prefixes = tuple(config.getoption("deselect") or [])
    if not prefixes:
        return
    kept = []
    dropped = []
    for colitem in items:
        bucket = dropped if colitem.nodeid.startswith(prefixes) else kept
        bucket.append(colitem)
    if dropped:
        config.hook.pytest_deselected(items=dropped)
        items[:] = kept
def pytest_terminal_summary(terminalreporter: TerminalReporter, exitstatus: int, config: Config):
    """Print recorded benchmark properties and persist them as JSON.

    NOTE(review): the bare ``yield`` suggests this is registered as a
    hookwrapper so the default summary runs first — confirm the
    registration site.
    """
    yield
    # Revision/platform identify the run; default to "local" outside CI.
    revision = os.getenv("GITHUB_SHA", "local")
    platform = os.getenv("PLATFORM", "local")
    terminalreporter.section("Benchmark results", "-")
    result = []
    for test_report in terminalreporter.stats.get("passed", []):
        result_entry = []
        for _, recorded_property in test_report.user_properties:
            terminalreporter.write("{}.{}: ".format(test_report.head_line, recorded_property["name"]))
            unit = recorded_property["unit"]
            value = recorded_property["value"]
            # Unit-dependent number formatting for the terminal output.
            if unit == "MB":
                terminalreporter.write("{0:,.0f}".format(value), green=True)
            elif unit in ("s", "ms") and isinstance(value, float):
                terminalreporter.write("{0:,.3f}".format(value), green=True)
            elif isinstance(value, float):
                terminalreporter.write("{0:,.4f}".format(value), green=True)
            else:
                terminalreporter.write(str(value), green=True)
            terminalreporter.line(" {}".format(unit))
            result_entry.append(recorded_property)
        result.append({
            "suit": test_report.nodeid,
            "total_duration": test_report.duration,
            "data": result_entry,
        })
    out_dir = config.getoption("out_dir")
    if out_dir is None:
        # Best effort: warn instead of failing the run.
        warnings.warn("no out dir provided to store performance test results")
        return
    if not result:
        warnings.warn("no results to store (no passed test suites)")
        return
    get_out_path(Path(out_dir), revision=revision).write_text(
        json.dumps({
            "revision": revision,
            "platform": platform,
            "result": result
        }, indent=4))
def pytest_collection_modifyitems(items: List[Function], config: Config) -> None:
    """Keep only tests that use at least one of the requested fixtures.

    Reads the ``use_fixtures`` option; when set, tests using any of the
    listed fixtures stay selected and all others are reported deselected.

    Bug fixed: the original looped over the fixtures in an outer loop and
    the items in an inner loop, so with more than one fixture each test was
    appended once per fixture — tests were duplicated in ``items`` and a
    test matching one fixture but not another landed in both the selected
    and the deselected lists. (It also iterated a ``str`` option
    character-by-character.) Now a single pass tests each item against the
    full set of requested fixture names.
    """
    fixtures = config.getoption("use_fixtures")
    if not fixtures:
        return
    # Accept either a comma-separated string or an iterable of names.
    names = fixtures.split(",") if isinstance(fixtures, str) else fixtures
    wanted = {name for name in names if name}
    selected_tests = []
    deselected_tests = []
    for test in items:
        if wanted.intersection(test.fixturenames):
            selected_tests.append(test)
        else:
            deselected_tests.append(test)
    config.hook.pytest_deselected(items=deselected_tests)
    items[:] = selected_tests
def retrieve_verbosity_level(config: Config) -> int:
    """Determine md-report verbosity.

    Precedence: CLI flag (non-negative values only), environment variable,
    ini value, then pytest's own ``-v`` level.
    """
    cli_value = config.option.md_report_verbose
    if cli_value is not None and cli_value >= 0:
        return cli_value
    env_value = _to_int(os.environ.get(Option.MD_REPORT_VERBOSE.envvar_str))
    if env_value is not None:
        return env_value
    ini_value = _to_int(config.getini(Option.MD_REPORT_VERBOSE.inioption_str))
    if ini_value is not None:
        return ini_value
    return config.option.verbose
def pytest_configure(config: Config) -> None:
    """With --pastebin=all, tee all terminal output into a temp file for upload."""
    if config.option.pastebin != "all":
        return
    tr = config.pluginmanager.getplugin("terminalreporter")
    # If no terminal reporter plugin is present, nothing we can do here;
    # this can happen when this function executes in a worker node
    # when using pytest-xdist, for example.
    if tr is None:
        return

    # pastebin file will be UTF-8 encoded binary file.
    config.stash[pastebinfile_key] = tempfile.TemporaryFile("w+b")
    oldwrite = tr._tw.write

    def tee_write(s, **kwargs):
        oldwrite(s, **kwargs)
        if isinstance(s, str):
            s = s.encode("utf-8")
        config.stash[pastebinfile_key].write(s)

    tr._tw.write = tee_write
def pytest_configure(config: Config) -> None:
    """Adds global variables and configuration attributes required by Ensembl's unit tests.

    `Pytest initialisation hook
    <https://docs.pytest.org/en/latest/reference.html#_pytest.hookspec.pytest_configure>`_.

    Args:
        config: Access to configuration values, pluginmanager and plugin hooks.
    """
    # Load server information
    server_url = sqlalchemy.engine.url.make_url(config.getoption('server'))
    password = server_url.password
    # A "$NAME" password means: resolve the real password from the
    # environment variable NAME.
    if password and password.startswith('$'):
        server_url.password = os.environ[password[1:]]
    config.option.server = str(server_url)
    # Add global variables
    pytest.dbs_dir = Path(__file__).parents[3] / 'tests' / 'databases'
def construct_snappiershot_config(
        pytest_config: PytestConfig) -> snappiershot.Config:
    """ Attempt to construct a snappiershot.Config object from the pytest Config object.

    Searches the inifile, rootdir and invocation directory (in that order)
    for a pyproject.toml with snappiershot configuration; returns a default
    Config when none is found.

    Args:
        pytest_config: The pytest Config object.
    """
    candidates = (
        pytest_config.inifile,
        pytest_config.rootdir,
        pytest_config.invocation_dir,
    )
    for candidate in filter(None, candidates):
        pyproject_toml = snappiershot.config.find_pyproject_toml(candidate)
        if pyproject_toml is None:
            continue
        result = snappiershot.Config.from_pyproject(pyproject_toml)
        # The CLI flag can force full diffs on top of the file settings.
        result.full_diff |= pytest_config.getoption(PACKAGE_FULL_DIFF_OPTION)
        return result
    return snappiershot.Config()
def pytest_collection_modifyitems(config: Config, items: List[Item]) -> None:
    """Expand the special target markers and keep only items matching --target."""
    target = config.getoption('target', None)  # use the `build` dir
    if not target:
        return

    # add markers for special markers
    for item in items:
        if 'supported_targets' in item_marker_names(item):
            for tgt in SUPPORTED_TARGETS:
                item.add_marker(tgt)
        if 'preview_targets' in item_marker_names(item):
            for tgt in PREVIEW_TARGETS:
                item.add_marker(tgt)
        if 'all_targets' in item_marker_names(item):
            for tgt in [*SUPPORTED_TARGETS, *PREVIEW_TARGETS]:
                item.add_marker(tgt)

    # filter all the test cases with "--target"
    items[:] = [it for it in items if target in item_marker_names(it)]
def pytest_configure(config: Config) -> None:
    """Register the xray marker and, when enabled, the XrayPlugin.

    Skipped on xdist worker nodes (``workerinput`` attribute) and when the
    xray flag is not given. Results go either to a file (``--xraypath``)
    or to a Jira endpoint, with credentials resolved in order:
    client-secret, API key, token, then basic auth.
    """
    config.addinivalue_line(
        'markers', 'xray(JIRA_ID): mark test with JIRA XRAY test case ID')
    # Only the controller process publishes results.
    if not config.getoption(JIRA_XRAY_FLAG) or hasattr(config, 'workerinput'):
        return

    xray_path = config.getoption(XRAYPATH)
    if xray_path:
        # File output requested: no Jira connection needed.
        publisher = FilePublisher(xray_path)  # type: ignore
    else:
        # Cloud and server deployments use different endpoints.
        if config.getoption(JIRA_CLOUD):
            endpoint = TEST_EXECUTION_ENDPOINT_CLOUD
        else:
            endpoint = TEST_EXECUTION_ENDPOINT
        # Credential resolution order: client secret > API key > token > basic.
        if config.getoption(JIRA_CLIENT_SECRET_AUTH):
            options = get_bearer_auth()
            auth: Union[AuthBase, Tuple[str, str]] = ClientSecretAuth(
                options['BASE_URL'],
                options['CLIENT_ID'],
                options['CLIENT_SECRET'])
        elif config.getoption(JIRA_API_KEY):
            options = get_api_key_auth()
            auth = ApiKeyAuth(options['API_KEY'])
        elif config.getoption(JIRA_TOKEN):
            options = get_api_token_auth()
            auth = TokenAuth(options['TOKEN'])
        else:
            options = get_basic_auth()
            auth = (options['USER'], options['PASSWORD'])
        publisher = XrayPublisher(  # type: ignore
            base_url=options['BASE_URL'],
            endpoint=endpoint,
            auth=auth,
            verify=options['VERIFY'])

    plugin = XrayPlugin(config, publisher)
    config.pluginmanager.register(plugin=plugin, name=XRAY_PLUGIN)
def is_make_md_report(config: Config) -> bool:
    """Decide whether the markdown report should be generated.

    Precedence: never during ``--help``, then CLI option, environment
    variable, ini value; defaults to False when nothing is set.
    """
    if config.option.help:
        return False

    make_report = config.option.md_report
    if make_report is None:
        try:
            make_report = Bool(
                os.environ.get(Option.MD_REPORT.envvar_str),
                strict_level=StrictLevel.MIN,
            ).convert()
        except TypeConversionError:
            # Unset/unparseable env var: fall through to the ini value.
            make_report = None
    if make_report is None:
        make_report = config.getini(Option.MD_REPORT.inioption_str)
    if make_report is None:
        return False
    return make_report
def init_slave_session(channel, args, option_dict):
    """Bootstrap the looponfail slave in the remote (execnet) process.

    Redirects this process's stdout/stderr into a fresh channel that is
    sent back to the master, normalizes sys.path, builds a pytest Config
    from the master's option dict, and enters the slave session loop.

    Args:
        channel: execnet channel connected to the master.
        args: command-line arguments to record on the config.
        option_dict: pre-parsed option values received from the master.
    """
    import os, sys
    # Give the master a dedicated channel carrying this slave's output;
    # it must be sent before anything is written to stdout/stderr.
    outchannel = channel.gateway.newchannel()
    sys.stdout = sys.stderr = outchannel.makefile('w')
    channel.send(outchannel)
    # prune sys.path to not contain relative paths
    newpaths = []
    for p in sys.path:
        if p:
            if not os.path.isabs(p):
                p = os.path.abspath(p)
            newpaths.append(p)
    sys.path[:] = newpaths

    #fullwidth, hasmarkup = channel.receive()
    from _pytest.config import Config
    config = Config.fromdictargs(option_dict, list(args))
    # Keep the args exactly as the master sent them.
    config.args = args
    from xdist.looponfail import SlaveFailSession
    SlaveFailSession(config, channel).main()
def test_inifilename(self, tmpdir):
    """An explicitly requested ini file must win over a pytest.ini found
    in the current working directory."""
    from _pytest.config import Config

    tmpdir.join("foo/bar.ini").ensure().write(
        _pytest._code.Source(""" [pytest] name = value """)
    )
    inifile = "../../foo/bar.ini"
    workdir = tmpdir.join("a/b")
    workdir.join("pytest.ini").ensure().write(
        _pytest._code.Source(""" [pytest] name = wrong-value should_not_be_set = true """)
    )
    with workdir.ensure(dir=True).as_cwd():
        config = Config.fromdictargs({"inifilename": inifile, "capture": "no"}, ())

        assert config.args == [str(workdir)]
        assert config.option.inifilename == inifile
        assert config.option.capture == "no"

        # this indicates this is the file used for getting configuration values
        assert config.inifile == inifile
        assert config.inicfg.get("name") == "value"
        assert config.inicfg.get("should_not_be_set") is None
def pytest_configure(config: Config):
    """Register the Surefire REST reporter when --report-surefire is given."""
    if not config.getoption('--report-surefire'):
        return
    config.pluginmanager.register(SurefireRESTReporter(config), 'surefirereporter')