def pytest_configure(config: Config) -> None: """ Register the "order" marker and configure the plugin, depending on the CLI options. """ provided_by_pytest_order = ( "Provided by pytest-order. " "See also: https://pytest-dev.github.io/pytest-order/" ) config_line = ( "order: specify ordering information for when tests should run " "in relation to one another. " + provided_by_pytest_order ) config.addinivalue_line("markers", config_line) # We need to dynamically add this `tryfirst` decorator to the plugin: # only when the CLI option is present should the decorator be added. # Thus, we manually run the decorator on the class function and # manually replace it. if config.getoption("indulgent_ordering"): wrapper = pytest.hookimpl(tryfirst=True) else: wrapper = pytest.hookimpl(trylast=True) setattr( OrderingPlugin, "pytest_collection_modifyitems", wrapper(modify_items) ) config.pluginmanager.register(OrderingPlugin(), "orderingplugin")
def pytest_configure(config: Config) -> None:
    try:
        settings = load_settings()
    except ValidationError as exc:
        field_errors = "\n".join(
            f"\033[93m {e['loc'][0]}\033[0m - {e['msg']}" for e in exc.errors()
        )
        warnings.warn(
            UserWarning(
                "\033[1;31m Zebrunner plugin not configured properly because of missing required config options.\n"
                "Add them to environment variables or the .env file.\n" + field_errors + "\n" * 3
            )
        )
        return

    if settings.enabled:
        hooks: Union[PytestHooks, PytestXdistHooks]
        if config.pluginmanager.has_plugin("xdist") and any(
            x == "-n" for x in config.invocation_params.args
        ):
            hooks = PytestXdistHooks()
        else:
            hooks = PytestHooks()

        config.pluginmanager.register(hooks)
        config.addinivalue_line(
            "markers", "maintainer(name): Email or nickname of test maintainer"
        )
        config.addinivalue_line("markers", "label(name, value): Test label")

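# Hedged usage sketch for the two markers registered above; the signatures
# follow the registered marker descriptions, and the test body and values
# are invented.
import pytest

@pytest.mark.maintainer("jane.doe@example.com")
@pytest.mark.label("feature", "login")
def test_login():
    pass
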
def pytest_configure(config: Config) -> None: """Configure pytest.""" config.addinivalue_line( # cspell:ignore addinivalue "markers", "cli_runner(charset:='utf-8', env=None, echo_stdin=False, mix_stderr=True): " "Pass kwargs to `click.testing.CliRunner` initialization.", )
def pytest_configure(config: Config) -> None: config.addinivalue_line("markers", "record_stdout: Mark the test as text record.") brotli_check() disable_rich() enable_debug()
def pytest_configure(config: PytestConfig): """This runs before tests start and adds values to the config.""" # Add marker to the config to prevent issues caused by: # https://github.com/pytest-dev/pytest/issues/4826 # Errors are now emitted when unknown marks are included config.addinivalue_line( "markers", "workflow('name', 'name2', ...): mark test to run only with the given " "workflow name or names. Also provides access to the workflow_dir " "fixture." ) # We need to add a workflow queue to some central variable. Instead of # using a global variable we add a value to the config. # Using setattr is not the nicest way of doing things, but having something # in the globally used config is the easiest and least hackish way to get # this going. workflow_queue = WorkflowQueue() setattr(config, "workflow_queue", workflow_queue) # Save which workflows are run and which are not. executed_workflows: Dict[str, str] = {} setattr(config, "executed_workflows", executed_workflows) # Save workflow for cleanup in this var. workflow_cleanup_dirs: List[str] = [] setattr(config, "workflow_cleanup_dirs", workflow_cleanup_dirs) # When multiple workflows are started they should all be set in the same # temporary directory # Running in a temporary directory will prevent the project repository # from getting filled up with test workflow output. # The temporary directory is produced using the tempfile stdlib. # If a basetemp is set by the user this is used as the temporary # directory. # Alternatively self.config._tmp_path_factory.getbasetemp() could be used # to create temporary dirs. But the comments in the pytest code # discourage this. Furthermore this creates directories in the following # form: `/tmp/pytest-of-$USER/pytest-<number>`. The number is generated # by pytest itself and increments each run. A maximum of 3 folders can # coexist. When more are detected, pytest will delete the oldest folders. # This can create problems when more than three instances of pytest with # pytest-workflow run under the same user. This is not uncommon in CI. # So this is why the native pytest `tmpdir` fixture is not used. basetemp = config.getoption("basetemp") workflow_temp_dir = ( Path(basetemp) if basetemp is not None else Path(tempfile.mkdtemp(prefix="pytest_workflow_"))) rootdir = Path(str(config.rootdir)) # Raise an error if the workflow temporary directory of the rootdir # (pytest's CWD). This will lead to infinite looping and copying. if is_in_dir(workflow_temp_dir, rootdir): raise ValueError(f"'{workflow_temp_dir}' is a subdirectory of " f"'{rootdir}'. Please select a --basetemp that is " f"not in pytest's current working directory.") setattr(config, "workflow_temp_dir", workflow_temp_dir)
def pytest_configure(config: Config): logger.debug("pytest_runtest_setup", config=config) # register an additional marker config.addinivalue_line("markers", "sosu(type): mark test to run with Sauce Labs") sosu_config = build_sosu_config(config.option, os.environ) setattr(config, "sosu", sosu_config)
def pytest_configure(config: Config) -> None: config.addinivalue_line( "markers", "compilation: Tests which use GNAT to compile Ada/SPARK code.") config.addinivalue_line( "markers", "verification: Tests which use GNATprove to formally verify SPARK code." )
def pytest_configure(config: Config) -> None:
    installed_packages = pkg_resources.working_set
    for item in list(installed_packages):
        if "brotli" in str(item).lower():
            pytest.exit("Uninstall brotli before running tests")

    rich_config.disable_rich()

    def effect(df, *xargs, **kwargs):  # pylint: disable=unused-argument
        print(df.to_string())

    helper_funcs.print_rich_table = effect

    config.addinivalue_line("markers", "record_stdout: Mark the test as text record.")
    os.environ["DEBUG_MODE"] = "true"

def pytest_configure(config: Config) -> None:
    # Register custom marks for AWS services.
    for svc_name in [
        "aws",
        "cloudtrail",
        "ec2",
        "elasticsearch",
        "elb",
        "iam",
        "rds",
        "redshift",
        "s3",
        "sns",
    ]:
        config.addinivalue_line(
            "markers", "{}: mark tests against {}".format(svc_name, svc_name))

def pytest_configure(config: Config) -> None:
    config.addinivalue_line(
        'markers', 'xray(JIRA_ID): mark test with JIRA XRAY test case ID')
    if not config.getoption(JIRA_XRAY_FLAG) or hasattr(config, 'workerinput'):
        return

    xray_path = config.getoption(XRAYPATH)
    if xray_path:
        publisher = FilePublisher(xray_path)  # type: ignore
    else:
        if config.getoption(JIRA_CLOUD):
            endpoint = TEST_EXECUTION_ENDPOINT_CLOUD
        else:
            endpoint = TEST_EXECUTION_ENDPOINT

        if config.getoption(JIRA_CLIENT_SECRET_AUTH):
            options = get_bearer_auth()
            auth: Union[AuthBase, Tuple[str, str]] = ClientSecretAuth(
                options['BASE_URL'],
                options['CLIENT_ID'],
                options['CLIENT_SECRET'])
        elif config.getoption(JIRA_API_KEY):
            options = get_api_key_auth()
            auth = ApiKeyAuth(options['API_KEY'])
        elif config.getoption(JIRA_TOKEN):
            options = get_api_token_auth()
            auth = TokenAuth(options['TOKEN'])
        else:
            options = get_basic_auth()
            auth = (options['USER'], options['PASSWORD'])

        publisher = XrayPublisher(  # type: ignore
            base_url=options['BASE_URL'],
            endpoint=endpoint,
            auth=auth,
            verify=options['VERIFY'])

    plugin = XrayPlugin(config, publisher)
    config.pluginmanager.register(plugin=plugin, name=XRAY_PLUGIN)

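# Hedged usage sketch: pytest-jira-xray associates a test with an Xray test
# case via the marker registered above; the issue ID is invented.
import pytest

@pytest.mark.xray('JIRA-1')
def test_foo():
    assert True
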
def pytest_configure(config: Config) -> None: if config.pluginmanager.has_plugin("vcr"): raise RuntimeError( "`pytest-recording` is incompatible with `pytest-vcr`. " "Please, uninstall `pytest-vcr` in order to use `pytest-recording`." ) config.addinivalue_line("markers", "vcr: Mark the test as using VCR.py.") config.addinivalue_line( "markers", "block_network: Block network access except for VCR recording.") config.addinivalue_line( "markers", "default_cassette: Override the default cassette name..") config.addinivalue_line( "markers", "allowed_hosts: List of regexes to match hosts to where connection must be allowed" ) network.install_pycurl_wrapper()
def pytest_configure(config: Config) -> None:
    if config.option.runxfail:
        # yay a hack
        import pytest

        old = pytest.xfail
        config._cleanup.append(lambda: setattr(pytest, "xfail", old))

        def nop(*args, **kwargs):
            pass

        nop.Exception = xfail.Exception  # type: ignore[attr-defined]  # noqa: F821
        setattr(pytest, "xfail", nop)

    config.addinivalue_line(
        "markers",
        "skip(reason=None): skip the given test function with an optional reason. "
        'Example: skip(reason="no way of currently testing this") skips the '
        "test.",
    )
    config.addinivalue_line(
        "markers",
        "skipif(condition): skip the given test function if eval(condition) "
        "results in a True value. Evaluation happens within the "
        "module global context. Example: skipif('sys.platform == \"win32\"') "
        "skips the test if we are on the win32 platform. see "
        "https://docs.pytest.org/en/latest/skipping.html",
    )
    config.addinivalue_line(
        "markers",
        "xfail(condition, reason=None, run=True, raises=None, strict=False): "
        "mark the test function as an expected failure if eval(condition) "
        "has a True value. Optionally specify a reason for better reporting "
        "and run=False if you don't even want to execute the test function. "
        "If only specific exception(s) are expected, you can list them in "
        "raises, and if the test fails in other ways, it will be reported as "
        "a true failure. See https://docs.pytest.org/en/latest/skipping.html",
    )

def pytest_configure(config: Config) -> None:
    if config.option.runxfail:
        # yay a hack
        import pytest

        old = pytest.xfail
        config._cleanup.append(lambda: setattr(pytest, "xfail", old))

        def nop(*args, **kwargs):
            pass

        nop.Exception = xfail.Exception  # type: ignore[attr-defined]
        setattr(pytest, "xfail", nop)

    config.addinivalue_line(
        "markers",
        "skip(reason=None): skip the given test function with an optional reason. "
        'Example: skip(reason="no way of currently testing this") skips the '
        "test.",
    )
    config.addinivalue_line(
        "markers",
        "skipif(condition, ..., *, reason=...): "
        "skip the given test function if any of the conditions evaluate to True. "
        "Example: skipif(sys.platform == 'win32') skips the test if we are on the win32 platform. "
        "See https://docs.pytest.org/en/stable/reference.html#pytest-mark-skipif",
    )
    config.addinivalue_line(
        "markers",
        "xfail(condition, ..., *, reason=..., run=True, raises=None, strict=xfail_strict): "
        "mark the test function as an expected failure if any of the conditions "
        "evaluate to True. Optionally specify a reason for better reporting "
        "and run=False if you don't even want to execute the test function. "
        "If only specific exception(s) are expected, you can list them in "
        "raises, and if the test fails in other ways, it will be reported as "
        "a true failure. See https://docs.pytest.org/en/stable/reference.html#pytest-mark-xfail",
    )

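# Usage sketch for the core markers registered above (documented pytest
# behavior); the condition, reason, and test bodies are illustrative.
import sys
import pytest

@pytest.mark.skipif(sys.platform == "win32", reason="POSIX-only test")
def test_posix_only():
    pass

@pytest.mark.xfail(raises=ZeroDivisionError, strict=True)
def test_division():
    1 / 0
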
def pytest_configure(config: PyTestConfig) -> None: config.addinivalue_line("markers", "integration: mark integration tests") if not config.option.integration: config.option.markexpr = "not integration"
def pytest_configure(config: Config) -> None: """Introduces e2e marker.""" config.addinivalue_line("markers", "e2e: mark as end-to-end test.")
def pytest_configure(config: Config) -> None: config.addinivalue_line("markers", "record_stdout: Mark the test as text record.")
def pytest_configure(config: Config) -> None: config.addinivalue_line( "markers", f"{INTEGRATION_MARKER}: mark the test as an integration test")
def pytest_configure(config: Config) -> None: config.addinivalue_line("markers", "e2e: mark as end-to-end test.")
def _configure_requests_cache(config: Config) -> None:
    config.addinivalue_line(
        "markers", "requests_cache_disabled: Disable caching of requests.")
    config.addinivalue_line(
        "markers", "requests_cache_regenerate: Regenerate the cached requests.")

def pytest_configure(config: Config) -> None: """Register `sphinx` marker with pytest.""" config.addinivalue_line("markers", "sphinx")
def pytest_configure(config: Config) -> None:
    for m in markers:
        config.addinivalue_line("markers", m)

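# Hedged sketch of what the module-level `markers` list might contain; the
# entries are invented, but each follows the "name: description" format that
# addinivalue_line("markers", ...) expects.
markers = [
    "smoke: quick sanity checks",
    "slow: long-running tests",
]
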
def pytest_configure(config: Config):
    config.addinivalue_line(
        'markers',
        'default_user(user): modify the default user seerket uses for the test'
    )

def pytest_configure(config: Config) -> None: """Pytest configuration hook.""" config.addinivalue_line("markers", "e2e: mark as end-to-end test.")
def pytest_configure(config: Config) -> None: config.addinivalue_line("markers", "roundtrip: mark test as roundtrip")
def pytest_configure(config: Config): config.addinivalue_line( "markers", "nosession: mark test to run without a database session", )
def pytest_configure(config: Config) -> None: """General Pytest config.""" config.addinivalue_line( "markers", "cli-prod: mark as a live cli test (one that loads real data)." )
def pytest_configure(config: Config) -> None: config.addinivalue_line("markers", "slow: mark test as slow to run")
def pytest_configure(config: Config) -> None: """Configure test types.""" config.addinivalue_line("markers", "prod: Computationally expensive, full tests.")
def pytest_configure(config: Config) -> None: config.addinivalue_line( "markers", "filterwarnings(warning): add a warning filter to the given test. " "see https://docs.pytest.org/en/stable/warnings.html#pytest-mark-filterwarnings ", )
def pytest_configure(config: Config):
    for marker_spec in MarkerSpec:
        config.addinivalue_line(
            "markers", f"{marker_spec.name}: {marker_spec.description}"
        )

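# Hedged sketch of a `MarkerSpec` enum compatible with the loop above; the
# member names and descriptions are invented. Each member exposes `.name`
# (provided by Enum) and a `.description` attribute.
from enum import Enum

class MarkerSpec(Enum):
    smoke = "quick sanity checks"
    slow = "long-running tests"

    @property
    def description(self) -> str:
        return self.value
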