def __init__(
        self,
        vtestnet: Container,      # vtestnet container
        config: _pytest.config.Config,
        dockerctl: DockerCtl,
        docker_mounts: List[Mount]):
    """Initialize the session wrapper around a running vtestnet container.

    Args:
        vtestnet: Docker container running the test network.
        config: Config from pytest; source of the CLI options read below.
        dockerctl: Docker control helper.
        docker_mounts: Mounts to attach to containers launched later.
    """
    endpoint = config.getoption('--endpoint')
    if endpoint:
        # Caller supplied an explicit endpoint; use it as-is.
        self.endpoint = endpoint
    else:
        # Wait until the container has published port 8888/tcp, then
        # derive the endpoint from the mapped host port.
        ports = waitfor(vtestnet, ('NetworkSettings', 'Ports', '8888/tcp'))
        container_port = ports[0]['HostPort']
        self.endpoint = f'http://localhost:{container_port}'

    self.skip_build = config.getoption('--skip-build')
    self.force_build = config.getoption('--force-build')
    self.custom_includes = config.getoption('--include')

    # Pytest plugins used for reporting around captured output.
    self.reporter = config.pluginmanager.get_plugin('terminalreporter')
    self.capture_manager = config.pluginmanager.get_plugin(
        'capturemanager')

    self.vtestnet = vtestnet
    self.dockerctl = dockerctl
    self.docker_mounts = docker_mounts

    # Session state, populated lazily while tests run.
    self.user_keys = {}  # was dict(); literal for consistency with manifest
    self.manifest = {}
    self.sys_contracts_path = '/usr/opt/telos.contracts'
    self.sys_contracts = []
    self._sys_token_init = False
def pytest_configure(config: _pytest.config.Config) -> None:
    """Register the possible markers and change default error display.

    Display only the last error line without the traceback.
    """
    slow_marker = 'slow: marks tests as slow (deselect with -m "not slow")'
    config.addinivalue_line("markers", slow_marker)

    # Collapse tracebacks to a single line (just the error message), but
    # only when the user has not explicitly picked a traceback style.
    if config.option.tbstyle == "auto":
        config.option.tbstyle = "line"
def pytest_collection_modifyitems(
    config: _pytest.config.Config, items: List[pytest.Item]
) -> None:
    """Restrict the collected items to the subset belonging to one job.

    Reads ``--job`` (a regex match of ``current/total``), optionally
    ``--prev-junit-xml`` (timings from a previous run, used to balance the
    jobs) and ``--jobs-dry-run``.  Exits with an error message on
    inconsistent flag combinations; does nothing when ``--job`` is absent.

    Args:
        config: Config from pytest.
        items: Collected test items, filtered in place by job_selection.
    """
    job_config = config.getoption('--job')
    prev_junit_xml = config.getoption('--prev-junit-xml')
    dry_run = config.getoption('--jobs-dry-run')

    # The auxiliary flags only make sense together with --job.
    if prev_junit_xml is not None and job_config is None:
        pytest.exit('Cannot give the `--prev-junit-xml` flag without `--job`')
    if dry_run and job_config is None:
        # BUG FIX: message previously named a nonexistent `--jobs` flag.
        pytest.exit('Cannot give the `--jobs-dry-run` flag without `--job`')
    if job_config is None:
        return None

    # --job matches "current/total"; convert current to a 0-based index.
    job_current = int(job_config.group(1)) - 1
    jobs_total = int(job_config.group(2))
    if jobs_total <= 0:
        # BUG FIX: the flag is --job, not --job-config.
        pytest.exit(
            'Cannot run 0 jobs '
            + f'(--job {job_config.group(0)})'
        )
    if job_current < 0 or job_current >= jobs_total:
        pytest.exit(
            'Job index out of bounds '
            + f'(--job {job_config.group(0)})'
        )

    if prev_junit_xml is not None:
        if not os.path.isfile(prev_junit_xml):
            pytest.exit(
                f'The file {prev_junit_xml} given to '
                + '--prev-junit-xml does not exist'
            )
        # pytest.exit raises, so no `else` is needed here.
        timings = read_prev_timings(prev_junit_xml)
    else:
        timings = {}

    # BUG FIX: removed the doubled space in the progress message.
    print(
        f"(job selection: {job_current+1}/{jobs_total} with "
        + f"{len(timings)} timings from {prev_junit_xml})"
    )
    job_selection(config, items, timings, jobs_total, job_current, dry_run)
    return None
def pytest_collection_modifyitems(config: _pytest.config.Config, items: List):
    """Add a skip marker to ``slow`` tests unless --runslow was given."""
    if config.getoption("--runslow"):
        # --runslow given in cli: do not skip slow tests
        return
    marker = pytest.mark.skip(reason="need --runslow option to run")
    slow_items = (item for item in items if "slow" in item.keywords)
    for slow_item in slow_items:
        slow_item.add_marker(marker)
def pytest_collection_modifyitems(config: _pytest.config.Config,
                                  items: List[_pytest.nodes.Item]) -> None:
    """Modify the collected test items to skip smoke tests."""
    skips: Dict[str, Any] = {}
    if not config.getoption('--smoke'):
        # Smoke tests are opt-in.
        skips['smoke'] = pytest.mark.skip(
            reason='Smoke test. Run with --smoke or -m smoke to run')
    if not config.getoption('--integration'):
        # Integration tests are opt-in as well.
        skips['integration'] = pytest.mark.skip(
            reason='Integration test. '
            'Run with --integration or -m integration to run')
    for item in items:
        for tag, skip in skips.items():
            if tag in item.keywords:
                item.add_marker(skip)
def _get_settings(config: _pytest.config.Config, path: Path) -> Settings:
    """Return the settings from global and local test_case.yaml.

    Args:
        config: Config from pytest.
        path: Path to a test case directory.

    Returns:
        The settings from the test case yaml.
    """
    global_settings = Path(config.getoption("default_settings"))
    local_settings = _get_parent_path(path) / SETTINGS_PATH.name
    return Settings.from_local_file(global_settings, local_settings)
def _get_regression_path(config: _pytest.config.Config,
                         fspath: py.path.local) -> Optional[Path]:
    """Return the path to the reference directory of a test case.

    None is returned if --regression-root is not passed to the CLI.

    Args:
        config: Config from pytest.
        fspath: Path to a test case directory.

    Returns:
        The path to the reference directory of the test case or None.
    """
    regression_root = config.getoption("regression_root")
    if regression_root is None:
        return None
    # resolve(True) raises if the root does not exist on disk.
    resolved_root = Path(regression_root).resolve(True)
    return get_mirror_path(_get_parent_path(fspath), resolved_root)
def pytest_configure(config: _pytest.config.Config) -> None:
    """Register the custom markers used by this test suite."""
    markers = (
        "slow: mark test as slow to run",
        "fast: mark test as fast to run",
        "all: all tests",
        "asyncio: mark test as asyncio",
    )
    for marker in markers:
        config.addinivalue_line("markers", marker)
def pytest_configure(config: _pytest.config.Config) -> None:
    """Register the custom markers used by this test suite."""
    markers = (
        "slow: mark test as slow to run",
        "fast: mark test as fast to run",
        "all: all tests",
        "asyncio: mark test as asyncio",
        "vendor: mark test as vendor library",
        "libs: runs valid vendor tests",
        "benchmark: runs benchmark tests",
        "torch: runs torch tests",
        "duet: runs duet notebook integration tests",
        "grid: runs grid tests",
    )
    for marker in markers:
        config.addinivalue_line("markers", marker)
def pytest_configure(config: _pytest.config.Config):
    """Register the ``slow`` marker so pytest does not warn about it."""
    slow_marker = "slow: mark test as slow to run"
    config.addinivalue_line("markers", slow_marker)