def pytest_runtest_setup(item: pytest.Item):
    """Skip tests whose required backend support is unavailable.

    Tests marked ``indy`` are skipped when Indy support was not found;
    tests marked ``postgres`` are skipped when no Postgres URL is
    configured.
    """
    wants_indy = any(True for _ in item.iter_markers(name="indy"))
    if wants_indy and not INDY_FOUND:
        pytest.skip("test requires Indy support")
    wants_postgres = any(True for _ in item.iter_markers(name="postgres"))
    if wants_postgres and not POSTGRES_URL:
        pytest.skip("test requires Postgres support")
def pytest_runtest_setup(item: pytest.Item) -> None:
    """
    See :func:`_pytest.hookspec.pytest_runtest_setup` for documentation.
    Also see the "`Writing Plugins <https://docs.pytest.org/en/latest/writing_plugins.html>`_" guide.
    """
    if isinstance(item, _NanaimoItem):
        item.on_setup()
    # Show the name of the test about to run on the status display.
    status_display = _get_display(item.config)
    status_display.clear(display_default_message=False)
    status_display.write(item.name)
def pytest_runtest_setup(item: pytest.Item) -> None:
    """Called for each test item (class, individual tests).

    Ensures that altcoin tests are skipped, and that no test is skipped
    on both T1 and TT.
    """
    marked_both = (
        item.get_closest_marker("skip_t1") is not None
        and item.get_closest_marker("skip_t2") is not None
    )
    if marked_both:
        raise RuntimeError("Don't skip tests for both trezors!")
    if item.get_closest_marker("altcoin"):
        # Altcoin tests may be disabled via the environment.
        if int(os.environ.get("TREZOR_PYTEST_SKIP_ALTCOINS", 0)):
            pytest.skip("Skipping altcoin test")
def pytest_runtest_call(item: pytest.Item) -> None:
    """Run the test item under the Pyjion JIT.

    Enables Pyjion at the configured optimization level (plus graph and
    debug output for tests marked ``graph``), runs the test, and warns if
    the test function did not JIT-compile.

    Fix: the original called ``pyjion.disable()`` / ``pyjion.config(graph=False)``
    / ``gc.collect()`` only on the success path, so any failing test (which
    raises out of ``item.runtest()``) left the JIT and graph mode enabled
    for the rest of the session.  Cleanup now happens in a ``finally``.
    """
    pyjion.enable()
    pyjion.config(level=int(item.config.option.opt_level))
    for mark in item.iter_markers():
        if mark.name == "graph":
            pyjion.config(graph=True)
            pyjion.config(debug=True)
    try:
        item.runtest()
    finally:
        # Always restore global interpreter state, even when the test fails.
        pyjion.disable()
        pyjion.config(graph=False)
        gc.collect()
    # Only reached on success, matching the original behavior.
    info = pyjion.info(item.function)
    if not info.compiled:
        warnings.warn("{0} did not compile ({1})".format(
            item.function, str(info.compile_result)))
def get_order_number(test: pytest.Item) -> int:
    """Return the execution-order bucket for *test*.

    0 -- runs inside a wrapping transaction (Django ``TestCase``,
         ``django_db(transaction=False)``, or the ``db`` fixture);
    1 -- needs real transactions (``TransactionTestCase``,
         ``django_db(transaction=True)``, or ``transactional_db``);
    2 -- everything else.
    """
    cls = getattr(test, "cls", None)
    if cls:
        # Beware, TestCase is a subclass of TransactionTestCase
        if issubclass(cls, TestCase):
            return 0
        if issubclass(cls, TransactionTestCase):
            return 1

    db_marker = test.get_closest_marker("django_db")
    transaction = validate_django_db(db_marker)[0] if db_marker else None
    if transaction is True:
        return 1

    fixtures = getattr(test, "fixturenames", [])
    if "transactional_db" in fixtures:
        return 1
    if transaction is False:
        return 0
    if "db" in fixtures:
        return 0
    return 2
def clusterrolebindings_from_marker(item: pytest.Item, namespace: str) -> List[ClusterRoleBinding]:
    """Build a ClusterRoleBinding for every ``pytest.mark.clusterrolebinding``
    marker on the test case.

    Args:
        item: The pytest test item.
        namespace: The namespace of the test case.

    Return:
        The ClusterRoleBindings which were generated from the test case markers.
    """
    bindings = []
    for mark in item.iter_markers(name='clusterrolebinding'):
        role_name = mark.args[0]
        # Use a custom RBAC subject if one was given via marker kwargs,
        # otherwise fall back to the default subjects for the namespace.
        subjects = get_custom_rbac_subject(
            namespace,
            mark.kwargs.get('subject_kind'),
            mark.kwargs.get('subject_name'),
        )
        if not subjects:
            subjects = get_default_rbac_subjects(namespace)
        bindings.append(ClusterRoleBinding(client.V1ClusterRoleBinding(
            metadata=client.V1ObjectMeta(
                name=f'kubetest:{item.name}',
            ),
            role_ref=client.V1RoleRef(
                api_group='rbac.authorization.k8s.io',
                kind='ClusterRole',
                name=role_name,
            ),
            subjects=subjects,
        )))
    return bindings
def ignore_doctest_warning(item: pytest.Item, path: str, message: str) -> None:
    """Ignore doctest warning.

    Parameters
    ----------
    item : pytest.Item
        pytest test item.
    path : str
        Module path to Python object, e.g. "pandas.core.frame.DataFrame.append".
        A warning will be filtered when item.name ends with in given path.
        So it is sufficient to specify e.g. "DataFrame.append".
    message : str
        Message to be filtered.
    """
    # Only items whose name matches the given object path get the filter.
    if not item.name.endswith(path):
        return
    item.add_marker(pytest.mark.filterwarnings(f"ignore:{message}"))
def pytest_runtest_setup(item: pytest.Item):
    """Skip tests whose marked backend dependency is not available.

    Each marker name maps to an entry in the global ``STUBS`` registry;
    the ``postgres`` marker additionally requires a configured URL.
    """
    global STUBS
    # (marker name / STUBS key, human-readable label) pairs.
    requirements = (
        ("askar", "Askar"),
        ("indy", "Indy"),
        ("indy_credx", "Indy-Credx"),
        ("indy_vdr", "Indy-VDR"),
    )
    for marker_name, label in requirements:
        marked = any(True for _ in item.iter_markers(name=marker_name))
        if marked and not STUBS[marker_name].found:
            pytest.skip(f"test requires {label} support")
    if any(True for _ in item.iter_markers(name="postgres")) and not POSTGRES_URL:
        pytest.skip("test requires Postgres support")
def _whitelisted_service_names(item: pytest.Item) -> Set[str]: """Returns a set of whitelisted service names configured by pytest marker diag_service_whitelist, which should be used like this: @pytest.mark.diag_service_whitelist(set('service1', 'service2')) def your_test_here(): ... Note that the diag_service_whitelist marker can be used on function, class, or module to be able to hierarchically configure the whitelist. """ if item.get_closest_marker(name='diag_service_whitelist') is None: return set() whitelisted_service_names: Set[str] = set() for mark in item.iter_markers(name='diag_service_whitelist'): whitelisted_service_names = whitelisted_service_names.union(mark.args[0]) return whitelisted_service_names
def get_test_suite_name(item: pytest.Item) -> str:
    """Returns the test suite name to use for a given test.

    Examples:
        frameworks/template/tests/test_sanity.py => test_sanity_py
        tests/test_sanity.py => test_sanity_py

    When the item is a method, the enclosing class supplies the suite name.
    """
    if inspect.ismethod(item.obj):
        container = item.getparent(pytest.Class)
    else:
        container = item.parent
    # Dots are not valid in suite names, so replace them.
    return str(os.path.basename(container.name)).replace(".", "_")
def apply_manifests_from_marker(item: pytest.Item, meta: manager.TestMeta) -> None:
    """Load manifests and create the API objects for the specified files.

    This gets called for every `pytest.mark.applymanifests` marker on
    test cases.

    Once a manifest has been loaded, the API objects will be registered with
    the test cases' TestMeta. This allows some easier control via the
    "kube" fixture, such as waiting for all objects to be created.

    Args:
        item: The pytest test item.
        meta: The metainfo object for the marked test case.
    """
    for mark in item.iter_markers(name='applymanifests'):
        manifest_dir = mark.args[0]
        file_names = mark.kwargs.get('files')

        # Relative paths are resolved against the directory that contains
        # the test file; absolute paths are used as-is.
        if not os.path.isabs(manifest_dir):
            manifest_dir = os.path.abspath(
                os.path.join(os.path.dirname(item.fspath), manifest_dir)
            )

        # Load the whole directory, or just the named files if given.
        if file_names is None:
            loaded = load_path(manifest_dir)
        else:
            loaded = []
            for name in file_names:
                loaded.extend(load_file(os.path.join(manifest_dir, name)))

        # Wrap each Kubernetes resource in its kubetest ApiObject
        # equivalent.  We cannot reliably create a resource without our
        # ApiObject wrapper semantics, so an unmatched kind is an error.
        wrapped = []
        for obj in loaded:
            for klass in ApiObject.__subclasses__():
                if obj.kind == klass.__name__:
                    wrapped.append(klass(obj))
                    break
            else:
                raise ValueError(
                    f'Unable to match loaded object to an internal wrapper class: {obj}',
                )

        meta.register_objects(wrapped)
def apply_manifest_from_marker(item: pytest.Item, meta: manager.TestMeta) -> None:
    """Load a manifest and create the API objects for the specified file.

    This gets called for every `pytest.mark.applymanifest` marker on
    test cases.

    Once the manifest has been loaded, the API object(s) will be registered
    with the test cases' TestMeta. This allows easier control via the
    "kube" fixture, such as waiting for all objects to be created.

    Args:
        item: The pytest test item.
        meta: The metainfo object for the marked test case.

    Raises:
        TypeError: The renderer given via marker kwargs is not callable.
        ValueError: A loaded object has no matching ApiObject wrapper.
    """
    item_renderer = get_manifest_renderer_for_item(item)
    for mark in item.iter_markers(name="applymanifest"):
        path = mark.args[0]
        renderer = mark.kwargs.get("renderer", item_renderer)
        if not callable(renderer):
            raise TypeError("renderer given is not callable")

        # Normalize the path to be absolute.
        if not os.path.isabs(path):
            path = os.path.abspath(path)

        # Load the manifest
        context = dict(namespace=meta.ns, test_node_id=meta.node_id, test_name=meta.name)
        context_renderer = ContextRenderer(renderer, context)
        objs = load_file(path, renderer=context_renderer)

        # For each of the loaded Kubernetes resources, wrap it in the
        # equivalent kubetest wrapper. If the object does not yet have a
        # wrapper, error out. We cannot reliably create the resource
        # without our ApiObject wrapper semantics.
        wrapped = []
        for obj in objs:
            # Fix: `found` must be reset per object. Previously it was set
            # once before the loop, so after the first match any later
            # unmatched object silently skipped the error below.
            found = False
            for klass in ApiObject.__subclasses__():
                if obj.kind == klass.__name__:
                    wrapped.append(klass(obj))
                    found = True
                    break
            if not found:
                raise ValueError(
                    f"Unable to match loaded object to an internal wrapper class: {obj}",
                )

        meta.register_objects(wrapped)
def pytest_runtest_setup(item: pytest.Item) -> None:
    """Per-test setup gating tests by emulator/database requirements.

    Tests using the ``emulator`` fixture are skipped unless the emulator is
    enabled (and never run in the "parallel" test set).  Tests without the
    fixture get ``google.auth.default`` patched so they cannot create real
    Google clients.  Tests marked ``uses_db`` are excluded from the
    "parallel" set; all remaining tests are excluded from "not-parallel".
    """
    test_set = item.config.getoption("test_set", default=None)

    if "emulator" in item.fixturenames:
        if test_set == "parallel" or not item.config.getoption("with_emulator", default=None):
            pytest.skip("requires datastore emulator")
    else:
        # For tests without the emulator, prevent them from trying to create google cloud clients.
        item.google_auth_patcher = patch("google.auth.default")
        mock_google_auth = item.google_auth_patcher.start()
        mock_google_auth.side_effect = AssertionError(
            "Unit test may not instantiate a Google client. Please mock the appropriate client class inside this test "
            " (e.g. `patch('google.cloud.bigquery.Client')`).")

    if item.get_closest_marker("uses_db") is not None:
        if test_set == "parallel":
            pytest.skip(
                "[parallel tests] skipping because test requires database")
    elif test_set == "not-parallel":
        pytest.skip(
            "[not-parallel tests] skipping because test does not require database or emulator"
        )
def get_manifest_renderer_for_item(item: pytest.Item) -> Renderer:
    """Return the callable for rendering a manifest template.

    The renderer comes from the closest `pytest.mark.render_manifests`
    marker; `kubetest.manifest.render` is the default when no marker is
    found.

    Args:
        item: The pytest test item.

    Returns:
        A callable for rendering manifest templates into YAML documents.
    """
    marker = item.get_closest_marker("render_manifests")
    if marker is None:
        return render
    return marker.args[0]
def check_dcos_min_version_mark(item: pytest.Item):
    """Enforces the dcos_min_version pytest annotation, which should be used like this:

    @pytest.mark.dcos_min_version('1.10')
    def your_test_here(): ...

    In order for this annotation to take effect, this function must be called by a
    pytest_runtest_setup() hook.
    """
    # Fix: Item.get_marker() was removed in pytest 4.x; get_closest_marker()
    # is the supported replacement and behaves identically for this usage.
    min_version_mark = item.get_closest_marker("dcos_min_version")
    if min_version_mark:
        min_version = min_version_mark.args[0]
        message = "Feature only supported in DC/OS {} and up".format(min_version)
        if "reason" in min_version_mark.kwargs:
            message += ": {}".format(min_version_mark.kwargs["reason"])
        if dcos_version_less_than(min_version):
            pytest.skip(message)
def check_dcos_min_version_mark(item: pytest.Item) -> None:
    """Enforces the dcos_min_version pytest annotation, which should be used like this:

    @pytest.mark.dcos_min_version('1.10')
    def your_test_here(): ...

    In order for this annotation to take effect, this function must be called by a
    pytest_runtest_setup() hook.
    """
    mark = item.get_closest_marker("dcos_min_version")
    if not mark:
        return
    required = mark.args[0]
    message = "Feature only supported in DC/OS {} and up".format(required)
    if "reason" in mark.kwargs:
        message += ": {}".format(mark.kwargs["reason"])
    if dcos_version_less_than(required):
        pytest.skip(message)
def deserialize_report(self, reporttype, report):
    """Rebuild a pytest report object from its serialized dict form.

    Serialized entries under ``report['result']`` are re-instantiated as
    ``pytest.Item`` objects bound to this object's config and session
    before the report class is constructed from the dict.

    Raises:
        RuntimeError: if ``reporttype`` is neither "test" nor "collect".
    """
    from _pytest.runner import TestReport, CollectReport
    from pytest import Item

    if 'result' in report:
        report['result'] = [
            Item(entry['name'], config=self.config, session=self.session)
            for entry in report['result']
        ]

    if reporttype == "test":
        return TestReport(**report)
    if reporttype == "collect":
        return CollectReport(**report)
    raise RuntimeError("Invalid report type: {}".format(reporttype))
def pytest_runtest_setup(item: pytest.Item):
    """Skip tests whose environment requirements are not satisfied.

    Markers express requirements on the notebook type (``colab``,
    ``jupyter``) or the installed IPython major version
    (``ipython_pre7``, ``ipython_post7``).
    """
    def _ipython_major():
        return int(IPYTHON_VERSION.split('.')[0])

    missing = []
    for mark in item.iter_markers():
        if mark.name == 'colab':
            if NOTEBOOK_TYPE != 'colab':
                missing.append('Google Colab')
        elif mark.name == 'jupyter':
            if NOTEBOOK_TYPE != 'jupyter':
                missing.append('Jupyter notebooks')
        elif mark.name == 'ipython_pre7':
            if IPYTHON_VERSION == 'latest' or _ipython_major() >= 7:
                missing.append('IPython<7.0.0')
        elif mark.name == 'ipython_post7':
            if IPYTHON_VERSION != 'latest' and _ipython_major() < 7:
                missing.append('IPython>=7.0.0')
    if missing:
        pytest.skip(f"Test requires {', '.join(missing)}")
def get_test_item(item: Item):
    """Build a summary dict for a collected test item, keyed by its node id.

    The value records any ``test_process_marks`` marker arguments, whether
    such markers were present, the item's other custom marks, and the test
    name.

    Args:
        item: The collected pytest test item.

    Returns:
        A one-entry dict mapping ``item.nodeid`` to the summary dict.
    """
    process_tag = [
        marker.args[0] for marker in item.iter_markers(name="test_process_marks")
    ]
    # Fix: the original `if not is_process_tag: is_process_tag = False` was
    # dead code (a no-op); the flag is simply whether any process marks exist.
    is_process_tag = bool(process_tag)
    other_marks = get_my_custom_marks(item)
    return {
        item.nodeid: {
            "test_process_marks": process_tag,
            "is_process_tag": is_process_tag,
            "test_marks": other_marks,
            "test_name": item.name,
        }
    }
def rolebindings_from_marker(item: pytest.Item, namespace: str) -> List[RoleBinding]:
    """Create a RoleBinding for each ``pytest.mark.rolebinding`` marker on
    the test case.

    Args:
        item: The pytest test item.
        namespace: The namespace of the test case.

    Returns:
        The RoleBindings that were generated from the test case markers.
    """
    bindings = []
    for mark in item.iter_markers(name="rolebinding"):
        role_kind, role_name = mark.args[0], mark.args[1]
        # Use a custom RBAC subject if configured via marker kwargs,
        # otherwise fall back to the default subjects for the namespace.
        subjects = get_custom_rbac_subject(
            namespace,
            mark.kwargs.get("subject_kind"),
            mark.kwargs.get("subject_name"),
        )
        if not subjects:
            subjects = get_default_rbac_subjects(namespace)
        bindings.append(
            RoleBinding(
                client.V1RoleBinding(
                    metadata=client.V1ObjectMeta(
                        name=f"kubetest:{item.name}",
                        namespace=namespace,
                    ),
                    role_ref=client.V1RoleRef(
                        api_group="rbac.authorization.k8s.io",
                        kind=role_kind,
                        name=role_name,
                    ),
                    subjects=subjects,
                )
            )
        )
    return bindings
def pytest_runtest_setup(item: pytest.Item):
    """Apply ``pytest.mark.patch`` markers for the test and reload modules.

    Every ``patch`` marker starts a ``unittest.mock.patch`` (tracked in the
    item's ``Private`` state for later teardown).  Cached modules under
    ``default_reload`` that are not themselves patched are then reloaded so
    they observe the patched attributes.
    """
    private = Private()
    private.install(item)

    patched_modules = []
    recipes = []
    for mark in item.iter_markers(name='patch'):
        if mark.args:
            target = mark.args[0]
        elif 'target' in mark.kwargs:
            target = mark.kwargs['target']
        else:
            raise ValueError("No patching target specified")
        # Remember the module portion of the dotted target so that the
        # reload pass below skips modules that are being patched.
        patched_modules.append('.'.join(target.split('.')[:-1]))
        recipes.append((mark.args, mark.kwargs))

    for args, kwargs in recipes:
        patcher = unittest.mock.patch(*args, **kwargs)
        private.loaded_patchers.append(patcher)
        patcher.start()

    for cached_module in tuple(sys.modules.keys()):
        if cached_module in patched_modules:
            continue
        if not cached_module.startswith(default_reload):
            continue
        private.reloaded_modules.append(cached_module)
        importlib.reload(sys.modules[cached_module])
def pytest_itemcollected(item: pytest.Item) -> None:
    """Replace the collected item's test function with a caching Memoized proxy."""
    print(f"Caching {item!r}")
    memoized = Memoized(item.obj, group=group)
    item.obj = memoized
def pytest_runtest_setup(item: pytest.Item):
    """Log any ``sosu`` markers attached to the test item."""
    logger.debug("pytest_runtest_setup", item=item)
    marks = [m for m in item.iter_markers(name="sosu")]
    if marks:
        logger.debug("sosu marker(s) found", sosu_markers=marks)