def evaluate_skip_marks(item: Item) -> Optional[Skip]:
    """Evaluate skip and skipif marks on item, returning Skip if triggered."""
    for mark in item.iter_markers(name="skipif"):
        if "condition" not in mark.kwargs:
            conditions = mark.args
        else:
            conditions = (mark.kwargs["condition"],)

        # Unconditional.
        if not conditions:
            reason = mark.kwargs.get("reason", "")
            return Skip(reason)

        # If any of the conditions are true.
        for condition in conditions:
            result, reason = evaluate_condition(item, mark, condition)
            if result:
                return Skip(reason)

    for mark in item.iter_markers(name="skip"):
        if "reason" in mark.kwargs:
            reason = mark.kwargs["reason"]
        elif mark.args:
            reason = mark.args[0]
        else:
            reason = "unconditional skip"
        return Skip(reason)

    return None

def evaluate_skip_marks(item: Item) -> Optional[Skip]:
    """Evaluate skip and skipif marks on item, returning Skip if triggered."""
    for mark in item.iter_markers(name="skipif"):
        if "condition" not in mark.kwargs:
            conditions = mark.args
        else:
            conditions = (mark.kwargs["condition"],)

        # Unconditional.
        if not conditions:
            reason = mark.kwargs.get("reason", "")
            return Skip(reason)

        # If any of the conditions are true.
        for condition in conditions:
            result, reason = evaluate_condition(item, mark, condition)
            if result:
                return Skip(reason)

    for mark in item.iter_markers(name="skip"):
        try:
            return Skip(*mark.args, **mark.kwargs)
        except TypeError as e:
            raise TypeError(str(e) + " - maybe you meant pytest.mark.skipif?") from None

    return None

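# A hedged usage sketch of the marks the two evaluators above consume; the
# test names below are illustrative assumptions, not pytest source. Note the
# precedence the evaluators implement: skipif marks are checked before plain
# skip marks, and the first triggered mark wins.
import sys

import pytest

@pytest.mark.skip(reason="not implemented yet")
def test_unconditional_skip():
    ...

@pytest.mark.skipif(sys.version_info < (3, 8), reason="requires Python 3.8+")
def test_conditional_skip():
    ...
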
def _runtest_for_main(self, item: nodes.Item, when: str) -> Generator[None, None, None]:
    """Implements the internals of pytest_runtest_xxx() hook."""
    with catching_logs(
        LogCaptureHandler(), formatter=self.formatter, level=self.log_level
    ) as log_handler:
        if self.log_cli_handler:
            self.log_cli_handler.set_when(when)

        if item is None:
            yield  # run the test
            return

        if not hasattr(item, "catch_log_handlers"):
            item.catch_log_handlers = {}  # type: ignore[attr-defined] # noqa: F821
        item.catch_log_handlers[when] = log_handler  # type: ignore[attr-defined] # noqa: F821
        item.catch_log_handler = log_handler  # type: ignore[attr-defined] # noqa: F821
        try:
            yield  # run test
        finally:
            if when == "teardown":
                del item.catch_log_handler  # type: ignore[attr-defined] # noqa: F821
                del item.catch_log_handlers  # type: ignore[attr-defined] # noqa: F821

        if self.print_logs:
            # Add a captured log section to the report.
            log = log_handler.stream.getvalue().strip()
            item.add_report_section(when, "log", log)

def pytest_runtest_setup(item: Item) -> None:
    """Skip environment-gated tests unless the matching TEST_*_ACTIVE variable is truthy."""
    env = Env()
    if item.get_closest_marker("k8s") and not env.bool("TEST_CLUSTER_ACTIVE", False):
        pytest.skip("test requires TEST_CLUSTER_ACTIVE to be true")
    if item.get_closest_marker("docker") and not env.bool("TEST_DOCKER_ACTIVE", False):
        pytest.skip("test requires TEST_DOCKER_ACTIVE to be true")
    if item.get_closest_marker("vault") and not env.bool("TEST_VAULT_ACTIVE", False):
        pytest.skip("test requires TEST_VAULT_ACTIVE to be true")

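# A minimal usage sketch for the environment-gated hook above. The marker
# names (k8s, docker, vault) and the TEST_*_ACTIVE variables come from the
# hook itself; the registration snippet and the test below are illustrative
# assumptions. The markers would be declared in pytest.ini to silence
# unknown-mark warnings, e.g.:
#
#   [pytest]
#   markers =
#       k8s: requires a live Kubernetes cluster
#       docker: requires a running Docker daemon
#       vault: requires a reachable Vault server
import pytest

@pytest.mark.k8s
def test_deployment_rollout():
    # Skipped unless TEST_CLUSTER_ACTIVE is truthy in the environment.
    ...
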
def _runtest_for(self, item: nodes.Item, when: str) -> Generator[None, None, None]:
    """Implements the internals of pytest_runtest_xxx() hook."""
    with catching_logs(self.log_handler, level=self.log_level) as log_handler:
        log_handler.reset()
        item._store[catch_log_records_key][when] = log_handler.records
        item._store[catch_log_handler_key] = log_handler

        yield

        log = log_handler.stream.getvalue().strip()
        item.add_report_section(when, "log", log)

def item_capture(self, when: str, item: Item) -> Generator[None, None, None]:
    """Capture global output while the item runs, then attach stdout/stderr report sections."""
    self.resume_global_capture()
    self.activate_fixture()
    try:
        yield
    finally:
        self.deactivate_fixture()
        self.suspend_global_capture(in_=False)

    out, err = self.read_global_capture()
    item.add_report_section(when, "stdout", out)
    item.add_report_section(when, "stderr", err)

def pytest_runtest_setup(item: Item) -> None:
    """Evaluate skip/skipif marks, then xfail marks (unless --runxfail), before the item runs."""
    item._store[skipped_by_mark_key] = False

    skipped = evaluate_skip_marks(item)
    if skipped:
        item._store[skipped_by_mark_key] = True
        skip(skipped.reason)

    if not item.config.option.runxfail:
        item._store[xfailed_key] = xfailed = evaluate_xfail_marks(item)
        if xfailed and not xfailed.run:
            xfail("[NOTRUN] " + xfailed.reason)

def from_item_and_call(cls, item: Item, call: "CallInfo[None]") -> "TestReport":
    """Create and fill a TestReport with standard item and call info."""
    when = call.when
    # Remove "collect" from the Literal type -- only for collection calls.
    assert when != "collect"
    duration = call.duration
    keywords = {x: 1 for x in item.keywords}
    excinfo = call.excinfo
    sections = []
    if not call.excinfo:
        outcome: Literal["passed", "failed", "skipped"] = "passed"
        longrepr: Union[
            None,
            ExceptionInfo[BaseException],
            Tuple[str, int, str],
            str,
            TerminalRepr,
        ] = None
    else:
        if not isinstance(excinfo, ExceptionInfo):
            outcome = "failed"
            longrepr = excinfo
        elif isinstance(excinfo.value, skip.Exception):
            outcome = "skipped"
            r = excinfo._getreprcrash()
            if excinfo.value._use_item_location:
                filename, line = item.reportinfo()[:2]
                assert line is not None
                longrepr = str(filename), line + 1, r.message
            else:
                longrepr = (str(r.path), r.lineno, r.message)
        else:
            outcome = "failed"
            if call.when == "call":
                longrepr = item.repr_failure(excinfo)
            else:  # exception in setup or teardown
                longrepr = item._repr_failure_py(
                    excinfo, style=item.config.getoption("tbstyle", "auto")
                )
    for rwhen, key, content in item._report_sections:
        sections.append((f"Captured {key} {rwhen}", content))
    return cls(
        item.nodeid,
        item.location,
        keywords,
        outcome,
        longrepr,
        when,
        sections,
        duration,
        user_properties=item.user_properties,
    )

def _runtest_for(self, item: nodes.Item, when: str) -> Generator[None, None, None]:
    """Implements the internals of pytest_runtest_xxx() hook."""
    log_handler = LogCaptureHandler()
    log_handler.setFormatter(self.formatter)

    with catching_logs(log_handler, level=self.log_level):
        item._store[catch_log_handlers_key][when] = log_handler
        item._store[catch_log_handler_key] = log_handler

        yield

        log = log_handler.stream.getvalue().strip()
        item.add_report_section(when, "log", log)

def finish_test(self, report: TestReport, item: Item) -> None:
    """Finish the active Zebrunner test, translating xfail marks into Zebrunner statuses."""
    if zebrunner_context.test_is_active:
        self.authorize()
        xfail_markers = list(item.iter_markers("xfail"))
        fail_reason = None
        is_strict = False
        if xfail_markers:
            fail_reason = xfail_markers[0].kwargs.get("reason")
            is_strict = xfail_markers[0].kwargs.get("strict", False)

        if report.passed:
            status = TestStatus.PASSED
            if xfail_markers:
                if is_strict:
                    status = TestStatus.FAILED
                    fail_reason = report.longreprtext
                else:
                    status = TestStatus.SKIPPED
        else:
            status = TestStatus.FAILED
            fail_reason = report.longreprtext
            if xfail_markers:
                status = TestStatus.SKIPPED
                fail_reason = xfail_markers[0].kwargs.get("reason")

        self.api.finish_test(
            zebrunner_context.test_run_id,
            zebrunner_context.test_id,
            FinishTestModel(
                result=status.value,
                reason=fail_reason,
            ),
        )
        zebrunner_context.test = None

def pytest_runtest_setup(self, item: nodes.Item) -> Generator[None, None, None]:
    self.log_cli_handler.set_when("setup")

    # Start each item with a fresh mapping of run phases to captured records.
    empty: Dict[str, List[logging.LogRecord]] = {}
    item._store[caplog_records_key] = empty
    yield from self._runtest_for(item, "setup")

def pytest_runtest_setup(item: Item):
    """Skip a test marked `runalone` unless it was selected explicitly on the command line."""
    if item.get_closest_marker("runalone"):
        match = re.match(r"test_([^\[]+)", item.name)
        assert match is not None, f"unexpected test name: {item.name}"
        name = match.group()
        if not item.config.args or not any(
            re.match(fr".*::{name}", arg) for arg in item.config.args
        ):
            pytest.skip("should run alone")

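# A hedged sketch of the "runalone" marker consumed by the hook above; the
# test itself is an assumption. Invoking
# `pytest tests/test_migrations.py::test_rebuild_schema` puts a matching
# `...::test_rebuild_schema` entry in config.args, so the test runs; a plain
# `pytest` invocation skips it.
import pytest

@pytest.mark.runalone
def test_rebuild_schema():
    ...
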
def _add_item_hier_parts_parametrize(item, report_parts, tests_parts, rp_name=""):
    """Add item to hierarchy of parents with params; return the accumulated rp_name."""
    for mark in item.own_markers:
        if mark.name == 'parametrize':
            ch_index = item.nodeid.find("[")
            test_fullname = item.nodeid[:ch_index if ch_index > 0 else len(item.nodeid)]
            test_name = item.originalname
            rp_name += ("::" if rp_name else "") + test_name
            if test_fullname in tests_parts:
                item_test = tests_parts[test_fullname]
            else:
                item_test = Item(test_fullname, nodeid=test_fullname,
                                 session=item.session,
                                 config=item.session.config)
                item_test._rp_name = rp_name
                item_test.obj = item.obj
                item_test.keywords = item.keywords
                item_test.own_markers = item.own_markers
                item_test.parent = item.parent
                tests_parts[test_fullname] = item_test
            rp_name = ""
            report_parts.append(item_test)
            break
    return rp_name

def from_item_and_call(cls, item: Item, call: "CallInfo[None]") -> "TestReport":
    """
    Factory method to create and fill a TestReport with standard item and
    call info.
    """
    when = call.when
    # Remove "collect" from the Literal type -- only for collection calls.
    assert when != "collect"
    duration = call.duration
    keywords = {x: 1 for x in item.keywords}
    excinfo = call.excinfo
    sections = []
    if not call.excinfo:
        outcome = "passed"  # type: Literal["passed", "failed", "skipped"]
        # TODO: Improve this Any.
        longrepr = None  # type: Optional[Any]
    else:
        if not isinstance(excinfo, ExceptionInfo):
            outcome = "failed"
            longrepr = excinfo
        elif excinfo.errisinstance(skip.Exception):
            outcome = "skipped"
            r = excinfo._getreprcrash()
            longrepr = (str(r.path), r.lineno, r.message)
        else:
            outcome = "failed"
            if call.when == "call":
                longrepr = item.repr_failure(excinfo)
            else:  # exception in setup or teardown
                longrepr = item._repr_failure_py(
                    excinfo, style=item.config.getoption("tbstyle", "auto"))
    for rwhen, key, content in item._report_sections:
        sections.append(("Captured {} {}".format(key, rwhen), content))
    return cls(
        item.nodeid,
        item.location,
        keywords,
        outcome,
        longrepr,
        when,
        sections,
        duration,
        user_properties=item.user_properties,
    )

def pytest_runtest_call(item: Item) -> None:
    _update_current_test_var(item, "call")
    try:
        del sys.last_type
        del sys.last_value
        del sys.last_traceback
    except AttributeError:
        pass
    try:
        item.runtest()
    except Exception as e:
        # Store trace info to allow postmortem debugging
        sys.last_type = type(e)
        sys.last_value = e
        assert e.__traceback__ is not None
        # Skip *this* frame
        sys.last_traceback = e.__traceback__.tb_next
        raise e

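# The sys.last_* bookkeeping above is what makes postmortem debugging work
# after a test failure: pdb reads sys.last_traceback. A minimal sketch of a
# consumer (an assumed helper, not pytest source):
import pdb
import sys

def debug_last_failure() -> None:
    # Meaningful only after a failed item has populated sys.last_traceback.
    tb = getattr(sys, "last_traceback", None)
    if tb is not None:
        pdb.post_mortem(tb)
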
def _runtest_for(self, item: nodes.Item, when: str) -> Generator[None, None, None]:
    """Implement the internals of the pytest_runtest_xxx() hooks."""
    with catching_logs(
        self.caplog_handler,
        level=self.log_level,
    ) as caplog_handler, catching_logs(
        self.report_handler,
        level=self.log_level,
    ) as report_handler:
        caplog_handler.reset()
        report_handler.reset()
        item.stash[caplog_records_key][when] = caplog_handler.records
        item.stash[caplog_handler_key] = caplog_handler

        yield

        log = report_handler.stream.getvalue().strip()
        item.add_report_section(when, "log", log)

def pytest_runtest_setup(item: Item) -> None:
    # Check if skip or skipif are specified as pytest marks
    item._store[skipped_by_mark_key] = False
    eval_skipif = MarkEvaluator(item, "skipif")
    if eval_skipif.istrue():
        item._store[skipped_by_mark_key] = True
        skip(eval_skipif.getexplanation())

    for skip_info in item.iter_markers(name="skip"):
        item._store[skipped_by_mark_key] = True
        if "reason" in skip_info.kwargs:
            skip(skip_info.kwargs["reason"])
        elif skip_info.args:
            skip(skip_info.args[0])
        else:
            skip("unconditional skip")

    item._store[evalxfail_key] = MarkEvaluator(item, "xfail")
    check_xfail_no_run(item)

def start_test(self, report: TestReport, item: Item) -> None:
    """Register a started test with Zebrunner; finish it as SKIPPED right away if setup skipped it."""
    self.authorize()
    test = Test(
        name=item.name,
        file=item.nodeid.split("::")[1],
        maintainers=[mark.args[0] for mark in item.iter_markers("maintainer")],
        labels=[
            (str(mark.args[0]), str(mark.args[1]))
            for mark in item.iter_markers("label")
        ],
    )
    zebrunner_context.test = test

    if zebrunner_context.test_run_is_active:
        test.zebrunner_id = self.api.start_test(
            zebrunner_context.test_run_id,
            StartTestModel(
                name=test.name,
                class_name=test.file,
                method_name=test.name,
                maintainer=",".join(test.maintainers),
                labels=[{"key": label[0], "value": label[1]} for label in test.labels],
                correlation_data=CorrelationDataModel(name=test.name).json(),
            ),
        )

    if report.skipped and zebrunner_context.test_is_active:
        skip_markers = list(filter(lambda x: x.name == "skip", item.own_markers))
        skip_reason = skip_markers[0].kwargs.get("reason") if skip_markers else None
        self.api.finish_test(
            zebrunner_context.test_run_id,
            zebrunner_context.test_id,
            FinishTestModel(reason=skip_reason, result=TestStatus.SKIPPED.value),
        )
        zebrunner_context.test = None

def runtestprotocol(
    item: Item, log: bool = True, nextitem: Optional[Item] = None
) -> List[TestReport]:
    hasrequest = hasattr(item, "_request")
    if hasrequest and not item._request:  # type: ignore[attr-defined]
        item._initrequest()  # type: ignore[attr-defined]
    rep = call_and_report(item, "setup", log)
    reports = [rep]
    if rep.passed:
        if item.config.getoption("setupshow", False):
            show_test_item(item)
        if not item.config.getoption("setuponly", False):
            reports.append(call_and_report(item, "call", log))
    reports.append(call_and_report(item, "teardown", log, nextitem=nextitem))
    # After all teardown hooks have been called
    # want funcargs and request info to go away.
    if hasrequest:
        item._request = False  # type: ignore[attr-defined]
        item.funcargs = None  # type: ignore[attr-defined]
    return reports

def pytest_runtest_call(item: Item) -> None:
    call: CallInfo = yield  # noqa
    # TODO: extremely dirty... maybe it would be better to find a way to
    # fail the test itself instead?
    sm = item.funcargs.get("snapshot")
    if sm:
        verify = True
        paths = []
        for m in item.iter_markers(name="skip_snapshot_verify"):
            verify = False
            paths = m.kwargs.get("paths", [])
        sm._assert_all(verify, paths)

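# Illustrative sketch (the test and the path expression are assumptions, not
# plugin source) of the skip_snapshot_verify marker read by the wrapper above:
# the marker's `paths` kwarg is forwarded to the snapshot manager, and the
# marker's presence turns full verification off.
import pytest

@pytest.mark.skip_snapshot_verify(paths=["$..Date"])
def test_create_bucket(snapshot):
    ...
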
def test_generic_path(testdir):
    from _pytest.main import Session
    config = testdir.parseconfig()
    session = Session(config)
    p1 = Node('a', config=config, session=session)
    # assert p1.fspath is None
    p2 = Node('B', parent=p1)
    p3 = Node('()', parent=p2)
    item = Item('c', parent=p3)

    res = generic_path(item)
    assert res == 'a.B().c'

    p0 = FSCollector('proj/test', config=config, session=session)
    p1 = FSCollector('proj/test/a', parent=p0)
    p2 = Node('B', parent=p1)
    p3 = Node('()', parent=p2)
    p4 = Node('c', parent=p3)
    item = Item('[1]', parent=p4)

    res = generic_path(item)
    assert res == 'test/a:B().c[1]'

def test_generic_path(testdir):
    from _pytest.main import Session
    config = testdir.parseconfig()
    session = Session(config)
    p1 = Node("a", config=config, session=session, nodeid="a")
    # assert p1.fspath is None
    p2 = Node("B", parent=p1)
    p3 = Node("()", parent=p2)
    item = Item("c", parent=p3)

    res = generic_path(item)
    assert res == "a.B().c"

    p0 = FSCollector("proj/test", config=config, session=session)
    p1 = FSCollector("proj/test/a", parent=p0)
    p2 = Node("B", parent=p1)
    p3 = Node("()", parent=p2)
    p4 = Node("c", parent=p3)
    item = Item("[1]", parent=p4)

    res = generic_path(item)
    assert res == "test/a:B().c[1]"

def pytest_runtest_makereport(item: Item, call: CallInfo[None]):
    outcome = yield
    rep = outcome.get_result()
    evalxfail = item._store.get(evalxfail_key, None)
    # unittest special case, see setting of unexpectedsuccess_key
    if unexpectedsuccess_key in item._store and rep.when == "call":
        reason = item._store[unexpectedsuccess_key]
        if reason:
            rep.longrepr = "Unexpected success: {}".format(reason)
        else:
            rep.longrepr = "Unexpected success"
        rep.outcome = "failed"
    elif item.config.option.runxfail:
        pass  # don't interfere
    elif call.excinfo and isinstance(call.excinfo.value, xfail.Exception):
        assert call.excinfo.value.msg is not None
        rep.wasxfail = "reason: " + call.excinfo.value.msg
        rep.outcome = "skipped"
    elif evalxfail and not rep.skipped and evalxfail.wasvalid() and evalxfail.istrue():
        if call.excinfo:
            if evalxfail.invalidraise(call.excinfo.value):
                rep.outcome = "failed"
            else:
                rep.outcome = "skipped"
                rep.wasxfail = evalxfail.getexplanation()
        elif call.when == "call":
            strict_default = item.config.getini("xfail_strict")
            is_strict_xfail = evalxfail.get("strict", strict_default)
            explanation = evalxfail.getexplanation()
            if is_strict_xfail:
                rep.outcome = "failed"
                rep.longrepr = "[XPASS(strict)] {}".format(explanation)
            else:
                rep.outcome = "passed"
                rep.wasxfail = explanation
    elif (
        item._store.get(skipped_by_mark_key, True)
        and rep.skipped
        and type(rep.longrepr) is tuple
    ):
        # skipped by mark.skipif; change the location of the failure
        # to point to the item definition, otherwise it will display
        # the location of where the skip exception was raised within pytest
        _, _, reason = rep.longrepr
        filename, line = item.reportinfo()[:2]
        assert line is not None
        rep.longrepr = str(filename), line + 1, reason

def _add_item_hier_parts_parametrize(item, report_parts, tests_parts, rp_name=""):
    """
    Add item to hierarchy of parents with params.

    :param item: pytest.Item
    :param report_parts: Parent reports
    :param tests_parts: test item parts
    :param rp_name: name of report
    :return: str rp_name
    """
    for mark in item.own_markers:
        if mark.name == 'parametrize':
            ch_index = item.nodeid.find("[")
            test_fullname = item.nodeid[:ch_index if ch_index > 0 else len(item.nodeid)]
            test_name = item.originalname
            rp_name += ("::" if rp_name else "") + test_name
            if test_fullname in tests_parts:
                item_test = tests_parts[test_fullname]
            else:
                if hasattr(Item, "from_parent"):
                    item_test = Item.from_parent(
                        parent=item, name=test_fullname, nodeid=test_fullname)
                else:
                    item_test = Item(test_fullname, nodeid=test_fullname,
                                     session=item.session,
                                     config=item.session.config)
                item_test._rp_name = rp_name
                item_test.obj = item.obj
                item_test.keywords = item.keywords
                item_test.own_markers = item.own_markers
                item_test.parent = item.parent
                tests_parts[test_fullname] = item_test
            rp_name = ""
            report_parts.append(item_test)
            break
    return rp_name

def prepare(self, item: Item) -> None:
    """Setup objects along the collector chain to the item."""
    # If a collector fails its setup, fail its entire subtree of items.
    # The setup is not retried for each item - the same exception is used.
    for col, (finalizers, prepare_exc) in self.stack.items():
        if prepare_exc:
            raise prepare_exc

    needed_collectors = item.listchain()
    for col in needed_collectors[len(self.stack):]:
        assert col not in self.stack
        self.stack[col] = ([col.teardown], None)
        try:
            col.setup()
        except TEST_OUTCOME as e:
            self.stack[col] = (self.stack[col][0], e)
            raise e

def prepare(self, item: Item) -> None:
    """Setup objects along the collector chain to the item."""
    # If a collector fails its setup, fail its entire subtree of items.
    # The setup is not retried for each item - the same exception is used.
    for col in self.stack:
        prepare_exc = col._store.get(self._prepare_exc_key, None)
        if prepare_exc:
            raise prepare_exc

    needed_collectors = item.listchain()
    for col in needed_collectors[len(self.stack) :]:
        assert col not in self.stack
        self.stack[col] = [col.teardown]
        try:
            col.setup()
        except TEST_OUTCOME as e:
            col._store[self._prepare_exc_key] = e
            raise e

def pytest_runtest_makereport(item: Item, call: CallInfo[None]):
    outcome = yield
    rep = outcome.get_result()
    xfailed = item._store.get(xfailed_key, None)
    # unittest special case, see setting of unexpectedsuccess_key
    if unexpectedsuccess_key in item._store and rep.when == "call":
        reason = item._store[unexpectedsuccess_key]
        if reason:
            rep.longrepr = f"Unexpected success: {reason}"
        else:
            rep.longrepr = "Unexpected success"
        rep.outcome = "failed"
    elif item.config.option.runxfail:
        pass  # don't interfere
    elif call.excinfo and isinstance(call.excinfo.value, xfail.Exception):
        assert call.excinfo.value.msg is not None
        rep.wasxfail = "reason: " + call.excinfo.value.msg
        rep.outcome = "skipped"
    elif not rep.skipped and xfailed:
        if call.excinfo:
            raises = xfailed.raises
            if raises is not None and not isinstance(call.excinfo.value, raises):
                rep.outcome = "failed"
            else:
                rep.outcome = "skipped"
                rep.wasxfail = xfailed.reason
        elif call.when == "call":
            if xfailed.strict:
                rep.outcome = "failed"
                rep.longrepr = "[XPASS(strict)] " + xfailed.reason
            else:
                rep.outcome = "passed"
                rep.wasxfail = xfailed.reason

    if (
        item._store.get(skipped_by_mark_key, True)
        and rep.skipped
        and type(rep.longrepr) is tuple
    ):
        # Skipped by mark.skipif; change the location of the failure
        # to point to the item definition, otherwise it will display
        # the location of where the skip exception was raised within pytest.
        _, _, reason = rep.longrepr
        filename, line = item.reportinfo()[:2]
        assert line is not None
        rep.longrepr = str(filename), line + 1, reason

def setup(self, item: Item) -> None:
    """Setup objects along the collector chain to the item."""
    needed_collectors = item.listchain()

    # If a collector fails its setup, fail its entire subtree of items.
    # The setup is not retried for each item - the same exception is used.
    for col, (finalizers, exc) in self.stack.items():
        assert col in needed_collectors, "previous item was not torn down properly"
        if exc:
            raise exc

    for col in needed_collectors[len(self.stack):]:
        assert col not in self.stack
        # Push onto the stack.
        self.stack[col] = ([col.teardown], None)
        try:
            col.setup()
        except TEST_OUTCOME as exc:
            self.stack[col] = (self.stack[col][0], exc)
            raise exc

def evaluate_xfail_marks(item: Item) -> Optional[Xfail]:
    """Evaluate xfail marks on item, returning Xfail if triggered."""
    for mark in item.iter_markers(name="xfail"):
        run = mark.kwargs.get("run", True)
        strict = mark.kwargs.get("strict", item.config.getini("xfail_strict"))
        raises = mark.kwargs.get("raises", None)
        if "condition" not in mark.kwargs:
            conditions = mark.args
        else:
            conditions = (mark.kwargs["condition"],)

        # Unconditional.
        if not conditions:
            reason = mark.kwargs.get("reason", "")
            return Xfail(reason, run, strict, raises)

        # If any of the conditions are true.
        for condition in conditions:
            result, reason = evaluate_condition(item, mark, condition)
            if result:
                return Xfail(reason, run, strict, raises)

    return None

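# A hedged sketch of the xfail marks evaluate_xfail_marks() consumes; the
# tests below are assumptions. run=False feeds the "[NOTRUN]" branch of
# pytest_runtest_setup() above, and raises= narrows which exception still
# counts as the expected failure.
import sys

import pytest

@pytest.mark.xfail(sys.platform == "win32", reason="not ported", raises=OSError)
def test_posix_only():
    ...

@pytest.mark.xfail(run=False, reason="crashes the interpreter")
def test_known_crasher():
    ...
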
def item_marker_names(item: Item) -> List[str]:
    """Return the names of all markers on the item, including inherited ones."""
    return [marker.name for marker in item.iter_markers()]