def evaluate_skip_marks(item: Item) -> Optional[Skip]:
    """Evaluate skip and skipif marks on item, returning Skip if triggered."""
    for mark in item.iter_markers(name="skipif"):
        # A keyword ``condition`` takes precedence over positional arguments.
        if "condition" in mark.kwargs:
            conditions = (mark.kwargs["condition"],)
        else:
            conditions = mark.args

        # A skipif with no condition at all skips unconditionally.
        if not conditions:
            return Skip(mark.kwargs.get("reason", ""))

        # Skip as soon as any single condition evaluates truthy.
        for condition in conditions:
            triggered, reason = evaluate_condition(item, mark, condition)
            if triggered:
                return Skip(reason)

    for mark in item.iter_markers(name="skip"):
        try:
            return Skip(*mark.args, **mark.kwargs)
        except TypeError as e:
            # Bad arguments to @pytest.mark.skip often mean the user wanted skipif.
            raise TypeError(str(e) +
                            " - maybe you meant pytest.mark.skipif?") from None

    return None
# --- Example #2 ---
def evaluate_skip_marks(item: Item) -> Optional[Skip]:
    """Evaluate skip and skipif marks on item, returning Skip if triggered."""
    for mark in item.iter_markers(name="skipif"):
        conditions = (
            (mark.kwargs["condition"],)
            if "condition" in mark.kwargs
            else mark.args
        )

        # Bare skipif (no condition) skips unconditionally.
        if not conditions:
            return Skip(mark.kwargs.get("reason", ""))

        for condition in conditions:
            triggered, reason = evaluate_condition(item, mark, condition)
            if triggered:
                return Skip(reason)

    for mark in item.iter_markers(name="skip"):
        # Reason precedence: keyword, then first positional, then a default.
        if "reason" in mark.kwargs:
            return Skip(mark.kwargs["reason"])
        if mark.args:
            return Skip(mark.args[0])
        return Skip("unconditional skip")

    return None
    def finish_test(self, report: TestReport, item: Item) -> None:
        """Report the final status of the current test to Zebrunner."""
        if not zebrunner_context.test_is_active:
            return

        self.authorize()
        xfail_markers = list(item.iter_markers("xfail"))
        first_xfail = xfail_markers[0] if xfail_markers else None
        fail_reason = first_xfail.kwargs.get("reason") if first_xfail else None
        is_strict = first_xfail.kwargs.get("strict", False) if first_xfail else False

        if report.passed:
            if first_xfail is None:
                status = TestStatus.PASSED
            elif is_strict:
                # A strict xfail that unexpectedly passed counts as a failure.
                status = TestStatus.FAILED
                fail_reason = report.longreprtext
            else:
                status = TestStatus.SKIPPED
        elif first_xfail is not None:
            # An expected failure is reported as skipped, with the xfail reason.
            status = TestStatus.SKIPPED
            fail_reason = first_xfail.kwargs.get("reason")
        else:
            status = TestStatus.FAILED
            fail_reason = report.longreprtext

        self.api.finish_test(
            zebrunner_context.test_run_id,
            zebrunner_context.test_id,
            FinishTestModel(
                result=status.value,
                reason=fail_reason,
            ),
        )
        zebrunner_context.test = None
    def start_test(self, report: TestReport, item: Item) -> None:
        """Register a started test with Zebrunner and finish it at once if skipped."""
        self.authorize()
        maintainers = [m.args[0] for m in item.iter_markers("maintainer")]
        test_labels = [(str(m.args[0]), str(m.args[1]))
                       for m in item.iter_markers("label")]
        test = Test(
            name=item.name,
            # nodeid looks like "path::Class::name"; the second segment is used as the file.
            file=item.nodeid.split("::")[1],
            maintainers=maintainers,
            labels=test_labels,
        )
        zebrunner_context.test = test

        if zebrunner_context.test_run_is_active:
            label_models = [{"key": key, "value": value}
                            for key, value in test.labels]
            test.zebrunner_id = self.api.start_test(
                zebrunner_context.test_run_id,
                StartTestModel(
                    name=test.name,
                    class_name=test.file,
                    method_name=test.name,
                    maintainer=",".join(test.maintainers),
                    labels=label_models,
                    correlation_data=CorrelationDataModel(name=test.name).json(),
                ),
            )

        if report.skipped and zebrunner_context.test_is_active:
            # Forward the skip mark's reason when present; unmarked skips get None.
            skip_markers = [m for m in item.own_markers if m.name == "skip"]
            skip_reason = skip_markers[0].kwargs.get("reason") if skip_markers else None
            self.api.finish_test(
                zebrunner_context.test_run_id,
                zebrunner_context.test_id,
                FinishTestModel(reason=skip_reason,
                                result=TestStatus.SKIPPED.value),
            )
            zebrunner_context.test = None
# --- Example #5 ---
def pytest_runtest_call(item: Item) -> None:
    """Hook wrapper: after the test runs, verify any pending snapshot assertions."""
    call: CallInfo = yield  # noqa

    # TODO: extremely dirty... maybe it would be better to find a way to fail the test itself instead?
    snapshot = item.funcargs.get("snapshot")
    if snapshot:
        should_verify = True
        skip_paths = []
        # The last skip_snapshot_verify marker wins for both flags.
        for marker in item.iter_markers(name="skip_snapshot_verify"):
            should_verify = False
            skip_paths = marker.kwargs.get("paths", [])
        snapshot._assert_all(should_verify, skip_paths)
# --- Example #6 ---
def pytest_runtest_setup(item: Item) -> None:
    """Evaluate skip, skipif and xfail marks on *item* during setup."""
    # Check if skip or skipif are specified as pytest marks
    item._store[skipped_by_mark_key] = False

    eval_skipif = MarkEvaluator(item, "skipif")
    if eval_skipif.istrue():
        item._store[skipped_by_mark_key] = True
        skip(eval_skipif.getexplanation())

    for skip_info in item.iter_markers(name="skip"):
        item._store[skipped_by_mark_key] = True
        # Reason precedence: keyword, then first positional, then a default.
        if "reason" in skip_info.kwargs:
            reason = skip_info.kwargs["reason"]
        else:
            reason = skip_info.args[0] if skip_info.args else "unconditional skip"
        skip(reason)

    item._store[evalxfail_key] = MarkEvaluator(item, "xfail")
    check_xfail_no_run(item)
# --- Example #7 ---
def evaluate_xfail_marks(item: Item) -> Optional[Xfail]:
    """Evaluate xfail marks on item, returning Xfail if triggered."""
    for mark in item.iter_markers(name="xfail"):
        run = mark.kwargs.get("run", True)
        # strict falls back to the ini-level xfail_strict setting.
        strict = mark.kwargs.get("strict", item.config.getini("xfail_strict"))
        raises = mark.kwargs.get("raises", None)
        # A keyword ``condition`` takes precedence over positional arguments.
        conditions = (
            (mark.kwargs["condition"],)
            if "condition" in mark.kwargs
            else mark.args
        )

        # No condition at all means the mark applies unconditionally.
        if not conditions:
            return Xfail(mark.kwargs.get("reason", ""), run, strict, raises)

        for condition in conditions:
            triggered, reason = evaluate_condition(item, mark, condition)
            if triggered:
                return Xfail(reason, run, strict, raises)

    return None
# --- Example #8 ---
def item_marker_names(item: Item) -> List[str]:
    """Return the names of all markers applied to *item*."""
    names = []
    for marker in item.iter_markers():
        names.append(marker.name)
    return names
# --- Example #9 ---
 def for_item(cls, item: Item) -> Container["MarkerSpec"]:
     """Collect the class members whose names match markers applied to *item*."""
     matched = []
     for marker in item.iter_markers():
         try:
             matched.append(cls[marker.name])
         except KeyError:
             # Markers with no corresponding member are silently ignored.
             pass
     return matched