Example 1
def _prunetraceback(self, excinfo):
    Function._prunetraceback(self, excinfo)
    # Drop frames that come from the unittest machinery itself, so the
    # reported traceback points at the user's test code.
    traceback = excinfo.traceback.filter(
        lambda x: not x.frame.f_globals.get("__unittest")
    )
    if traceback:
        excinfo.traceback = traceback
Example 2
def _prunetraceback(
        self, excinfo: _pytest._code.ExceptionInfo[BaseException]) -> None:
    Function._prunetraceback(self, excinfo)
    traceback = excinfo.traceback.filter(
        lambda x: not x.frame.f_globals.get("__unittest"))
    if traceback:
        excinfo.traceback = traceback
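Both variants rely on the same convention: the standard-library unittest modules set a module-level __unittest = True global, so filtering out traceback entries whose frame globals contain that key strips unittest's own machinery from the traceback and leaves only the frames belonging to the user's test code.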
Example 3
import datetime

import pytest
from _pytest.python import Function


def _add_xfail_markers(item: Function) -> None:
    """
    Mute flaky integration tests with the custom ``xfailflake`` pytest marker.
    The rationale for doing this is documented in DCOS-45308.
    """
    xfailflake_markers = [
        marker for marker in item.iter_markers() if marker.name == 'xfailflake'
    ]
    for xfailflake_marker in xfailflake_markers:
        assert 'reason' in xfailflake_marker.kwargs
        assert 'jira' in xfailflake_marker.kwargs
        assert xfailflake_marker.kwargs['jira'].startswith('DCOS')
        # Show the JIRA in the printed reason.
        xfailflake_marker.kwargs['reason'] = '{jira} - {reason}'.format(
            jira=xfailflake_marker.kwargs['jira'],
            reason=xfailflake_marker.kwargs['reason'],
        )
        date_text = xfailflake_marker.kwargs['since']
        try:
            datetime.datetime.strptime(date_text, '%Y-%m-%d')
        except ValueError:
            message = (
                'Incorrect date format for "since", should be YYYY-MM-DD')
            raise ValueError(message)

        # The marker is not "strict" unless that is explicitly stated.
        # That means that by default, no error is raised if the test passes or
        # fails.
        strict = xfailflake_marker.kwargs.get('strict', False)
        xfailflake_marker.kwargs['strict'] = strict
        xfail_marker = pytest.mark.xfail(
            *xfailflake_marker.args,
            **xfailflake_marker.kwargs,
        )
        item.add_marker(xfail_marker)
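A test muted by this hook would carry the custom marker; here is a minimal sketch (the ticket id, reason, and test body are illustrative, and the marker would additionally need to be registered with pytest, which the excerpt does not show):

import pytest

@pytest.mark.xfailflake(
    jira='DCOS-12345',                      # must start with 'DCOS'
    reason='Flaky against slow clusters',
    since='2019-01-01',                     # must parse as YYYY-MM-DD
)
def test_sometimes_flaky():
    ...

_add_xfail_markers itself would typically be called once per collected item, for example from a pytest_collection_modifyitems hook.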
Example 4
import os
import sys

import pytest
from _pytest.python import Function


def pytest_runtest_setup(item: Function) -> None:
    marker_names = [mark.name for mark in item.iter_markers()]
    if "skip_on_windows" in marker_names and sys.platform == "win32":
        pytest.skip("cannot run on Windows")
    if "skip_on_ci" in marker_names and os.environ.get("CI") in (
            "true", "True", "yes", "t", "1"):
        pytest.skip("cannot run on CI")
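Tests opting into these skips would use plain custom markers; a minimal sketch (the registration comment reflects standard pytest practice, not code from the source project):

import pytest

# pytest.ini or pyproject.toml would normally declare the markers:
#   markers =
#       skip_on_windows: cannot run on Windows
#       skip_on_ci: cannot run on CI

@pytest.mark.skip_on_windows
def test_uses_unix_sockets():
    ...

@pytest.mark.skip_on_ci
def test_needs_local_hardware():
    ...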
Example 5
from typing import Optional

from _pytest.python import Function
from _pytest.runner import runtestprotocol

# SchemathesisFunction is the Function subclass provided by the Schemathesis
# pytest plugin; its import is not shown in the original excerpt.


def pytest_runtest_protocol(item: Function, nextitem: Optional[Function]) -> bool:
    item.ihook.pytest_runtest_logstart(nodeid=item.nodeid, location=item.location)
    reports = runtestprotocol(item, nextitem=nextitem)
    item.ihook.pytest_runtest_logfinish(nodeid=item.nodeid, location=item.location)
    if isinstance(item, SchemathesisFunction):
        for report in reports:
            if report.when == "call" and report.outcome == "passed":
                item.warn_if_stateful_responses_not_stored()
        item.add_stateful_tests()
    return True
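The explicit return True matters here: pytest_runtest_protocol is a firstresult hook, so a non-None return value tells pytest that this plugin has performed the whole run-and-report cycle for the item and that no other implementation (including the built-in one) should run it again.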
Example 6
    def inner(func):
        # `request` and `parent_test` are closed over from an enclosing scope
        # that the excerpt omits. Note that recent pytest versions require
        # Function.from_parent(...) instead of calling the constructor
        # directly.
        item = Function(
            name=request.function.__name__ + '[]',
            parent=parent_test.parent,
            callobj=func,
        )
        nextitem = parent_test  # prevents pytest from tearing down module fixtures

        item.ihook.pytest_runtest_setup(item=item)
        item.ihook.pytest_runtest_call(item=item)
        item.ihook.pytest_runtest_teardown(item=item, nextitem=nextitem)
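A plausible wrapper for this closure is a fixture along these lines (the fixture name is a hypothetical reconstruction; only inner appears in the original excerpt):

import pytest

@pytest.fixture
def run_as_subtest(request):
    parent_test = request.node  # the test item currently being run

    def inner(func):
        ...  # body exactly as in the excerpt above

    return inner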
Example 7
def _add_function(item, original_name, mode, n, number, r, reps, template):
    # Build a synthetic test item whose name encodes the repetition metadata;
    # in 'safe' mode the original test body runs unchanged, otherwise it is
    # wrapped by the module's _runtest helper.
    return Function(
        template.format(name=original_name,
                        n=n + 1,
                        number=number,
                        r=r + 1,
                        reps=reps),
        item.parent,
        callobj=item.obj if mode == 'safe' else _runtest(number, item.obj),
        originalname=item.name,
        keywords={'_timeit': {'source': item, 'rep': r, 'mode': mode}},
    )
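The template parameter is expected to be a str.format string over the names passed here; an illustrative value (hypothetical, not from the source project) and the item name it would produce:

template = '{name} [rep {r} of {reps}]'
# _add_function(item, 'test_foo', 'safe', 0, 1, 0, 3, template)
# names the new item 'test_foo [rep 1 of 3]'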
Example 8
def ensure_all_fixers_have_a_test_under_pytest(
    config, items, patching_registry, _fail_fast=False
):
    """Call this pytest hook from a conftest.py to ensure your own test suite covers
    all your registered fixers, like so::

        def pytest_collection_modifyitems(config, items):
            from yourownpackage.registry import your_patching_registry
            from compat_patcher_core.scaffolding import ensure_all_fixers_have_a_test_under_pytest
            ensure_all_fixers_have_a_test_under_pytest(
                config=config, items=items, patching_registry=your_patching_registry
            )
    """
    import copy
    from _pytest.python import Function

    all_fixers = patching_registry.get_all_fixers()
    all_tests_names = [test.name for test in items]
    for fixer in all_fixers:
        expected_test_name = "test_{}".format(fixer["fixer_callable"].__name__)
        if expected_test_name not in all_tests_names:
            error_message = "No test written for {} fixer '{}'".format(
                fixer["fixer_family"].title(), fixer["fixer_callable"].__name__
            )

            def missing_fixer_test(error_message=error_message):
                # Bind the message at definition time: a plain closure over
                # `error_message` would make every generated test report only
                # the last missing fixer.
                raise RuntimeError(error_message)

            if _fail_fast:  # For testing only
                missing_fixer_test()
            mock_item = copy.copy(
                items[0]
            )  # We expect at least 1 test in the test suite, else it breaks...
            mock_item.parent.name = "test_{}_fixers.py".format(fixer["fixer_family"])
            setattr(
                mock_item.parent.obj,
                "MISSING_" + expected_test_name,
                missing_fixer_test,
            )
            items.append(
                Function(
                    name="MISSING_" + expected_test_name,
                    parent=mock_item.parent,
                    config=config,
                    session=mock_item.session,
                )
            )
Example 9
import sys

import pytest
from _pytest.python import Function


def pytest_runtest_setup(item: Function) -> None:
    if (
        "skip_on_windows" in [mark.name for mark in item.iter_markers()]
        and sys.platform == "win32"
    ):
        pytest.skip("cannot run on Windows")