Exemplo n.º 1
0
def test_removed_in_x_warning_as_error(pytester: Pytester,
                                       change_default) -> None:
    """Verify that PytestRemovedInXWarnings emitted by pytest become errors.

    This test should be enabled as part of each major release and skipped
    again afterwards, so that our deprecations keep turning into warnings
    as expected.
    """
    pytester.makepyfile("""
        import warnings, pytest
        def test():
            warnings.warn(pytest.PytestRemovedIn8Warning("some warning"))
    """)
    if change_default == "ini":
        pytester.makeini("""
            [pytest]
            filterwarnings =
                ignore::pytest.PytestRemovedIn8Warning
        """)

    # For the "cmdline" variant the filter is supplied via -W instead.
    extra_args = []
    if change_default == "cmdline":
        extra_args.append("-Wignore::pytest.PytestRemovedIn8Warning")
    run = pytester.runpytest(*extra_args)
    if change_default is None:
        # No override anywhere: the warning escalates to an error.
        run.stdout.fnmatch_lines(["* 1 failed in *"])
    else:
        assert change_default in ("ini", "cmdline")
        run.stdout.fnmatch_lines(["* 1 passed in *"])
Exemplo n.º 2
0
    def test_packages(self, pytester: Pytester) -> None:
        """Regression test for #7758.

        The particular issue here was that Package nodes were included in the
        filtering, being themselves Modules for the __init__.py, even if they
        had failed Modules in them.

        The tests includes a test in an __init__.py file just to make sure the
        fix doesn't somehow regress that, it is not critical for the issue.
        """
        # Two packages: "a" has failing tests (one inside its __init__.py),
        # "b" has one failing test module.
        pytester.makepyfile(
            **{
                "__init__.py": "",
                "a/__init__.py": "def test_a_init(): assert False",
                "a/test_one.py": "def test_1(): assert False",
                "b/__init__.py": "",
                "b/test_two.py": "def test_2(): assert False",
            }, )
        # Collect from __init__.py files too, so test_a_init actually runs.
        pytester.makeini("""
            [pytest]
            python_files = *.py
            """)
        # First run: all three tests fail and get recorded as "last failed".
        result = pytester.runpytest()
        result.assert_outcomes(failed=3)
        # --lf must re-run exactly the same three failures; Package nodes must
        # not mask the failed Modules contained in them.
        result = pytester.runpytest("--lf")
        result.assert_outcomes(failed=3)
Exemplo n.º 3
0
def test_log_in_hooks(pytester: Pytester) -> None:
    """Messages logged from session-level hooks reach both the live log and the log file."""
    log_file = str(pytester.path.joinpath("pytest.log"))

    pytester.makeini(f"""
        [pytest]
        log_file={log_file}
        log_file_level = INFO
        log_cli=true
        """)
    pytester.makeconftest("""
        import logging

        def pytest_runtestloop(session):
            logging.info('runtestloop')

        def pytest_sessionstart(session):
            logging.info('sessionstart')

        def pytest_sessionfinish(session, exitstatus):
            logging.info('sessionfinish')
    """)
    run = pytester.runpytest()
    # All three hook messages must appear in the live (CLI) output...
    run.stdout.fnmatch_lines(
        ["*sessionstart*", "*runtestloop*", "*sessionfinish*"])
    # ...and in the log file as well.
    with open(log_file) as rfh:
        contents = rfh.read()
    for message in ("sessionstart", "runtestloop", "sessionfinish"):
        assert message in contents
Exemplo n.º 4
0
def test_hide_pytest_internal_warnings(pytester: Pytester,
                                       ignore_pytest_warnings) -> None:
    """Make sure internal pytest warnings can be ignored using a warnings filter."""
    pytester.makepyfile("""
        import pytest
        import warnings

        warnings.warn(pytest.PytestWarning("some internal warning"))

        def test_bar():
            pass
    """)
    if ignore_pytest_warnings == "ini":
        pytester.makeini("""
            [pytest]
            filterwarnings = ignore::pytest.PytestWarning
        """)
    # For the "cmdline" variant the same filter goes through -W.
    extra = []
    if ignore_pytest_warnings == "cmdline":
        extra = ["-W", "ignore::pytest.PytestWarning"]
    run = pytester.runpytest(*extra)
    if ignore_pytest_warnings == "no":
        # No filter anywhere: the warning must show up in the summary.
        run.stdout.fnmatch_lines([
            "*== %s ==*" % WARNINGS_SUMMARY_HEADER,
            "*test_hide_pytest_internal_warnings.py:4: PytestWarning: some internal warning",
            "* 1 passed, 1 warning *",
        ])
    else:
        assert WARNINGS_SUMMARY_HEADER not in run.stdout.str()
Exemplo n.º 5
0
    def test_early_ignored_attributes(self, pytester: Pytester) -> None:
        """Builtin attributes should be ignored early on, even if
        configuration would otherwise allow them.

        This tests a performance optimization, not correctness, really,
        although it tests PytestCollectionWarning is not raised, while
        it would have been raised otherwise.
        """
        # Patterns that would normally make *every* class/function collectable.
        pytester.makeini(
            """
            [pytest]
            python_classes=*
            python_functions=*
        """
        )
        pytester.makepyfile(
            """
            class TestEmpty:
                pass
            test_empty = TestEmpty()
            def test_real():
                pass
        """
        )
        # Only test_real is collected: the TestEmpty instance's builtin
        # attributes are skipped early and no collection warning is emitted.
        items, rec = pytester.inline_genitems()
        assert rec.ret == 0
        assert len(items) == 1
Exemplo n.º 6
0
def test_collection_logging_to_file(pytester: Pytester) -> None:
    """INFO records logged during collection go to the log file, not stdout."""
    log_file = str(pytester.path.joinpath("pytest.log"))

    pytester.makeini(f"""
        [pytest]
        log_file={log_file}
        log_file_level = INFO
        """)

    pytester.makepyfile("""
        import logging

        logging.getLogger().info("Normal message")

        def test_simple():
            logging.getLogger().debug("debug message in test_simple")
            logging.getLogger().info("info message in test_simple")
    """)

    run = pytester.runpytest()

    # log_cli is off, so no live collection log section may appear.
    run.stdout.no_fnmatch_line("*--- live log collection ---*")

    assert run.ret == 0
    assert os.path.isfile(log_file)
    with open(log_file, encoding="utf-8") as rfh:
        contents = rfh.read()
    # INFO records (collection-time and call-phase) are captured in the file;
    # DEBUG is below log_file_level and must be filtered out.
    assert "Normal message" in contents
    assert "info message in test_simple" in contents
    assert "debug message in test_simple" not in contents
Exemplo n.º 7
0
def test_log_cli_auto_enable(pytester: Pytester, cli_args: str) -> None:
    """Check that live logs are enabled if --log-level or --log-cli-level is passed on the CLI.
    It should not be auto enabled if the same configs are set on the INI file.
    """
    pytester.makepyfile("""
        import logging

        def test_log_1():
            logging.info("log message from test_log_1 not to be shown")
            logging.warning("log message from test_log_1")

    """)
    pytester.makeini("""
        [pytest]
        log_level=INFO
        log_cli_level=INFO
    """)

    run = pytester.runpytest(cli_args)
    output = run.stdout.str()
    if cli_args != "--log-cli-level=WARNING":
        # INI-only configuration: live logging stays disabled entirely.
        run.stdout.fnmatch_lines(
            ["*test_log_cli_auto_enable*100%*", "=* 1 passed in *="])
        assert "INFO" not in output
        assert "WARNING" not in output
    else:
        # CLI flag present: live logging switches on at WARNING level.
        run.stdout.fnmatch_lines([
            "*::test_log_1 ",
            "*-- live log call --*",
            "*WARNING*log message from test_log_1*",
            "PASSED *100%*",
            "=* 1 passed in *=",
        ])
        assert "INFO" not in output
Exemplo n.º 8
0
def test_customized_python_discovery(pytester: Pytester) -> None:
    """Custom python_files/python_classes/python_functions patterns drive collection."""
    pytester.makeini(
        """
        [pytest]
        python_files=check_*.py
        python_classes=Check
        python_functions=check
    """
    )
    module = pytester.makepyfile(
        """
        def check_simple():
            pass
        class CheckMyApp(object):
            def check_meth(self):
                pass
    """
    )
    # makepyfile names the module test_*.py; rename it so it matches the
    # customized check_*.py pattern instead.
    renamed = module.with_name(module.name.replace("test", "check"))
    module.rename(renamed)

    collect_only = pytester.runpytest("--collect-only", "-s")
    collect_only.stdout.fnmatch_lines(
        ["*check_customized*", "*check_simple*", "*CheckMyApp*", "*check_meth*"]
    )

    run = pytester.runpytest()
    assert run.ret == 0
    run.stdout.fnmatch_lines(["*2 passed*"])
Exemplo n.º 9
0
def test_log_cli_default_level_multiple_tests(pytester: Pytester,
                                              request: FixtureRequest) -> None:
    """Ensure we reset the first newline added by the live logger between tests."""
    # makepyfile names the generated module after the requesting test node,
    # so derive the expected file name from it.  Previously `filename` was
    # computed but unused and the patterns contained the literal "(unknown)",
    # which could never match the live-log section headers.
    filename = request.node.name + ".py"
    pytester.makepyfile("""
        import logging

        def test_log_1():
            logging.warning("log message from test_log_1")

        def test_log_2():
            logging.warning("log message from test_log_2")
    """)
    pytester.makeini("""
        [pytest]
        log_cli=true
    """)

    result = pytester.runpytest()
    # Each test's live-log section must start on its own line: the node-id
    # header, the WARNING record, then the PASSED status with the progress.
    result.stdout.fnmatch_lines([
        f"{filename}::test_log_1 ",
        "*WARNING*log message from test_log_1*",
        "PASSED *50%*",
        f"{filename}::test_log_2 ",
        "*WARNING*log message from test_log_2*",
        "PASSED *100%*",
        "=* 2 passed in *=",
    ])
Exemplo n.º 10
0
def test_log_cli_ini_level(pytester: Pytester) -> None:
    """log_cli_level from the ini file controls which live-log records show."""
    pytester.makeini("""
        [pytest]
        log_cli=true
        log_cli_level = INFO
        """)
    pytester.makepyfile("""
        import pytest
        import logging
        def test_log_cli(request):
            plugin = request.config.pluginmanager.getplugin('logging-plugin')
            assert plugin.log_cli_handler.level == logging.INFO
            logging.getLogger('catchlog').debug("This log message won't be shown")
            logging.getLogger('catchlog').info("This log message will be shown")
            print('PASSED')
    """)

    run = pytester.runpytest("-s")

    # fnmatch_lines does an assertion internally.  'PASSED' ends up on its
    # own line because the log message prints a newline first.
    run.stdout.fnmatch_lines([
        "*test_log_cli_ini_level.py* This log message will be shown",
        "PASSED",
    ])
    # DEBUG is below the configured level and must not appear.
    run.stdout.no_fnmatch_line("*This log message won't be shown*")

    # The test suite itself must exit cleanly.
    assert run.ret == 0
Exemplo n.º 11
0
def test_log_cli_default_level(pytester: Pytester) -> None:
    """With only log_cli=true set, the live-log handler level defaults to NOTSET.

    At NOTSET the root logger's effective WARNING threshold applies, so the
    WARNING record shows while the INFO record does not.
    """
    pytester.makepyfile("""
        import pytest
        import logging
        def test_log_cli(request):
            plugin = request.config.pluginmanager.getplugin('logging-plugin')
            assert plugin.log_cli_handler.level == logging.NOTSET
            logging.getLogger('catchlog').info("INFO message won't be shown")
            logging.getLogger('catchlog').warning("WARNING message will be shown")
    """)
    pytester.makeini("""
        [pytest]
        log_cli=true
    """)

    result = pytester.runpytest()

    # fnmatch_lines does an assertion internally
    result.stdout.fnmatch_lines([
        "test_log_cli_default_level.py::test_log_cli ",
        "WARNING*test_log_cli_default_level.py* message will be shown*",
    ])
    result.stdout.no_fnmatch_line("*INFO message won't be shown*")
    # make sure that that we get a '0' exit code for the testsuite
    assert result.ret == 0
Exemplo n.º 12
0
def test_caplog_can_override_global_log_level(pytester: Pytester) -> None:
    """caplog.set_level/at_level may lower capture below the ini-level log_level."""
    pytester.makepyfile("""
        import pytest
        import logging
        def test_log_level_override(request, caplog):
            logger = logging.getLogger('catchlog')
            plugin = request.config.pluginmanager.getplugin('logging-plugin')
            assert plugin.log_level == logging.WARNING

            logger.info("INFO message won't be shown")

            caplog.set_level(logging.INFO, logger.name)

            with caplog.at_level(logging.DEBUG, logger.name):
                logger.debug("DEBUG message will be shown")

            logger.debug("DEBUG message won't be shown")

            with caplog.at_level(logging.CRITICAL, logger.name):
                logger.warning("WARNING message won't be shown")

            logger.debug("DEBUG message won't be shown")
            logger.info("INFO message will be shown")

            assert "message won't be shown" not in caplog.text
    """)
    # Global capture level is WARNING; the test above overrides it per-logger.
    pytester.makeini("""
        [pytest]
        log_level=WARNING
    """)

    result = pytester.runpytest()
    assert result.ret == 0
Exemplo n.º 13
0
def test_parameterset_for_parametrize_marks(
    pytester: Pytester, mark: Optional[str]
) -> None:
    """The empty_parameter_set_mark ini option selects the mark applied to empty sets."""
    if mark is not None:
        pytester.makeini(
            f"""
        [pytest]
        {EMPTY_PARAMETERSET_OPTION}={mark}
        """
        )

    config = pytester.parseconfig()
    from _pytest.mark import pytest_configure, get_empty_parameterset_mark

    pytest_configure(config)
    result_mark = get_empty_parameterset_mark(config, ["a"], all)
    expected = mark
    if expected in (None, ""):
        # An unset or empty option falls back to the default "skip" mark.
        expected = "skip"
    assert result_mark.name == expected
    assert result_mark.kwargs["reason"].startswith("got empty parameter set ")
    if expected == "xfail":
        assert result_mark.kwargs.get("run") is False
Exemplo n.º 14
0
    def test_txtfile_with_usefixtures_in_ini(self, pytester: Pytester):
        """usefixtures declared in the ini file also applies to text-file doctests."""
        pytester.makeini(
            """
            [pytest]
            usefixtures = myfixture
        """
        )
        # The fixture sets HELLO in the environment; the doctest reads it back.
        pytester.makeconftest(
            """
            import pytest
            @pytest.fixture
            def myfixture(monkeypatch):
                monkeypatch.setenv("HELLO", "WORLD")
        """
        )

        p = pytester.maketxtfile(
            """
            >>> import os
            >>> os.environ["HELLO"]
            'WORLD'
        """
        )
        reprec = pytester.inline_run(p)
        reprec.assertoutcome(passed=1)
Exemplo n.º 15
0
def test_log_file_ini_level(pytester: Pytester) -> None:
    """log_file/log_file_level ini options route INFO records into the file."""
    log_file = str(pytester.path.joinpath("pytest.log"))

    pytester.makeini(f"""
        [pytest]
        log_file={log_file}
        log_file_level = INFO
        """)
    pytester.makepyfile("""
        import pytest
        import logging
        def test_log_file(request):
            plugin = request.config.pluginmanager.getplugin('logging-plugin')
            assert plugin.log_file_handler.level == logging.INFO
            logging.getLogger('catchlog').debug("This log message won't be shown")
            logging.getLogger('catchlog').info("This log message will be shown")
            print('PASSED')
    """)

    run = pytester.runpytest("-s")

    # fnmatch_lines does an assertion internally.
    run.stdout.fnmatch_lines(["test_log_file_ini_level.py PASSED"])

    # The suite passed and the configured log file was created.
    assert run.ret == 0
    assert os.path.isfile(log_file)
    with open(log_file) as rfh:
        contents = rfh.read()
    # INFO made it into the file; DEBUG was filtered by log_file_level.
    assert "This log message will be shown" in contents
    assert "This log message won't be shown" not in contents
Exemplo n.º 16
0
 def test_cache_writefail_permissions(self, unwritable_cache_dir: Path,
                                      pytester: Pytester) -> None:
     """cache.set must not raise when the cache directory is unwritable."""
     pytester.makeini("[pytest]")
     config = pytester.parseconfigure()
     cache = config.cache
     assert cache is not None
     # Best-effort write: a permission failure is swallowed silently.
     cache.set("test/broken", [])
Exemplo n.º 17
0
def test_log_file_unicode(pytester: Pytester) -> None:
    """Non-ASCII log records must be written to the log file intact (UTF-8)."""
    log_file = str(pytester.path.joinpath("pytest.log"))

    pytester.makeini(f"""
        [pytest]
        log_file={log_file}
        log_file_level = INFO
        """)
    pytester.makepyfile("""\
        import logging

        def test_log_file():
            logging.getLogger('catchlog').info("Normal message")
            logging.getLogger('catchlog').info("├")
            logging.getLogger('catchlog').info("Another normal message")
        """)

    run = pytester.runpytest()

    # The suite exits cleanly and the log file was created.
    assert run.ret == 0
    assert os.path.isfile(log_file)
    with open(log_file, encoding="utf-8") as rfh:
        contents = rfh.read()
    # All three records, including the non-ASCII one, are present.
    for expected in ("Normal message", "├", "Another normal message"):
        assert expected in contents
Exemplo n.º 18
0
def test_package_ordering(pytester: Pytester) -> None:
    """Collect tests across a mixed layout of packages and plain directories.

    .
    └── root
        ├── Test_root.py
        ├── __init__.py
        ├── sub1
        │   ├── Test_sub1.py
        │   └── __init__.py
        └── sub2
            └── test
                └── test_sub2.py
    """
    pytester.makeini(
        """
        [pytest]
        python_files=*.py
    """
    )
    root = pytester.mkpydir("root")
    # sub1 is a package (has __init__.py), sub2/test is a plain directory.
    pkg_sub1 = root.joinpath("sub1")
    pkg_sub1.mkdir()
    pkg_sub1.joinpath("__init__.py").touch()
    plain_sub2_test = root.joinpath("sub2", "test")
    plain_sub2_test.mkdir(parents=True)

    root.joinpath("Test_root.py").write_text("def test_1(): pass")
    pkg_sub1.joinpath("Test_sub1.py").write_text("def test_2(): pass")
    plain_sub2_test.joinpath("test_sub2.py").write_text("def test_3(): pass")

    # Execute from '.': all three tests must be collected and pass.
    result = pytester.runpytest("-v", "-s")
    result.assert_outcomes(passed=3)
Exemplo n.º 19
0
def test_parameterset_for_fail_at_collect(pytester: Pytester) -> None:
    """empty_parameter_set_mark=fail_at_collect turns empty parameter sets into collect errors."""
    pytester.makeini("""
    [pytest]
    {}=fail_at_collect
    """.format(EMPTY_PARAMETERSET_OPTION))

    config = pytester.parseconfig()
    from _pytest.mark import pytest_configure, get_empty_parameterset_mark

    pytest_configure(config)

    # Direct call: the error message names the function and its definition
    # line (a two-or-more-digit line number here, hence \d\d+).
    with pytest.raises(
            Collector.CollectError,
            match=r"Empty parameter set in 'pytest_configure' at line \d\d+",
    ):
        get_empty_parameterset_mark(config, ["a"], pytest_configure)

    p1 = pytester.makepyfile("""
        import pytest

        @pytest.mark.parametrize("empty", [])
        def test():
            pass
        """)
    # End-to-end: collecting a test with an empty parametrize list errors out
    # and interrupts the run.
    result = pytester.runpytest(str(p1))
    result.stdout.fnmatch_lines([
        "collected 0 items / 1 error",
        "* ERROR collecting test_parameterset_for_fail_at_collect.py *",
        "Empty parameter set in 'test' at line 3",
        "*= 1 error in *",
    ])
    assert result.ret == ExitCode.INTERRUPTED
Exemplo n.º 20
0
def test_filterwarnings_mark(pytester: Pytester, default_config) -> None:
    """Test ``filterwarnings`` mark works and takes precedence over command
    line and ini options."""
    if default_config == "ini":
        pytester.makeini("""
            [pytest]
            filterwarnings = always::RuntimeWarning
        """)
    pytester.makepyfile("""
        import warnings
        import pytest

        @pytest.mark.filterwarnings('ignore::RuntimeWarning')
        def test_ignore_runtime_warning():
            warnings.warn(RuntimeWarning())

        @pytest.mark.filterwarnings('error')
        def test_warning_error():
            warnings.warn(RuntimeWarning())

        def test_show_warning():
            warnings.warn(RuntimeWarning())
    """)
    # For the cmdline variant the default filter goes through -W; otherwise
    # an empty-string argument is passed, mirroring the original invocation.
    cli_arg = "-W always::RuntimeWarning" if default_config == "cmdline" else ""
    run = pytester.runpytest(cli_arg)
    # The mark always wins: one failure (error mark), two passes, one warning.
    run.stdout.fnmatch_lines(["*= 1 failed, 2 passed, 1 warning in *"])
Exemplo n.º 21
0
 def test_cache_writefail_cachfile_silent(self, pytester: Pytester) -> None:
     """cache.set fails silently when a plain file occupies the cache dir path."""
     pytester.makeini("[pytest]")
     # Put a regular file where .pytest_cache/ should be, so any write fails.
     pytester.path.joinpath(".pytest_cache").write_text("gone wrong")
     config = pytester.parseconfigure()
     cache = config.cache
     assert cache is not None
     # Must not raise despite the broken cache location.
     cache.set("test/broken", [])
Exemplo n.º 22
0
 def test_custom_rel_cache_dir(self, pytester: Pytester) -> None:
     """A relative cache_dir in the ini is created relative to the rootdir."""
     rel_cache_dir = os.path.join("custom_cache_dir", "subdir")
     pytester.makeini("""
         [pytest]
         cache_dir = {cache_dir}
     """.format(cache_dir=rel_cache_dir))
     # A failing test guarantees the cache plugin writes lastfailed data.
     pytester.makepyfile(test_errored="def test_error():\n    assert False")
     pytester.runpytest()
     assert pytester.path.joinpath(rel_cache_dir).is_dir()
Exemplo n.º 23
0
    def test_config_cache_mkdir(self, pytester: Pytester) -> None:
        """cache.mkdir creates a cache directory; names with '/' are rejected."""
        pytester.makeini("[pytest]")
        config = pytester.parseconfigure()
        assert config.cache is not None
        # Nested key names are reserved for values, not directories.
        with pytest.raises(ValueError):
            config.cache.mkdir("key/name")

        p = config.cache.mkdir("name")
        assert p.is_dir()
Exemplo n.º 24
0
 def test_config_cache_dataerror(self, pytester: Pytester) -> None:
     """set() rejects unserializable values; get() returns the default on corrupt data."""
     pytester.makeini("[pytest]")
     config = pytester.parseconfigure()
     assert config.cache is not None
     cache = config.cache
     # The cache object itself is not JSON-serializable.
     pytest.raises(TypeError, lambda: cache.set("key/name", cache))
     config.cache.set("key/name", 0)
     # Corrupt the stored JSON on disk...
     config.cache._getvaluepath("key/name").write_bytes(b"123invalid")
     # ...and get() must fall back to the provided default instead of raising.
     val = config.cache.get("key/name", -2)
     assert val == -2
Exemplo n.º 25
0
 def test_hidden_by_ini(self, pytester: Pytester) -> None:
     """Deprecation warnings filtered via the ini produce no warnings summary."""
     self.create_file(pytester)
     pytester.makeini("""
         [pytest]
         filterwarnings =
             ignore::DeprecationWarning
             ignore::PendingDeprecationWarning
     """)
     # Subprocess run so the ini filters apply from interpreter startup.
     result = pytester.runpytest_subprocess()
     assert WARNINGS_SUMMARY_HEADER not in result.stdout.str()
Exemplo n.º 26
0
 def test_custom_cache_dir_with_env_var(self, pytester: Pytester,
                                        monkeypatch: MonkeyPatch) -> None:
     """Environment variables inside cache_dir are expanded."""
     monkeypatch.setenv("env_var", "custom_cache_dir")
     pytester.makeini("""
         [pytest]
         cache_dir = {cache_dir}
     """.format(cache_dir="$env_var"))
     # A failing test guarantees the cache plugin writes lastfailed data.
     pytester.makepyfile(test_errored="def test_error():\n    assert False")
     pytester.runpytest()
     assert pytester.path.joinpath("custom_cache_dir").is_dir()
Exemplo n.º 27
0
def test_ignore(pytester: Pytester, pyfile_with_warnings, method) -> None:
    """A blanket "ignore" filter (cmdline or ini) suppresses the warnings summary."""
    extra = ["-W", "ignore"] if method == "cmdline" else []
    if method == "ini":
        pytester.makeini("""
        [pytest]
        filterwarnings= ignore
        """)

    run = pytester.runpytest(*extra, pyfile_with_warnings)
    # The test passes and no warnings summary section is printed.
    run.stdout.fnmatch_lines(["* 1 passed in *"])
    assert WARNINGS_SUMMARY_HEADER not in run.stdout.str()
Exemplo n.º 28
0
 def test_custom_abs_cache_dir(self, pytester: Pytester,
                               tmp_path_factory: TempPathFactory) -> None:
     """An absolute cache_dir is used as-is, not resolved against rootdir."""
     tmp = tmp_path_factory.mktemp("tmp")
     abs_cache_dir = tmp / "custom_cache_dir"
     pytester.makeini("""
         [pytest]
         cache_dir = {cache_dir}
     """.format(cache_dir=abs_cache_dir))
     # A failing test guarantees the cache plugin writes lastfailed data.
     pytester.makepyfile(test_errored="def test_error():\n    assert False")
     pytester.runpytest()
     assert abs_cache_dir.is_dir()
Exemplo n.º 29
0
 def test_custom_abs_cache_dir(self, pytester: Pytester,
                               tmpdir_factory) -> None:
     """An absolute cache_dir is used as-is (legacy tmpdir_factory variant)."""
     tmp = str(tmpdir_factory.mktemp("tmp"))
     abs_cache_dir = os.path.join(tmp, "custom_cache_dir")
     pytester.makeini("""
         [pytest]
         cache_dir = {cache_dir}
     """.format(cache_dir=abs_cache_dir))
     # A failing test guarantees the cache plugin writes lastfailed data.
     pytester.makepyfile(test_errored="def test_error():\n    assert False")
     pytester.runpytest()
     assert Path(abs_cache_dir).is_dir()
Exemplo n.º 30
0
 def test_non_ignored_whitespace_glob(self, pytester: Pytester):
     """Trailing whitespace stays significant in --doctest-glob files by default."""
     # ELLIPSIS alone does not enable whitespace normalization.
     pytester.makeini("""
         [pytest]
         doctest_optionflags = ELLIPSIS
     """)
     # Expected output "foo" lacks the trailing spaces the print emits.
     p = pytester.maketxtfile(xdoc="""
         >>> a = "foo    "
         >>> print(a)
         foo
     """)
     reprec = pytester.inline_run(p, "--doctest-glob=x*.txt")
     reprec.assertoutcome(failed=1, passed=0)