Example 1
    def _setup_cli_logging(self):
        config = self._config
        terminal_reporter = config.pluginmanager.get_plugin("terminalreporter")
        if terminal_reporter is None:
            # terminal reporter is disabled e.g. by pytest-xdist.
            return

        capture_manager = config.pluginmanager.get_plugin("capturemanager")
        # if capturemanager plugin is disabled, live logging still works.
        log_cli_handler = _LiveLoggingStreamHandler(terminal_reporter, capture_manager)
        log_cli_format = get_option_ini(config, "log_cli_format", "log_format")
        log_cli_date_format = get_option_ini(
            config, "log_cli_date_format", "log_date_format"
        )
        if (
            config.option.color != "no"
            and ColoredLevelFormatter.LEVELNAME_FMT_REGEX.search(log_cli_format)
        ):
            log_cli_formatter = ColoredLevelFormatter(
                create_terminal_writer(config),
                log_cli_format,
                datefmt=log_cli_date_format,
            )
        else:
            log_cli_formatter = logging.Formatter(
                log_cli_format, datefmt=log_cli_date_format
            )
        log_cli_level = get_actual_log_level(config, "log_cli_level", "log_level")
        self.log_cli_handler = log_cli_handler
        self.live_logs_context = lambda: catching_logs(
            log_cli_handler, formatter=log_cli_formatter, level=log_cli_level
        )
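The stored lambda lets the plugin build a fresh catching_logs context manager each time live logging is needed. A minimal sketch of a call site (illustrative only, not pytest's actual hook code):

    def _run_with_live_logs(self, func):
        # hypothetical helper: enter a fresh live-logs context around some work
        with self.live_logs_context():
            func()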
Example 2
    def _create_formatter(self, log_format, log_date_format):
        # color option doesn't exist if terminal plugin is disabled
        color = getattr(self._config.option, "color", "no")
        if color != "no" and ColoredLevelFormatter.LEVELNAME_FMT_REGEX.search(
            log_format
        ):
            formatter = ColoredLevelFormatter(
                create_terminal_writer(self._config), log_format, log_date_format
            )
        else:
            formatter = logging.Formatter(log_format, log_date_format)

        if not six.PY2:
            formatter._style = PercentStyleMultiline(formatter._style._fmt)
        return formatter
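On Python 3 the formatter's percent style is swapped for PercentStyleMultiline, so multi-line log messages get their continuation lines indented under the first line. A rough usage sketch, assuming the method lives on a logging plugin object named plugin (a hypothetical name, not part of the snippet):

import logging

formatter = plugin._create_formatter("%(levelname)s %(message)s", None)
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logging.getLogger("demo").addHandler(handler)
logging.getLogger("demo").warning("first line\nsecond line")  # second line is indented by the multiline style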
Example 3
    def _setup_cli_logging(self):
        """Sets up the handler and logger for the Live Logs feature, if enabled.

        This must be done right before starting the loop so we can access the terminal reporter plugin.
        """
        terminal_reporter = self._config.pluginmanager.get_plugin('terminalreporter')
        if self._config.getini('log_cli') and terminal_reporter is not None:
            capture_manager = self._config.pluginmanager.get_plugin('capturemanager')
            log_cli_handler = _LiveLoggingStreamHandler(terminal_reporter, capture_manager)
            log_cli_format = get_option_ini(self._config, 'log_cli_format', 'log_format')
            log_cli_date_format = get_option_ini(self._config, 'log_cli_date_format', 'log_date_format')
            if self._config.option.color != 'no' and ColoredLevelFormatter.LEVELNAME_FMT_REGEX.search(log_cli_format):
                log_cli_formatter = ColoredLevelFormatter(create_terminal_writer(self._config),
                                                          log_cli_format, datefmt=log_cli_date_format)
            else:
                log_cli_formatter = logging.Formatter(log_cli_format, datefmt=log_cli_date_format)
            log_cli_level = get_actual_log_level(self._config, 'log_cli_level', 'log_level')
            self.log_cli_handler = log_cli_handler
            self.live_logs_context = catching_logs(log_cli_handler, formatter=log_cli_formatter, level=log_cli_level)
        else:
            self.live_logs_context = _dummy_context_manager()
Example 4
def pytest_terminal_summary_main(tr, id):
    """
    Generate multiple reports at the end of the test suite run - each report goes into a dedicated file in the
    current directory. The report files are prefixed with the test suite name.

    This function emulates the --durations and -rA pytest arguments.

    This function is to be called from `conftest.py` via a `pytest_terminal_summary` wrapper that has to be defined
    there.

    Args:

    - tr: `terminalreporter` passed from `conftest.py`
    - id: unique id like `tests` or `examples` that will be incorporated into the final report filenames - this is
      needed as some jobs run pytest multiple times, so we can't have them overwrite each other.

    NB: this function taps into a private _pytest API and, while unlikely, it could break should
    pytest make internal changes - it also calls default internal methods of terminalreporter which
    can be hijacked by various `pytest-` plugins and interfere.

    """
    from _pytest.config import create_terminal_writer

    if not len(id):
        id = "tests"

    config = tr.config
    orig_writer = config.get_terminal_writer()
    orig_tbstyle = config.option.tbstyle
    orig_reportchars = tr.reportchars

    dir = "reports"
    Path(dir).mkdir(parents=True, exist_ok=True)
    report_files = {
        k: f"{dir}/{id}_{k}.txt"
        for k in [
            "durations",
            "errors",
            "failures_long",
            "failures_short",
            "failures_line",
            "passes",
            "stats",
            "summary_short",
            "warnings",
        ]
    }

    # custom durations report
    # note: there is no need to call pytest --durations=XX to get this separate report
    # adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
    dlist = []
    for replist in tr.stats.values():
        for rep in replist:
            if hasattr(rep, "duration"):
                dlist.append(rep)
    if dlist:
        dlist.sort(key=lambda x: x.duration, reverse=True)
        with open(report_files["durations"], "w") as f:
            durations_min = 0.05  # sec
            f.write("slowest durations\n")
            for i, rep in enumerate(dlist):
                if rep.duration < durations_min:
                    f.write(f"{len(dlist)-i} durations < {durations_min} secs were omitted")
                    break
                f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")

    def summary_failures_short(tr):
        # expecting that the reports were --tb=long (default) so we chop them off here to the last frame
        reports = tr.getreports("failed")
        if not reports:
            return
        tr.write_sep("=", "FAILURES SHORT STACK")
        for rep in reports:
            msg = tr._getfailureheadline(rep)
            tr.write_sep("_", msg, red=True, bold=True)
            # chop off the optional leading extra frames, leaving only the last one
            longrepr = re.sub(r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
            tr._tw.line(longrepr)
            # note: not printing out any rep.sections to keep the report short

    # use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
    # adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
    # note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
    # pytest-instafail does that)

    # report failures with line/short/long styles
    config.option.tbstyle = "auto"  # full tb
    with open(report_files["failures_long"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        tr.summary_failures()

    # config.option.tbstyle = "short" # short tb
    with open(report_files["failures_short"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        summary_failures_short(tr)

    config.option.tbstyle = "line"  # one line per error
    with open(report_files["failures_line"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        tr.summary_failures()

    with open(report_files["errors"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        tr.summary_errors()

    with open(report_files["warnings"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        tr.summary_warnings()  # normal warnings
        tr.summary_warnings()  # final warnings

    tr.reportchars = "wPpsxXEf"  # emulate -rA (used in summary_passes() and short_test_summary())
    with open(report_files["passes"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        tr.summary_passes()

    with open(report_files["summary_short"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        tr.short_test_summary()

    with open(report_files["stats"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        tr.summary_stats()

    # restore:
    tr._tw = orig_writer
    tr.reportchars = orig_reportchars
    config.option.tbstyle = orig_tbstyle
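The docstring says this function is meant to be invoked from conftest.py through a pytest_terminal_summary hook. A hedged sketch of what that wrapper could look like; the --make-reports option name is an assumption and would have to be registered in pytest_addoption:

def pytest_terminal_summary(terminalreporter):
    # hypothetical conftest.py wrapper: forward the reporter into the helper above
    make_reports = terminalreporter.config.getoption("--make-reports", default=None)
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)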
Example 5
def pytest_terminal_summary_main(tr, id):
    """
    Generate multiple reports at the end of the test suite run - each report goes into a dedicated file in the
    current directory. The report files are prefixed with the test suite name.

    This function emulates the --durations and -rA pytest arguments.

    This function is to be called from `conftest.py` via a `pytest_terminal_summary` wrapper that has to be defined
    there.

    Args:
    - tr: `terminalreporter` passed from `conftest.py`
    - id: unique id like `tests` or `examples` that will be incorporated into the final report
      filenames - this is needed as some jobs run pytest multiple times, so we can't have them overwrite each other.

    NB: this function taps into a private _pytest API and, while unlikely, it could break should
    pytest make internal changes - it also calls default internal methods of terminalreporter which
    can be hijacked by various `pytest-` plugins and interfere.

    """
    from _pytest.config import create_terminal_writer

    if not len(id):
        id = "tests"

    config = tr.config
    orig_writer = config.get_terminal_writer()
    orig_tbstyle = config.option.tbstyle
    orig_reportchars = tr.reportchars

    report_files = dict(
        durations="durations",
        short_summary="short_summary",
        summary_errors="errors",
        summary_failures="failures",
        summary_warnings="warnings",
        summary_passes="passes",
        summary_stats="stats",
    )
    dir = "reports"
    Path(dir).mkdir(parents=True, exist_ok=True)
    report_files.update(
        (k, f"{dir}/report_{id}_{v}.txt") for k, v in report_files.items())

    # custom durations report
    # note: there is no need to call pytest --durations=XX to get this separate report
    # adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
    dlist = []
    for replist in tr.stats.values():
        for rep in replist:
            if hasattr(rep, "duration"):
                dlist.append(rep)
    if dlist:
        dlist.sort(key=lambda x: x.duration, reverse=True)
        with open(report_files["durations"], "w") as f:
            durations_min = 0.05  # sec
            f.write("slowest durations\n")
            for i, rep in enumerate(dlist):
                if rep.duration < durations_min:
                    f.write(
                        f"{len(dlist)-i} durations < {durations_min} secs were omitted"
                    )
                    break
                f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")

    # use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
    # adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
    # note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
    # pytest-instafail does that)
    tr.reportchars = "wPpsxXEf"  # emulate -rA (used in summary_passes() and short_test_summary())
    config.option.tbstyle = "auto"
    with open(report_files["summary_failures"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        tr.summary_failures()

    with open(report_files["summary_errors"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        tr.summary_errors()

    with open(report_files["summary_warnings"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        tr.summary_warnings()  # normal warnings
        tr.summary_warnings()  # final warnings

    with open(report_files["summary_passes"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        tr.summary_passes()

    with open(report_files["short_summary"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        tr.short_test_summary()

    with open(report_files["summary_stats"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        tr.summary_stats()

    # restore:
    tr._tw = orig_writer
    tr.reportchars = orig_reportchars
    config.option.tbstyle = orig_tbstyle
Example 6
    def __init__(self, config):
        from _pytest.config import create_terminal_writer

        self.filename = '.last-run'
        self.results = {}
        self.ran_any = False
        self._tw = create_terminal_writer(config, sys.stdout)

# Config, TerminalWriter and get_sink_io come from the surrounding codebase of this snippet.
def _create_terminal_writer(config: Config, _file: Optional[TextIO] = None) -> TerminalWriter:
    # use the provided file if given, otherwise fall back to the default sink
    file = _file if _file is not None else get_sink_io()
    return create_terminal_writer(config, file)
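A brief usage sketch for the wrapper above, assuming a terminalreporter tr is in scope (the same file-redirection trick as in Example 4):

with open("stats.txt", "w") as f:
    tr._tw = _create_terminal_writer(tr.config, f)  # route subsequent report output into the file
    tr.summary_stats()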
Example 8
@pytest.hookimpl(hookwrapper=True)  # the bare yield below requires a hookwrapper implementation
def pytest_collection_modifyitems(session, config, items):
    yield
    group_count = config.getoption('test-group-count')
    group_id = config.getoption('test-group')
    seed = config.getoption('random-seed')
    prescheduled_path = config.getoption('prescheduled')

    if not group_count or not group_id:
        return

    test_dict = {item.name: item for item in items}
    original_order = {item: index for index, item in enumerate(items)}

    # schema: prescheduled_data[node_id] = [*test_names]
    prescheduled_data = [[] for _ in range(group_count)]
    if prescheduled_path:
        try:
            with open(prescheduled_path, 'r') as f:
                prescheduled_data = json.load(f)
                if len(prescheduled_data) != group_count:
                    print(
                        'WARNING: Prescheduled tests do not match up with the group count. '
                        'Prescheduling will be skipped.')
        except Exception:
            print(
                'WARNING: Unable to load prescheduled tests. Prescheduling will be skipped.'
            )

    all_prescheduled_tests = [
        test_dict[test_name] for sublist in prescheduled_data
        for test_name in sublist if test_name in test_dict
    ]
    prescheduled_tests = [
        test_dict[test_name] for test_name in prescheduled_data[group_id - 1]
        if test_name in test_dict
    ]
    unscheduled_tests = [
        item for item in items if item not in all_prescheduled_tests
    ]
    unscheduled_tests.sort(key=lambda x: x.name)

    if seed is not False:
        seeded = Random(seed)
        seeded.shuffle(unscheduled_tests)

    total_unscheduled_items = len(unscheduled_tests)

    group_size = get_group_size(total_unscheduled_items, group_count)
    tests_in_group = get_group(unscheduled_tests, group_size, group_id)
    items[:] = tests_in_group + prescheduled_tests

    items.sort(key=original_order.__getitem__)

    terminal_reporter = config.pluginmanager.get_plugin('terminalreporter')
    if terminal_reporter is not None:
        terminal_writer = create_terminal_writer(config)
        message = terminal_writer.markup(
            '\nRunning test group #{0} ({1} tests)\n'.format(
                group_id, len(items)),
            yellow=True)
        terminal_reporter.write(message)
        message = terminal_writer.markup(
            '\n'.join([item.name for item in items]) + '\n', yellow=True)
        terminal_reporter.write(message)
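get_group_size and get_group are referenced but not shown in the snippet; a plausible sketch of what they compute, assuming plain ceiling-division chunking (an assumption, not the plugin's verified source):

import math

def get_group_size(total_items, total_groups):
    # ceiling division: the largest number of tests any single group receives
    return int(math.ceil(total_items / float(total_groups)))

def get_group(items, group_size, group_id):
    # group_id is 1-based; slice the corresponding chunk out of the ordered items
    start = group_size * (group_id - 1)
    end = start + group_size
    return items[start:end]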