def pytest_terminal_summary(self, terminalreporter: TerminalReporter) -> None:
    if self.known_failure_cases:
        terminalreporter.section('Known failure cases', bold=True, yellow=True)
        terminalreporter.line('\n'.join(self.known_failure_cases))
    if self.failed_cases:
        terminalreporter.section('Failed cases', bold=True, red=True)
        terminalreporter.line('\n'.join(self.failed_cases))
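# A minimal registration sketch for the method above, assuming it lives on a
# plugin class. CaseReporter and the "case-reporter" plugin name are
# hypothetical, and the two lists are assumed to be filled by other hooks
# (e.g. pytest_runtest_logreport) on the same class.
class CaseReporter:
    def __init__(self):
        self.known_failure_cases = []
        self.failed_cases = []
    # pytest_terminal_summary from above would be defined here


def pytest_configure(config):
    # register the instance so pytest invokes its hook methods
    config.pluginmanager.register(CaseReporter(), 'case-reporter')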
# Runs as a hookwrapper so the OCR table is appended after pytest's own
# terminal summary; the decorator is required for the bare ``yield`` below.
import pytest
from _pytest.terminal import TerminalReporter


@pytest.hookimpl(hookwrapper=True)
def pytest_terminal_summary(terminalreporter: TerminalReporter, exitstatus: int, config):
    yield  # let the default terminal summary run first
    if hasattr(config, "all_confs") and config.all_confs:
        terminalreporter.section("OCR Avg Confidences")
        terminalreporter.write("Conf.\tSimil.\tTest Explanation\n")
        terminalreporter.write("-" * 65 + "\n")
        for c in config.all_confs:
            terminalreporter.write(f"{c[0]:.1f}\t{c[1]:.2f}\t({c[2]})\n")
        terminalreporter.write("-" * 65 + "\n")
        conf_sum = sum(c[0] for c in config.all_confs)
        sim_sum = sum(c[1] for c in config.all_confs)
        count = len(config.all_confs)  # non-zero: guarded above
        terminalreporter.write(f"{conf_sum / count:.2f}\t{sim_sum / count:.2f}\tMean\n")
        terminalreporter.write(f"{conf_sum:.2f}\t{sim_sum:.2f}\tSum\n")
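# A minimal sketch of how config.all_confs might get populated. The
# record_conf fixture name and the (confidence, similarity, explanation)
# tuple layout are assumptions inferred from the columns printed above.
import pytest


@pytest.fixture
def record_conf(request):
    config = request.config
    if not hasattr(config, "all_confs"):
        config.all_confs = []

    def _record(confidence, similarity, explanation):
        config.all_confs.append((confidence, similarity, explanation))

    return _record


def test_ocr_sample(record_conf):
    # hypothetical usage: record one row for the summary table
    record_conf(87.5, 0.92, "sample invoice scan")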
import json
import os
import warnings
from pathlib import Path

import pytest
from _pytest.config import Config
from _pytest.terminal import TerminalReporter


@pytest.hookimpl(hookwrapper=True)
def pytest_terminal_summary(terminalreporter: TerminalReporter, exitstatus: int, config: Config):
    yield  # run after the default summary has been produced
    revision = os.getenv("GITHUB_SHA", "local")
    platform = os.getenv("PLATFORM", "local")
    terminalreporter.section("Benchmark results", "-")
    result = []
    for test_report in terminalreporter.stats.get("passed", []):
        result_entry = []
        for _, recorded_property in test_report.user_properties:
            terminalreporter.write("{}.{}: ".format(test_report.head_line, recorded_property["name"]))
            unit = recorded_property["unit"]
            value = recorded_property["value"]
            # pick a display precision that suits the unit
            if unit == "MB":
                terminalreporter.write("{0:,.0f}".format(value), green=True)
            elif unit in ("s", "ms") and isinstance(value, float):
                terminalreporter.write("{0:,.3f}".format(value), green=True)
            elif isinstance(value, float):
                terminalreporter.write("{0:,.4f}".format(value), green=True)
            else:
                terminalreporter.write(str(value), green=True)
            terminalreporter.line(" {}".format(unit))
            result_entry.append(recorded_property)
        result.append({
            "suit": test_report.nodeid,
            "total_duration": test_report.duration,
            "data": result_entry,
        })
    out_dir = config.getoption("out_dir")
    if out_dir is None:
        warnings.warn("no out dir provided to store performance test results")
        return
    if not result:
        warnings.warn("no results to store (no passed test suites)")
        return
    # get_out_path is a project helper that derives the results file path
    get_out_path(Path(out_dir), revision=revision).write_text(
        json.dumps({"revision": revision, "platform": platform, "result": result}, indent=4))
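# A minimal sketch of the pieces the hook above relies on: the out_dir option
# and a get_out_path helper. The --out-dir flag name and the file-name scheme
# are assumptions; record_property is the stock pytest fixture that fills
# test_report.user_properties with (name, value) tuples.
from pathlib import Path


def pytest_addoption(parser):
    parser.addoption("--out-dir", dest="out_dir", default=None,
                     help="directory for performance result JSON files")


def get_out_path(out_dir: Path, revision: str) -> Path:
    # hypothetical helper: one JSON file per revision
    out_dir.mkdir(parents=True, exist_ok=True)
    return out_dir / f"result_{revision}.json"


def test_index_build(record_property):
    # hypothetical usage: each property is a dict the summary hook can read
    record_property("benchmark", {"name": "peak_rss", "unit": "MB", "value": 512.0})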
def pytest_terminal_summary(self, terminalreporter: TerminalReporter, exitstatus: ExitCode, config: Config) -> None:
    if self.exception:
        terminalreporter.ensure_newline()
        terminalreporter.section('Jira XRAY', sep='-', red=True, bold=True)
        terminalreporter.write_line('Could not publish results to Jira XRAY!')
        if self.exception.message:
            terminalreporter.write_line(self.exception.message)
    elif self.issue_id and self.logfile:
        terminalreporter.write_sep(
            '-', f'Generated XRAY execution report file: {Path(self.logfile).absolute()}')
    elif self.issue_id:
        terminalreporter.write_sep(
            '-', f'Uploaded results to JIRA XRAY. Test Execution Id: {self.issue_id}')
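# A minimal sketch of the state the method above reports on. XrayError and
# the _publish step are hypothetical stand-ins for the real publisher, shown
# only to illustrate when exception, issue_id and logfile get set.
class XrayError(Exception):
    def __init__(self, message=""):
        super().__init__(message)
        self.message = message


class XrayReporter:
    def __init__(self, logfile=None):
        self.exception = None
        self.issue_id = None
        self.logfile = logfile

    def _publish(self, session):
        # placeholder: a real implementation would call the XRAY REST API
        raise XrayError("publishing not configured in this sketch")

    def pytest_sessionfinish(self, session):
        try:
            self.issue_id = self._publish(session)
        except XrayError as exc:
            self.exception = exc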