def pytest_runtest_logreport(self, report):
    """Report each test phase, echoing failures and errors immediately."""
    TerminalReporter.pytest_runtest_logreport(self, report)
    # xfail-marked failures are expected; only surface genuine failures.
    if not report.failed or hasattr(report, "wasxfail"):
        return
    if self.verbosity <= 0:
        # Quiet/dot mode shares a line with progress markers; break it
        # before printing the traceback.
        self._tw.line()
    self.print_failure(report)
def pytest_runtest_logreport(self, report):
    """Extend the default test report line with system resource statistics.

    At verbosity 1 a compact ``[CPU:..%|MEM:..%]`` suffix is appended to the
    test's status line; at higher verbosity a per-process breakdown is
    printed.  Stats are skipped in quiet mode, for non-call phases, and
    when ``--sys-stats`` was not given.

    Fixes: removed the unused ``test_daemon`` local and the redundant
    ``else`` after ``return``.
    """
    TerminalReporter.pytest_runtest_logreport(self, report)
    if self.verbosity <= 0:
        return
    if report.when != 'call':
        # Only report stats once per test, after its call phase.
        return
    if self.config.getoption('--sys-stats') is False:
        return
    if self.verbosity == 1:
        line = ' [CPU:{0}%|MEM:{1}%]'.format(
            psutil.cpu_percent(), psutil.virtual_memory().percent)
        self._tw.write(line)
        return
    self.ensure_newline()
    template = ' {} - CPU: {:6.2f} % MEM: {:6.2f} % SWAP: {:6.2f} %\n'
    self._tw.write(
        template.format(' System',
                        psutil.cpu_percent(),
                        psutil.virtual_memory().percent,
                        psutil.swap_memory().percent))
    for name, psproc in self._session.stats_processes.items():
        # oneshot() caches process info so the reads below are consistent.
        with psproc.oneshot():
            cpu = psproc.cpu_percent()
            mem = psproc.memory_percent('vms')
            swap = psproc.memory_percent('swap')
            self._tw.write(template.format(name, cpu, mem, swap))
def pytest_runtest_logreport(self, report):
    """Append per-process statistics after each verbose test call phase."""
    TerminalReporter.pytest_runtest_logreport(self, report)
    # Stats are shown only for verbose runs, once per test (call phase),
    # and only when --sys-stats was requested.
    if (self.verbosity <= 0
            or report.when != 'call'
            or self.config.getoption('--sys-stats') is False):
        return
    if self.verbosity <= 1:
        return
    self.ensure_newline()
    self.section('Processes Statistics', sep='-', bold=True)
    template = ' {} - CPU: {:6.2f} % MEM: {:6.2f} %'
    if not IS_WINDOWS:
        # Swap reporting is not available on Windows.
        template += ' SWAP: {:6.2f} %'
    template += '\n'
    self.write(
        template.format(' System',
                        psutil.cpu_percent(),
                        psutil.virtual_memory().percent,
                        psutil.swap_memory().percent))
    for name, psproc in self._session.stats_processes.items():
        with psproc.oneshot():
            values = [psproc.cpu_percent(), psproc.memory_percent('vms')]
            if not IS_WINDOWS:
                values.append(psproc.memory_percent('swap'))
            self.write(template.format(name, *values))
def pytest_runtest_logreport(self, report):
    """Log a test phase, printing failures and errors as they occur."""
    TerminalReporter.pytest_runtest_logreport(self, report)
    genuine_failure = report.failed and not hasattr(report, 'wasxfail')
    if genuine_failure:
        if self.verbosity <= 0:
            # Quiet mode: start a fresh line before the failure output.
            self._tw.line()
        self.print_failure(report)
def pytest_runtest_logreport(self, report):
    """Log a test phase, printing failures (and reruns) immediately."""
    TerminalReporter.pytest_runtest_logreport(self, report)
    if hasattr(report, 'wasxfail'):
        # Expected failures are not echoed.
        return
    if report.failed or report.outcome == "rerun":
        if self.verbosity <= 0:
            # Break the quiet-mode progress line before the traceback.
            self._tw.line()
        self.print_failure(report)
def pytest_runtest_logreport(self, report):
    """Render gherkin-style Feature/Scenario progress for a test report.

    Non-scenario tests and quiet runs fall back to the stock pytest
    reporting.  At verbosity 1 a one-line scenario summary is printed; at
    higher verbosity every step is listed, optionally with example
    parameters expanded (``--expand``).
    """
    rep = report
    res = self.config.hook.pytest_report_teststatus(report=rep)
    cat, letter, word = res
    if not letter and not word:
        # probably passed setup/teardown
        return
    # BUGFIX: default the markup so it is always bound -- outcomes other
    # than passed/failed/skipped (e.g. "rerun") previously left
    # ``word_markup`` undefined, raising NameError below.
    word_markup = {}
    if isinstance(word, tuple):
        word, word_markup = word
    elif rep.passed:
        word_markup = {'green': True}
    elif rep.failed:
        word_markup = {'red': True}
    elif rep.skipped:
        word_markup = {'yellow': True}
    feature_markup = {'blue': True}
    scenario_markup = word_markup
    if self.verbosity <= 0:
        return TerminalReporter.pytest_runtest_logreport(self, rep)
    elif self.verbosity == 1:
        if hasattr(report, 'scenario'):
            self.ensure_newline()
            self._tw.write('Feature: ', **feature_markup)
            self._tw.write(report.scenario['feature']['name'], **feature_markup)
            self._tw.write('\n')
            self._tw.write(' Scenario: ', **scenario_markup)
            self._tw.write(report.scenario['name'], **scenario_markup)
            self._tw.write(' ')
            self._tw.write(word, **word_markup)
            self._tw.write('\n')
        else:
            return TerminalReporter.pytest_runtest_logreport(self, rep)
    elif self.verbosity > 1:
        if hasattr(report, 'scenario'):
            self.ensure_newline()
            self._tw.write('Feature: ', **feature_markup)
            self._tw.write(report.scenario['feature']['name'], **feature_markup)
            self._tw.write('\n')
            self._tw.write(' Scenario: ', **scenario_markup)
            self._tw.write(report.scenario['name'], **scenario_markup)
            self._tw.write('\n')
            for step in report.scenario['steps']:
                if self.config.option.expand:
                    # Substitute the scenario-outline example values into
                    # the step name.
                    step_name = self._format_step_name(
                        step['name'], **report.scenario['example_kwargs'])
                else:
                    step_name = step['name']
                self._tw.write(' {} {}\n'.format(step['keyword'], step_name),
                               **scenario_markup)
            self._tw.write(' ' + word, **word_markup)
            self._tw.write('\n\n')
        else:
            return TerminalReporter.pytest_runtest_logreport(self, rep)
    # Keep pytest's end-of-run tallies in sync for the scenario branches.
    self.stats.setdefault(cat, []).append(rep)
def pytest_runtest_logreport(self, report):
    """Append the test's SQL query count (in blue) to its status output."""
    TerminalReporter.pytest_runtest_logreport(self, report)
    status = self.config.hook.pytest_report_teststatus(report=report)
    _category, letter, word = status
    if not (letter or word):
        # probably passed setup/teardown
        return
    if self.sqlcount:
        self._tw.write(" %d" % report.sqlcount, blue=True)
def pytest_runtest_logreport(self, report):
    """Render gherkin-style Feature/Scenario progress for a test report.

    Non-scenario tests and quiet runs fall back to the stock pytest
    reporting.  At verbosity 1 a one-line scenario summary is printed; at
    higher verbosity every step of the scenario is listed.
    """
    rep = report
    res = self.config.hook.pytest_report_teststatus(report=rep)
    cat, letter, word = res
    if not letter and not word:
        # probably passed setup/teardown
        return
    # BUGFIX: default the markup so it is always bound -- outcomes other
    # than passed/failed/skipped (e.g. "rerun") previously left
    # ``word_markup`` undefined, raising NameError below.
    word_markup = {}
    if isinstance(word, tuple):
        word, word_markup = word
    elif rep.passed:
        word_markup = {'green': True}
    elif rep.failed:
        word_markup = {'red': True}
    elif rep.skipped:
        word_markup = {'yellow': True}
    feature_markup = {'blue': True}
    scenario_markup = word_markup
    if self.verbosity <= 0:
        return TerminalReporter.pytest_runtest_logreport(self, rep)
    elif self.verbosity == 1:
        if hasattr(report, 'scenario'):
            self.ensure_newline()
            self._tw.write('Feature: ', **feature_markup)
            self._tw.write(report.scenario['feature']['name'], **feature_markup)
            self._tw.write('\n')
            self._tw.write(' Scenario: ', **scenario_markup)
            self._tw.write(report.scenario['name'], **scenario_markup)
            self._tw.write(' ')
            self._tw.write(word, **word_markup)
            self._tw.write('\n')
        else:
            return TerminalReporter.pytest_runtest_logreport(self, rep)
    elif self.verbosity > 1:
        if hasattr(report, 'scenario'):
            self.ensure_newline()
            self._tw.write('Feature: ', **feature_markup)
            self._tw.write(report.scenario['feature']['name'], **feature_markup)
            self._tw.write('\n')
            self._tw.write(' Scenario: ', **scenario_markup)
            self._tw.write(report.scenario['name'], **scenario_markup)
            self._tw.write('\n')
            for step in report.scenario['steps']:
                self._tw.write(
                    ' {} {}\n'.format(step['keyword'], step['name']),
                    **scenario_markup)
            self._tw.write(' ' + word, **word_markup)
            self._tw.write('\n\n')
        else:
            return TerminalReporter.pytest_runtest_logreport(self, rep)
    # Keep pytest's end-of-run tallies in sync for the scenario branches.
    self.stats.setdefault(cat, []).append(rep)
def pytest_runtest_logreport(self, report):
    """Render gherkin-style Feature/Scenario progress for a test report.

    Non-scenario tests and quiet runs fall back to the stock pytest
    reporting.  At verbosity 1 a one-line scenario summary is printed; at
    higher verbosity every step is listed, optionally with example
    parameters expanded (``--expand``).
    """
    rep = report
    res = self.config.hook.pytest_report_teststatus(report=rep, config=self.config)
    cat, letter, word = res
    if not letter and not word:
        # probably passed setup/teardown
        return
    # BUGFIX: default the markup so it is always bound -- outcomes other
    # than passed/failed/skipped (e.g. "rerun") previously left
    # ``word_markup`` undefined, raising NameError below.
    word_markup = {}
    if isinstance(word, tuple):
        word, word_markup = word
    elif rep.passed:
        word_markup = {"green": True}
    elif rep.failed:
        word_markup = {"red": True}
    elif rep.skipped:
        word_markup = {"yellow": True}
    feature_markup = {"blue": True}
    scenario_markup = word_markup
    if self.verbosity <= 0:
        return TerminalReporter.pytest_runtest_logreport(self, rep)
    elif self.verbosity == 1:
        if hasattr(report, "scenario"):
            self.ensure_newline()
            self._tw.write("Feature: ", **feature_markup)
            self._tw.write(report.scenario["feature"]["name"], **feature_markup)
            self._tw.write("\n")
            self._tw.write(" Scenario: ", **scenario_markup)
            self._tw.write(report.scenario["name"], **scenario_markup)
            self._tw.write(" ")
            self._tw.write(word, **word_markup)
            self._tw.write("\n")
        else:
            return TerminalReporter.pytest_runtest_logreport(self, rep)
    elif self.verbosity > 1:
        if hasattr(report, "scenario"):
            self.ensure_newline()
            self._tw.write("Feature: ", **feature_markup)
            self._tw.write(report.scenario["feature"]["name"], **feature_markup)
            self._tw.write("\n")
            self._tw.write(" Scenario: ", **scenario_markup)
            self._tw.write(report.scenario["name"], **scenario_markup)
            self._tw.write("\n")
            for step in report.scenario["steps"]:
                if self.config.option.expand:
                    # Substitute the scenario-outline example values into
                    # the step name.
                    step_name = self._format_step_name(
                        step["name"], **report.scenario["example_kwargs"])
                else:
                    step_name = step["name"]
                self._tw.write(" {} {}\n".format(step["keyword"], step_name),
                               **scenario_markup)
            self._tw.write(" " + word, **word_markup)
            self._tw.write("\n\n")
        else:
            return TerminalReporter.pytest_runtest_logreport(self, rep)
    # Keep pytest's end-of-run tallies in sync for the scenario branches.
    self.stats.setdefault(cat, []).append(rep)
def pytest_sessionfinish(session, exitstatus):
    """Replay the MPI-gathered test reports through the standard reporter.

    Runs once the whole session is done, so reports collected from all MPI
    ranks can be printed by the terminal reporter.

    Fixes: idiomatic ``assert`` statements (no call-style parentheses, no
    ``== True`` comparison), unused loop variable removed, dead
    commented-out code deleted.
    """
    standard_reporter = session.config.pluginmanager.getplugin('terminalreporter')
    # The plugin is expected to have installed its MPI-aware reporter.
    assert isinstance(standard_reporter, TerminalReporterMPI)
    # Report gathering must already have completed.
    assert standard_reporter.mpi_reporter.post_done
    for report in standard_reporter.mpi_reporter.reports_gather.values():
        TerminalReporter.pytest_runtest_logreport(standard_reporter, report[0])
def pytest_runtest_logreport(self, report):
    """Verbose per-test reporting with module/class headers.

    Non-verbose runs defer entirely to pytest's stock reporter.
    """
    # TODO: if access to the test item/node itself is ever needed, hooking
    # pytest_runtest_makereport may be the (slightly grody) way to get it.
    if not self.verbosity:
        # TODO: colors & per-module headers/indent would be nice even here.
        return TerminalReporter.pytest_runtest_logreport(self, report)

    # The default implementation updates the overall run stats; repeating
    # that here keeps end-of-run tallying and the failure exit status
    # working.  (Why that's not a separate hook, no idea.)
    self.update_stats(report)

    # Only display the main call phase -- not the test's setup/teardown.
    if report.when != "call":
        return

    node_id = report.nodeid
    # Emit any module/class headers this test introduces (this also tracks
    # indentation state), then the test's own name/status line.
    self.ensure_headers(node_id)
    self.display_result(report)
def pytest_runtest_logreport(self, report):
    """Write system-wide and per-process statistics after each test call."""
    TerminalReporter.pytest_runtest_logreport(self, report)
    # Stats require verbose mode, the call phase, and --sys-stats.
    if self.verbosity <= 0 or report.when != 'call':
        return
    if self.config.getoption('--sys-stats') is False:
        return
    if self.verbosity <= 1:
        return
    self.section('Statistics', sep='-', bold=True)
    template = ' {} - CPU: {:6.2f} % MEM: {:6.2f} % SWAP: {:6.2f} %\n'
    self._tw.write(template.format(
        ' System',
        psutil.cpu_percent(),
        psutil.virtual_memory().percent,
        psutil.swap_memory().percent))
    for name, psproc in self._session.stats_processes.items():
        # oneshot() batches the process queries for consistency.
        with psproc.oneshot():
            self._tw.write(template.format(
                name,
                psproc.cpu_percent(),
                psproc.memory_percent('vms'),
                psproc.memory_percent('swap')))
def pytest_runtest_logreport(self, report):
    """Write dot-aligned system and per-process stats after each test call."""
    TerminalReporter.pytest_runtest_logreport(self, report)
    if (self.verbosity <= 0
            or report.when != 'call'
            or self.config.getoption('--sys-stats') is False):
        return
    if self.verbosity <= 1:
        return
    self.ensure_newline()
    self.section('Processes Statistics', sep='-', bold=True)
    # Pad every row with dots so the stat columns line up.
    row_names = ['System'] + list(self._session.stats_processes)
    left_padding = len(max(row_names, key=len))
    template = ' ...{} {} - CPU: {:6.2f} % MEM: {:6.2f} %'
    if not IS_WINDOWS:
        # Swap reporting is not available on Windows.
        template += ' SWAP: {:6.2f} %'
    template += '\n'
    self.write(template.format(
        '.' * (left_padding - len('System')), 'System',
        psutil.cpu_percent(),
        psutil.virtual_memory().percent,
        psutil.swap_memory().percent))
    for name, psproc in self._session.stats_processes.items():
        dots = '.' * (left_padding - len(name))
        try:
            with psproc.oneshot():
                cpu = psproc.cpu_percent()
                mem = psproc.memory_percent('vms')
                if IS_WINDOWS:
                    row = template.format(dots, name, cpu, mem)
                else:
                    row = template.format(
                        dots, name, cpu, mem, psproc.memory_percent('swap'))
            self.write(row)
        except psutil.NoSuchProcess:
            # The process exited mid-run; just skip its row.
            continue
def pytest_runtest_logreport(self, report):
    """Print detailed per-process resource statistics after a test call.

    Extends the stock terminal reporting.  At verbosity > 1 (and with
    system stats enabled) a table of CPU/memory usage is written for the
    system and for every tracked process, optionally aggregating the
    memory of (recursive) child processes.  Tracked processes that have
    exited are removed from the session's tracked set afterwards.
    """
    TerminalReporter.pytest_runtest_logreport(self, report)
    if self.verbosity <= 0:
        return
    # Only emit stats once per test, after its call phase.
    if report.when != "call":
        return
    if self._show_sys_stats is False:
        return
    if self.verbosity > 1:
        # Processes that disappear mid-iteration are collected here and
        # dropped from the tracked mapping once the loop is done.
        remove_from_stats = set()
        self.ensure_newline()
        self.section("Processes Statistics", sep="-", bold=True)
        # Width of the longest row name; used to dot-pad rows into
        # alignment.
        left_padding = len(
            max(["System"] + list(self._session.stats_processes), key=len))
        template = (
            " ...{dots} {name} - CPU: {cpu:6.2f} % MEM: {mem:6.2f} % (Virtual Memory)"
        )
        stats = {
            "name": "System",
            "dots": "." * (left_padding - len("System")),
            "cpu": psutil.cpu_percent(),
            "mem": psutil.virtual_memory().percent,
        }
        swap = psutil.swap_memory().percent
        # Only show the swap column when swap is actually in use.
        if swap > 0:
            template += " SWAP: {swap:6.2f} %"
            stats["swap"] = swap
        template += "\n"
        self.write(template.format(**stats))
        # Per-process rows report the configured memory metric rather than
        # the system-wide virtual-memory percentage.
        template = " ...{dots} {name} - CPU: {cpu:6.2f} % MEM: {mem:6.2f} % ({m_type})"
        children_template = (
            template + " MEM SUM: {c_mem} % ({m_type}) CHILD PROCS: {c_count}\n")
        no_children_template = template + "\n"
        for name, psproc in self._session.stats_processes.items():
            template = no_children_template
            dots = "." * (left_padding - len(name))
            # PIDs already counted, to avoid double-counting children.
            pids = []
            try:
                # oneshot() caches process info so the queries below are
                # consistent and cheap.
                with psproc.oneshot():
                    stats = {
                        "name": name,
                        "dots": dots,
                        "cpu": psproc.cpu_percent(),
                        "mem": psproc.memory_percent(self._sys_stats_mem_type),
                        "m_type": self._sys_stats_mem_type.upper(),
                    }
                    if self._sys_stats_no_children is False:
                        pids.append(psproc.pid)
                        children = psproc.children(recursive=True)
                        if children:
                            template = children_template
                            stats["c_count"] = 0
                            c_mem = stats["mem"]
                            for child in children:
                                if child.pid in pids:
                                    continue
                                pids.append(child.pid)
                                if not psutil.pid_exists(child.pid):
                                    continue
                                try:
                                    c_mem += child.memory_percent(
                                        self._sys_stats_mem_type)
                                    stats["c_count"] += 1
                                except (psutil.AccessDenied, psutil.NoSuchProcess):
                                    # Child exited or is inaccessible; skip.
                                    continue
                            if stats["c_count"]:
                                stats["c_mem"] = "{:6.2f}".format(c_mem)
                            else:
                                # Every child was skipped; fall back to the
                                # plain (no-children) row format.
                                template = no_children_template
                    self.write(template.format(**stats))
            except psutil.NoSuchProcess:
                # The tracked process is gone; remember it for removal.
                remove_from_stats.add(name)
                continue
        if remove_from_stats:
            for name in remove_from_stats:
                self._session.stats_processes.pop(name)
def pytest_runtest_logreport(self, report):
    """Print detailed per-process resource statistics after a test call.

    Extends the stock terminal reporting.  At verbosity > 1 (and with
    system stats enabled) a table of CPU/memory usage is written for the
    system and for every tracked process, optionally aggregating the
    memory of (recursive) child processes.
    """
    TerminalReporter.pytest_runtest_logreport(self, report)
    if self.verbosity <= 0:
        return
    # Only emit stats once per test, after its call phase.
    if report.when != 'call':
        return
    if self._show_sys_stats is False:
        return
    if self.verbosity > 1:
        self.ensure_newline()
        self.section('Processes Statistics', sep='-', bold=True)
        # Width of the longest row name; used to dot-pad rows into
        # alignment.
        left_padding = len(max(['System'] + list(self._session.stats_processes), key=len))
        template = ' ...{dots} {name} - CPU: {cpu:6.2f} % MEM: {mem:6.2f} % (Virtual Memory)'
        stats = {
            'name': 'System',
            'dots': '.' * (left_padding - len('System')),
            'cpu': psutil.cpu_percent(),
            'mem': psutil.virtual_memory().percent
        }
        swap = psutil.swap_memory().percent
        # Only show the swap column when swap is actually in use.
        if swap > 0:
            template += ' SWAP: {swap:6.2f} %'
            stats['swap'] = swap
        template += '\n'
        self.write(template.format(**stats))
        # Per-process rows report the configured memory metric rather than
        # the system-wide virtual-memory percentage.
        template = ' ...{dots} {name} - CPU: {cpu:6.2f} % MEM: {mem:6.2f} % ({m_type})'
        children_template = template + ' MEM SUM: {c_mem} % ({m_type}) CHILD PROCS: {c_count}\n'
        no_children_template = template + '\n'
        for name, psproc in self._session.stats_processes.items():
            template = no_children_template
            dots = '.' * (left_padding - len(name))
            # PIDs already counted, to avoid double-counting children.
            pids = []
            try:
                # oneshot() caches process info so the queries below are
                # consistent and cheap.
                with psproc.oneshot():
                    stats = {
                        'name': name,
                        'dots': dots,
                        'cpu': psproc.cpu_percent(),
                        'mem': psproc.memory_percent(self._sys_stats_mem_type),
                        'm_type': self._sys_stats_mem_type.upper()
                    }
                    if self._sys_stats_no_children is False:
                        pids.append(psproc.pid)
                        children = psproc.children(recursive=True)
                        if children:
                            template = children_template
                            stats['c_count'] = 0
                            c_mem = stats['mem']
                            for child in children:
                                if child.pid in pids:
                                    continue
                                pids.append(child.pid)
                                if not psutil.pid_exists(child.pid):
                                    continue
                                try:
                                    c_mem += child.memory_percent(self._sys_stats_mem_type)
                                    stats['c_count'] += 1
                                except (psutil.AccessDenied, psutil.NoSuchProcess):
                                    # Child exited or is inaccessible; skip.
                                    continue
                            if stats['c_count']:
                                stats['c_mem'] = '{:6.2f}'.format(c_mem)
                            else:
                                # Every child was skipped; fall back to the
                                # plain (no-children) row format.
                                template = no_children_template
                    self.write(template.format(**stats))
            except psutil.NoSuchProcess:
                # The tracked process is gone; skip its row.
                continue