def cmd_hist(args):
    """Display a histogram for each benchmark group.

    Benchmarks are grouped by name; groups with multiple suites show the
    filename next to each benchmark.  Benchmarks ignored by the grouping
    are displayed individually afterwards.
    """
    checks = not args.quiet
    data = load_benchmarks(args)
    ignored = list(data.group_by_name_ignored())

    groups = data.group_by_name()
    show_filename = (data.get_nsuite() > 1)
    show_group_name = (len(groups) > 1)

    for name, benchmarks, is_last in groups:
        if show_group_name:
            display_title(name)

        # format_histogram() consumes (benchmark, title) pairs; the title is
        # the filename only when several suites are loaded.
        benchmarks = [(benchmark, filename if show_filename else None)
                      for benchmark, title, filename in benchmarks]

        for line in format_histogram(benchmarks, bins=args.bins,
                                     extend=args.extend, checks=checks):
            print(line)

        if not (is_last or ignored):
            print()

    # Renamed the inner loop variable: the original rebound 'ignored' while
    # iterating it, which worked but shadowed the outer list confusingly.
    for suite, ignored_benchs in ignored:
        for bench in ignored_benchs:
            name = bench.get_name()
            print("[ %s ]" % name)

            # BUG FIX: the original passed [name] (a bare string) where
            # format_histogram() expects a list of (benchmark, title) pairs,
            # exactly as in the grouped loop above.
            for line in format_histogram([(bench, None)], bins=args.bins,
                                         extend=args.extend, checks=checks):
                print(line)
def compare_geometric_mean(self):
    """Display the geometric mean of the normalized means.

    With a single result column, a single "Geometric mean" line is
    printed; with several, one line per column under a title.  Nothing is
    printed when fewer than two benchmarks are available.
    """
    results_per_file = self.all_results

    # A list (not a dict) because two filenames can be identical even
    # when their results differ.
    collected = [(item.changed.name, []) for item in results_per_file[0]]
    for file_results in results_per_file:
        for idx, result in enumerate(file_results):
            collected[idx][1].append(result.norm_mean)

    # A geometric mean is only meaningful over at least two benchmarks.
    if len(collected[0][1]) < 2:
        return

    print()
    if len(collected) == 1:
        geo_mean = format_geometric_mean(collected[0][1])
        print(f'Geometric mean: {geo_mean}')
    else:
        display_title('Geometric mean')
        for name, norm_means in collected:
            geo_mean = format_geometric_mean(norm_means)
            print(f'{name}: {geo_mean}')
def compare_suites_list(self):
    """Print each comparison result as a list.

    Results that are not statistically significant are hidden (unless
    verbose) and reported in a summary line at the end (unless quiet).
    """
    hidden = []
    printed_single_line = False
    total = len(self.all_results)

    for pos, results in enumerate(self.all_results):
        is_significant = any(result.significant for result in results)

        formatted = []
        for result in results:
            formatted.extend(result.format(self.verbose))

        if not is_significant and not self.verbose:
            # Hide the benchmark; it is summarized at the end.
            hidden.append(results.name)
            continue

        if len(formatted) == 1:
            text = formatted[0]
            if self.show_name:
                text = '%s: %s' % (results.name, text)
            print(text)
            printed_single_line = True
        else:
            if self.show_name:
                display_title(results.name)
            for line in formatted:
                print(line)
            if pos != total - 1:
                print()

    if not self.quiet and hidden:
        if printed_single_line:
            print()
        # NOTE(review): the method name below looks misspelled
        # ("signiticant"), but it is kept as-is to match the method
        # defined elsewhere in the class -- confirm before renaming.
        self.display_not_signiticant(hidden)
def compare_suites_list(all_results, show_name, args):
    """Print each comparison result as a list.

    Non-significant results are hidden (unless args.verbose) and reported
    in a single summary line at the end (unless args.quiet).
    """
    hidden = []
    last = len(all_results) - 1

    for pos, results in enumerate(all_results):
        is_significant = any(result.significant for result in results)

        rendered = []
        for result in results:
            rendered.extend(result.format(args.verbose))

        if not is_significant and not args.verbose:
            hidden.append(results.name)
            continue

        if len(rendered) == 1:
            text = rendered[0]
            if show_name:
                text = '%s: %s' % (results.name, text)
            print(text)
        else:
            if show_name:
                display_title(results.name)
            for line in rendered:
                print(line)
            if pos != last:
                print()

    if not args.quiet:
        if hidden:
            print("Benchmark hidden because not significant (%s): %s"
                  % (len(hidden), ', '.join(hidden)))
def write_messages(self, title, messages):
    """Print *messages* one per line under *title*.

    No output at all when *messages* is empty.
    """
    if messages:
        print()
        display_title(title)
        for message in messages:
            print(message)
def _compare_to(self):
    """Benchmark two Python binaries and display a comparison.

    Runs the benchmark once with the reference interpreter
    (args.compare_to) and once with the changed one (args.python), then
    hands both results to timeit_compare_benchs().  Output format depends
    on self._multiline_output() and args.quiet.
    """
    # Use lazy import to limit imports on 'import pyperf'
    from pyperf._compare import timeit_compare_benchs
    from pyperf._master import Master

    args = self.args
    python_ref = args.compare_to
    python_changed = args.python
    multiline = self._multiline_output()

    # Human-readable labels for the two interpreters; derived from the
    # paths unless overridden on the command line.
    if args.python_names:
        name_ref, name_changed = args.python_names
    else:
        name_ref, name_changed = get_python_names(python_ref, python_changed)

    benchs = []
    # Reference interpreter first, then the changed one.
    for python, name in ((python_ref, name_ref),
                         (python_changed, name_changed)):
        if self._worker_task > 0:
            # Separate this task's output from the previous one.
            print()

        if multiline:
            display_title('Benchmark %s' % name)
        elif not args.quiet:
            # Compact mode: result printed on the same line below.
            print(name, end=': ')

        bench = Master(self, python=python).create_bench()
        benchs.append(bench)

        if multiline:
            self._display_result(bench)
        elif not args.quiet:
            print(' ' + format_result_value(bench))

        if multiline:
            print()
        elif not args.quiet:
            # Compact mode still surfaces stability warnings, framed by
            # blank lines.
            warnings = format_checks(bench)
            if warnings:
                print()
                for line in warnings:
                    print(line)
                print()

    if multiline:
        display_title('Compare')
    elif not args.quiet:
        print()

    timeit_compare_benchs(name_ref, benchs[0], name_changed, benchs[1], args)
def cmd_slowest(args):
    """Print the args.n benchmarks with the longest total duration.

    One ranked list per suite; the suite filename is shown as a title
    when more than one suite is loaded.
    """
    data = load_benchmarks(args, name=False)
    limit = args.n
    show_title = (data.get_nsuite() > 1)

    for entry in data.iter_suites():
        if show_title:
            display_title(entry.filename, 1)

        # Rank benchmarks by total duration, longest first.
        ranked = sorted(
            ((bench.get_total_duration(), bench) for bench in entry.suite),
            key=lambda pair: pair[0],
            reverse=True)

        for rank, (duration, bench) in enumerate(ranked[:limit], 1):
            print("#%s: %s (%s)"
                  % (rank, bench.get_name(), format_timedelta(duration)))
def display_benchmarks(args, show_metadata=False, hist=False, stats=False,
                       dump=False, result=False, checks=False,
                       display_runs_args=None, only_checks=False):
    """Display loaded benchmarks.

    Two modes:

    * titled mode (use_title): full per-benchmark sections (metadata,
      histogram, stats, dump, checks) with filename/name titles;
    * compact mode: one result line per benchmark.

    BUG FIX: in the source, the literal "All benchmarks seem to be
    stable" was split across two physical lines, breaking the string;
    it is rejoined here and the surrounding control flow reconstructed.
    """
    data = load_benchmarks(args)
    output = []

    if show_metadata:
        metadatas = [item.benchmark.get_metadata() for item in data]
        _display_common_metadata(metadatas, lines=output)

    # Titles are needed as soon as anything beyond the bare result is shown.
    if hist or stats or dump or show_metadata or (not result):
        use_title = True
    else:
        use_title = False
        if checks:
            # Any warning forces the titled display so the warning is
            # attributed to the right benchmark.
            for index, item in enumerate(data):
                warnings = format_checks(item.benchmark)
                if warnings:
                    use_title = True
                    break

    if use_title:
        show_filename = (data.get_nsuite() > 1)
        show_name = not data.has_same_unique_benchmark()
        if not show_filename and stats:
            show_filename = (len(data) > 1)

        suite = None
        for index, item in enumerate(data):
            lines = []
            if show_metadata:
                metadata = metadatas[index]
                if metadata:
                    empty_line(lines)
                    lines.append("Metadata:")
                    format_metadata(metadata, lines=lines)

            bench_lines = format_benchmark(item.benchmark,
                                           hist=hist,
                                           stats=stats,
                                           dump=dump,
                                           checks=checks,
                                           result=result,
                                           display_runs_args=display_runs_args)
            if bench_lines:
                empty_line(lines)
                lines.extend(bench_lines)

            if lines:
                # Prepend titles (and per-suite stats) before the
                # benchmark body built above.
                bench_lines = lines
                lines = []
                if show_filename and item.suite is not suite:
                    suite = item.suite
                    format_title(item.filename, 1, lines=lines)

                    if stats and len(suite) > 1:
                        empty_line(lines)
                        duration = suite.get_total_duration()
                        lines.append("Number of benchmarks: %s" % len(suite))
                        lines.append("Total duration: %s"
                                     % format_seconds(duration))
                        dates = suite.get_dates()
                        if dates:
                            start, end = dates
                            lines.append("Start date: %s"
                                         % format_datetime(start,
                                                           microsecond=False))
                            lines.append("End date: %s"
                                         % format_datetime(end,
                                                           microsecond=False))

                if show_name:
                    format_title(item.name, 2, lines=lines)
                empty_line(lines)
                lines.extend(bench_lines)

            if lines:
                empty_line(output)
                output.extend(lines)

        for line in output:
            print(line)

        if not output and only_checks:
            if len(data) == 1:
                print("The benchmark seems to be stable")
            else:
                print("All benchmarks seem to be stable")
    else:
        for line in output:
            print(line)

        # Compact mode: one result line per benchmark, grouped by file.
        show_filename = (data.get_nsuite() > 1)
        suite = None
        for item in data:
            if show_filename and item.suite is not suite:
                if suite is not None:
                    print()
                suite = item.suite
                display_title(item.filename, 1)

            line = format_result(item.benchmark)
            if item.title:
                line = '%s: %s' % (item.name, line)
            print(line)