Example 1
def test_format_result_calibration(self):
    # Calibration-only run: warmup data but no values, so formatting reports
    # the loop count and median() raises because there is nothing to compute.
    run = perf.Run([], warmups=[(100, 1.0)],
                   metadata={'name': 'bench', 'loops': 100},
                   collect_metadata=False)
    bench = perf.Benchmark([run])
    self.assertEqual(cli.format_result_value(bench), '<calibration: 100 loops>')
    self.assertEqual(cli.format_result(bench), 'Calibration: 100 loops')
    self.assertRaises(ValueError, bench.median)
Example 2
def test_format_result(self):
    # Three values (1.0, 1.5, 2.0) give a mean of 1.5 sec and a sample
    # standard deviation of 0.5 sec; the warmup value is not counted.
    run = perf.Run([1.0, 1.5, 2.0],
                   warmups=[(1, 3.0)],
                   metadata={'name': 'mybench'},
                   collect_metadata=False)
    bench = perf.Benchmark([run])
    self.assertEqual(cli.format_result_value(bench),
                     '1.50 sec +- 0.50 sec')
    self.assertEqual(cli.format_result(bench),
                     'Mean +- std dev: 1.50 sec +- 0.50 sec')
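
A minimal sketch of the same calls outside a test case, assuming cli refers to perf's internal CLI-formatting module (the import path below is a guess and may differ between perf and pyperf versions); everything else reuses only the constructor and helper calls shown in the examples above.

import perf
# Assumption: format_result()/format_result_value() live in perf's internal
# CLI module; adjust this import to match your perf/pyperf version.
from perf import _cli as cli

# Build a benchmark from three timings, exactly as in the test above.
run = perf.Run([1.0, 1.5, 2.0],
               warmups=[(1, 3.0)],
               metadata={'name': 'mybench'},
               collect_metadata=False)
bench = perf.Benchmark([run])

print(cli.format_result_value(bench))  # 1.50 sec +- 0.50 sec
print(cli.format_result(bench))        # Mean +- std dev: 1.50 sec +- 0.50 sec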
Example 3
def display_benchmarks(args, show_metadata=False, hist=False, stats=False,
                       dump=False, result=False, checks=False,
                       display_runs_args=None, only_checks=False):
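    """Print the benchmarks loaded from args: either titled per-benchmark
    sections (metadata, stats, histogram, dump, check warnings), or one
    compact result line per benchmark."""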
    data = load_benchmarks(args)

    output = []

    if show_metadata:
        metadatas = [item.benchmark.get_metadata() for item in data]
        _display_common_metadata(metadatas, lines=output)

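    # Use titled per-benchmark sections when any extra output was requested
    # (or a check emits a warning); otherwise fall back to the compact
    # one-line-per-benchmark output in the else branch below.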
    if hist or stats or dump or show_metadata or (not result):
        use_title = True
    else:
        use_title = False
        if checks:
            for index, item in enumerate(data):
                warnings = format_checks(item.benchmark)
                if warnings:
                    use_title = True
                    break

    if use_title:
        show_filename = (data.get_nsuite() > 1)
        show_name = not data.has_same_unique_benchmark()
        if not show_filename and stats:
            show_filename = (len(data) > 1)

        suite = None
        for index, item in enumerate(data):
            lines = []

            if show_metadata:
                metadata = metadatas[index]
                if metadata:
                    empty_line(lines)
                    lines.append("Metadata:")
                    format_metadata(metadata, lines=lines)

            bench_lines = format_benchmark(item.benchmark,
                                           hist=hist,
                                           stats=stats,
                                           dump=dump,
                                           checks=checks,
                                           result=result,
                                           display_runs_args=display_runs_args)

            if bench_lines:
                empty_line(lines)
                lines.extend(bench_lines)

            if lines:
                bench_lines = lines
                lines = []

                if show_filename and item.suite is not suite:
                    suite = item.suite
                    format_title(item.filename, 1, lines=lines)

                    if stats and len(suite) > 1:
                        empty_line(lines)

                        duration = suite.get_total_duration()
                        lines.append("Number of benchmarks: %s" % len(suite))
                        lines.append("Total duration: %s" % format_seconds(duration))
                        dates = suite.get_dates()
                        if dates:
                            start, end = dates
                            lines.append("Start date: %s" % format_datetime(start, microsecond=False))
                            lines.append("End date: %s" % format_datetime(end, microsecond=False))

                if show_name:
                    format_title(item.name, 2, lines=lines)

                empty_line(lines)
                lines.extend(bench_lines)

            if lines:
                empty_line(output)
                output.extend(lines)

        for line in output:
            print(line)

        if not output and only_checks:
            if len(data) == 1:
                print("The benchmark seems to be stable")
            else:
                print("All benchmarks seem to be stable")
    else:
        for line in output:
            print(line)

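        # Compact mode: one result line per benchmark (prefixed with its name
        # when a title is set), preceded by a filename title when more than
        # one suite was loaded.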
        show_filename = (data.get_nsuite() > 1)

        suite = None
        for item in data:
            if show_filename and item.suite is not suite:
                if suite is not None:
                    print()

                suite = item.suite
                display_title(item.filename, 1)

            line = format_result(item.benchmark)
            if item.title:
                line = '%s: %s' % (item.name, line)
            print(line)
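
For illustration, the compact else branch above reduces to something like the following standalone sketch. It reuses only calls that appear on this page (perf.Run, perf.Benchmark, cli.format_result); the make_bench helper, the hard-coded names and timings, and the _cli import path are assumptions made for this sketch, while real input would come from load_benchmarks(args).

import perf
from perf import _cli as cli  # assumption: internal module path may vary


def make_bench(name, values):
    # Hypothetical helper for this sketch: wrap raw timings in a Benchmark,
    # mirroring the constructor calls used in the tests above.
    run = perf.Run(values, warmups=[(1, values[0])],
                   metadata={'name': name},
                   collect_metadata=False)
    return perf.Benchmark([run])


benchmarks = [('call_simple', make_bench('call_simple', [1.0, 1.5, 2.0])),
              ('call_method', make_bench('call_method', [2.0, 2.5, 3.0]))]

# Mirrors the compact path: one "name: result" line per benchmark.
for name, bench in benchmarks:
    print('%s: %s' % (name, cli.format_result(bench)))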