Example #1
    def test_format_result(self):
        # A single run with three values: mean 1.50 sec, std dev 0.50 sec.
        run = pyperf.Run([1.0, 1.5, 2.0],
                         warmups=[(1, 3.0)],
                         metadata={'name': 'mybench'},
                         collect_metadata=False)
        bench = pyperf.Benchmark([run])
        self.assertEqual(cli.format_result_value(bench),
                         '1.50 sec +- 0.50 sec')
        self.assertEqual(cli.format_result(bench),
                         'Mean +- std dev: 1.50 sec +- 0.50 sec')
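The same formatting can be exercised outside the test class; a minimal sketch, assuming pyperf is installed and that the tests' cli alias refers to the private pyperf._cli module (the import path is an assumption, not a documented API):

import pyperf
import pyperf._cli as cli  # private module; exact path is an assumption

# Build a benchmark from three hand-written timings (mean 1.50 s, std dev 0.50 s).
run = pyperf.Run([1.0, 1.5, 2.0],
                 metadata={'name': 'mybench'},
                 collect_metadata=False)
bench = pyperf.Benchmark([run])

print(cli.format_result_value(bench))  # expected: '1.50 sec +- 0.50 sec'
print(cli.format_result(bench))        # expected: 'Mean +- std dev: 1.50 sec +- 0.50 sec'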
Example #2
    def oneliner(self, verbose=True, show_name=True, check_significant=True):
        if check_significant and not self.significant:
            return "Not significant!"

        ref_text = format_result_value(self.ref.benchmark)
        chg_text = format_result_value(self.changed.benchmark)
        if verbose:
            if show_name:
                ref_text = "[%s] %s" % (self.ref.name, ref_text)
                chg_text = "[%s] %s" % (self.changed.name, chg_text)
            if (self.ref.benchmark.get_nvalue() > 1
                    or self.changed.benchmark.get_nvalue() > 1):
                text = "Mean +- std dev: %s -> %s" % (ref_text, chg_text)
            else:
                text = "%s -> %s" % (ref_text, chg_text)
        else:
            text = "%s -> %s" % (ref_text, chg_text)

        text = "%s: %s" % (text, format_normalized_mean(self.norm_mean))
        return text
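For illustration only, this is the kind of line the verbose branch assembles; the bracketed names, the timings and the trailing factor are made-up values, and a plain 'x faster' string stands in for format_normalized_mean():

ref_text = "[ref] 1.50 sec +- 0.50 sec"
chg_text = "[changed] 1.20 sec +- 0.30 sec"
text = "Mean +- std dev: %s -> %s" % (ref_text, chg_text)
text = "%s: %s" % (text, "1.25x faster")  # placeholder for format_normalized_mean()
print(text)
# Mean +- std dev: [ref] 1.50 sec +- 0.50 sec -> [changed] 1.20 sec +- 0.30 sec: 1.25x faster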
Example #3
    def test_format_result_calibration(self):
        # An empty values list plus warmups marks the run as a calibration run.
        run = pyperf.Run([],
                         warmups=[(100, 1.0)],
                         metadata={
                             'name': 'bench',
                             'loops': 100
                         },
                         collect_metadata=False)
        bench = pyperf.Benchmark([run])
        self.assertEqual(cli.format_result_value(bench),
                         '<calibration: 100 loops>')
        self.assertEqual(cli.format_result(bench), 'Calibration: 100 loops')
        self.assertRaises(ValueError, bench.median)
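As the assertions show, a run with no values and only warmup samples is formatted as a calibration result: format_result_value() reports the loop count taken from the 'loops' metadata, and statistics such as median() raise ValueError because no values were recorded.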
Example #4
    def _compare_to(self):
        # Use lazy import to limit imports on 'import pyperf'
        from pyperf._compare import timeit_compare_benchs
        from pyperf._master import Master

        args = self.args
        python_ref = args.compare_to
        python_changed = args.python

        multiline = self._multiline_output()
        if args.python_names:
            name_ref, name_changed = args.python_names
        else:
            name_ref, name_changed = get_python_names(python_ref,
                                                      python_changed)

        benchs = []
        for python, name in ((python_ref, name_ref), (python_changed,
                                                      name_changed)):
            if self._worker_task > 0:
                print()

            if multiline:
                display_title('Benchmark %s' % name)
            elif not args.quiet:
                print(name, end=': ')

            bench = Master(self, python=python).create_bench()
            benchs.append(bench)

            if multiline:
                self._display_result(bench)
            elif not args.quiet:
                print(' ' + format_result_value(bench))

            if multiline:
                print()
            elif not args.quiet:
                warnings = format_checks(bench)
                if warnings:
                    print()
                    for line in warnings:
                        print(line)
                    print()

        if multiline:
            display_title('Compare')
        elif not args.quiet:
            print()
        timeit_compare_benchs(name_ref, benchs[0], name_changed, benchs[1],
                              args)
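This command-level helper benchmarks the reference and the changed Python in turn via Master.create_bench(); in the compact (non-multiline) mode each result is printed with format_result_value() followed by any format_checks() warnings, and the two collected Benchmark objects are then handed to timeit_compare_benchs() to produce the comparison output.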