Example 1 (score: 0)
 def test_format_seconds(self):
     """format_seconds() picks a human-readable unit: ms, sec, min, hour, day."""
     cases = (
         (0, "0 sec"),
         (316e-4, "31.6 ms"),
         (15.9, "15.9 sec"),
         (3 * 60 + 15.9, "3 min 15.9 sec"),
         (404683.5876653, "4 day 16 hour 24 min"),
     )
     for seconds, expected in cases:
         self.assertEqual(format_seconds(seconds), expected)
Example 2 (score: 0)
 def test_format_seconds(self):
     """format_seconds() renders a duration with the appropriate unit(s)."""
     inputs = (0, 316e-4, 15.9, 3 * 60 + 15.9, 404683.5876653)
     expected = ("0 sec", "31.6 ms", "15.9 sec",
                 "3 min 15.9 sec", "4 day 16 hour 24 min")
     for value, text in zip(inputs, expected):
         self.assertEqual(format_seconds(value), text)
Example 3 (score: 0) — File: _cli.py, Project: zooba/perf
def format_stats(bench, lines):
    """Append a multi-section, human-readable statistics report for *bench*.

    Sections: total duration and start/end dates, raw value extrema,
    run/value counts, loop-iteration breakdown, an aligned
    min/median/mean/max table, percentiles, and an outlier count.

    Args:
        bench: a perf Benchmark object.
        lines: list of str; report lines are appended in place.

    Returns:
        The same *lines* list, for chaining.
    """
    fmt = bench.format_value
    values = bench.get_values()

    nrun = bench.get_nrun()
    nvalue = len(values)

    empty_line(lines)

    # Total duration
    duration = bench.get_total_duration()
    if duration:
        lines.append("Total duration: %s" % format_seconds(duration))

    # Start/End dates
    dates = bench.get_dates()
    if dates:
        start, end = dates
        lines.append("Start date: %s" %
                     format_datetime(start, microsecond=False))
        lines.append("End date: %s" % format_datetime(end, microsecond=False))

    # Raw value minimum/maximum
    raw_values = bench._get_raw_values()
    lines.append("Raw value minimum: %s" % bench.format_value(min(raw_values)))
    lines.append("Raw value maximum: %s" % bench.format_value(max(raw_values)))
    lines.append('')

    # Number of runs: calibration runs vs runs carrying values
    ncalibration_runs = sum(run._is_calibration() for run in bench._runs)
    lines.append("Number of calibration run: %s" %
                 format_number(ncalibration_runs))
    lines.append("Number of run with values: %s" %
                 (format_number(nrun - ncalibration_runs)))
    lines.append("Total number of run: %s" % format_number(nrun))
    lines.append('')

    # Warmups / values per run; a float means the count is an average
    # over non-uniform runs.
    nwarmup = bench._get_nwarmup()
    text = format_number(nwarmup)
    if isinstance(nwarmup, float):
        text += ' (average)'
    lines.append('Number of warmup per run: %s' % text)

    nvalue_per_run = bench._get_nvalue_per_run()
    text = format_number(nvalue_per_run)
    if isinstance(nvalue_per_run, float):
        text += ' (average)'
    lines.append('Number of value per run: %s' % text)

    # Loop iterations per value: total = outer loops x inner loops
    loops = bench.get_loops()
    inner_loops = bench.get_inner_loops()
    total_loops = loops * inner_loops
    if isinstance(total_loops, int):
        text = format_number(total_loops)
    else:
        text = "%s (average)" % total_loops

    # Only detail the outer/inner split when inner_loops is not trivially 1
    if not (isinstance(inner_loops, int) and inner_loops == 1):
        if isinstance(loops, int):
            loops = format_number(loops, 'outer-loop')
        else:
            # BUG FIX: the original omitted "% loops", so the literal
            # format string was displayed instead of the averaged count.
            loops = '%.1f outer-loops (average)' % loops

        if isinstance(inner_loops, int):
            inner_loops = format_number(inner_loops, 'inner-loop')
        else:
            inner_loops = "%.1f inner-loops (average)" % inner_loops

        text = '%s (%s x %s)' % (text, loops, inner_loops)

    lines.append("Loop iterations per value: %s" % text)
    lines.append("Total number of values: %s" % format_number(nvalue))
    lines.append('')

    # Summary table: minimum, median (+- MAD), mean (+- std dev), maximum.
    # Dispersion rows need more than 2 values to be meaningful.
    table = []
    table.append(("Minimum", bench.format_value(min(values))))

    # Median +- MAD
    median = bench.median()
    if len(values) > 2:
        median_abs_dev = bench.median_abs_dev()
        table.append(("Median +- MAD", "%s +- %s" % bench.format_values(
            (median, median_abs_dev))))
    else:
        # BUG FIX: the value shown here is the median, not the mean.
        table.append(("Median", bench.format_value(median)))

    # Mean +- std dev
    mean = bench.mean()
    if len(values) > 2:
        stdev = bench.stdev()
        table.append(("Mean +- std dev", "%s +- %s" % bench.format_values(
            (mean, stdev))))
    else:
        table.append(("Mean", bench.format_value(mean)))

    table.append(("Maximum", bench.format_value(max(values))))

    # Render the table with left-aligned "key:" columns
    width = max(len(row[0]) + 1 for row in table)
    for key, value in table:
        key = (key + ':').ljust(width)
        lines.append("%s %s" % (key, value))
    lines.append('')

    def format_limit(mean, value):
        # Show the value and its signed deviation from the mean, in percent.
        return ("%s (%+.0f%% of the mean)" % (fmt(value),
                                              (value - mean) * 100.0 / mean))

    # Percentiles, annotated with their common names where defined
    for p in (0, 5, 25, 50, 75, 95, 100):
        text = format_limit(mean, bench.percentile(p))
        text = "%3sth percentile: %s" % (p, text)
        name = PERCENTILE_NAMES.get(p)
        if name:
            text = '%s -- %s' % (text, name)
        lines.append(text)
    lines.append('')

    # Outliers: Tukey fences at 1.5 * IQR beyond the quartiles
    q1 = bench.percentile(25)
    q3 = bench.percentile(75)
    iqr = q3 - q1
    outlier_min = (q1 - 1.5 * iqr)
    outlier_max = (q3 + 1.5 * iqr)
    noutlier = sum(not (outlier_min <= value <= outlier_max)
                   for value in values)
    bounds = bench.format_values((outlier_min, outlier_max))
    lines.append('Number of outlier (out of %s..%s): %s' %
                 (bounds[0], bounds[1], format_number(noutlier)))

    return lines
Example 4 (score: 0)
def display_benchmarks(args, show_metadata=False, hist=False, stats=False,
                       dump=False, result=False, checks=False,
                       display_runs_args=None, only_checks=False):
    """Load benchmarks from *args* and print a report to stdout.

    Two layouts are used: a detailed "titled" layout (per-file and
    per-benchmark section titles) when any verbose option is requested,
    and a compact one-line-per-benchmark layout otherwise.

    Args:
        args: parsed CLI arguments understood by load_benchmarks().
        show_metadata: include per-benchmark metadata sections.
        hist/stats/dump/result/checks: forwarded to format_benchmark().
        display_runs_args: extra options forwarded to format_benchmark().
        only_checks: when only checks ran and produced no output,
            print a "stable" confirmation message instead.
    """
    data = load_benchmarks(args)

    output = []

    if show_metadata:
        # NOTE: 'metadatas' is also read further down (index-aligned with
        # 'data'), so it is only defined when show_metadata is true.
        metadatas = [item.benchmark.get_metadata() for item in data]
        _display_common_metadata(metadatas, lines=output)

    # Decide the layout: any verbose option (or the absence of the compact
    # 'result' mode) forces titles; otherwise titles are used only when at
    # least one benchmark produces check warnings.
    if hist or stats or dump or show_metadata or (not result):
        use_title = True
    else:
        use_title = False
        if checks:
            for index, item in enumerate(data):
                warnings = format_checks(item.benchmark)
                if warnings:
                    use_title = True
                    break

    if use_title:
        # Show file titles only when several suites are loaded (or when
        # stats are requested for more than one benchmark).
        show_filename = (data.get_nsuite() > 1)
        show_name = not data.has_same_unique_benchmark()
        if not show_filename and stats:
            show_filename = (len(data) > 1)

        suite = None
        for index, item in enumerate(data):
            lines = []

            if show_metadata:
                metadata = metadatas[index]
                if metadata:
                    empty_line(lines)
                    lines.append("Metadata:")
                    format_metadata(metadata, lines=lines)

            bench_lines = format_benchmark(item.benchmark,
                                           hist=hist,
                                           stats=stats,
                                           dump=dump,
                                           checks=checks,
                                           result=result,
                                           display_runs_args=display_runs_args)

            if bench_lines:
                empty_line(lines)
                lines.extend(bench_lines)

            if lines:
                # Prepend titles: stash the body, rebuild 'lines' starting
                # with the file/name headers, then re-append the body.
                bench_lines = lines
                lines = []

                # Emit the file title once per suite (items are grouped
                # by suite, so an identity change marks a new file).
                if show_filename and item.suite is not suite:
                    suite = item.suite
                    format_title(item.filename, 1, lines=lines)

                    if stats and len(suite) > 1:
                        empty_line(lines)

                        # Suite-level summary shown under the file title
                        duration = suite.get_total_duration()
                        lines.append("Number of benchmarks: %s" % len(suite))
                        lines.append("Total duration: %s" % format_seconds(duration))
                        dates = suite.get_dates()
                        if dates:
                            start, end = dates
                            lines.append("Start date: %s" % format_datetime(start, microsecond=False))
                            lines.append("End date: %s" % format_datetime(end, microsecond=False))

                if show_name:
                    format_title(item.name, 2, lines=lines)

                empty_line(lines)
                lines.extend(bench_lines)

            if lines:
                empty_line(output)
                output.extend(lines)

        for line in output:
            print(line)

        # Nothing printed and only checks were requested: all checks passed.
        if not output and only_checks:
            if len(data) == 1:
                print("The benchmark seems to be stable")
            else:
                print("All benchmarks seem to be stable")
    else:
        # Compact layout: flush any common-metadata lines, then one result
        # line per benchmark, grouped under file titles when needed.
        for line in output:
            print(line)

        show_filename = (data.get_nsuite() > 1)

        suite = None
        for item in data:
            if show_filename and item.suite is not suite:
                if suite is not None:
                    print()

                suite = item.suite
                display_title(item.filename, 1)

            line = format_result(item.benchmark)
            if item.title:
                line = '%s: %s' % (item.name, line)
            print(line)
Example 5 (score: 0) — File: _cli.py, Project: haypo/perf
def format_stats(bench, lines):
    """Append a multi-section, human-readable statistics report for *bench*.

    Sections: total duration and start/end dates, raw value extrema,
    run/value counts, loop-iteration breakdown, an aligned
    min/median/mean/max table, percentiles, and an outlier count.

    Args:
        bench: a perf Benchmark object.
        lines: list of str; report lines are appended in place.

    Returns:
        The same *lines* list, for chaining.
    """
    fmt = bench.format_value
    values = bench.get_values()

    nrun = bench.get_nrun()
    nvalue = len(values)

    empty_line(lines)

    # Total duration
    duration = bench.get_total_duration()
    if duration:
        lines.append("Total duration: %s" % format_seconds(duration))

    # Start/End dates
    dates = bench.get_dates()
    if dates:
        start, end = dates
        lines.append("Start date: %s" % format_datetime(start, microsecond=False))
        lines.append("End date: %s" % format_datetime(end, microsecond=False))

    # Raw value minimum/maximum
    raw_values = bench._get_raw_values()
    lines.append("Raw value minimum: %s" % bench.format_value(min(raw_values)))
    lines.append("Raw value maximum: %s" % bench.format_value(max(raw_values)))
    lines.append('')

    # Number of runs: calibration runs vs runs carrying values
    ncalibration_runs = sum(run._is_calibration() for run in bench._runs)
    lines.append("Number of calibration run: %s"
                 % format_number(ncalibration_runs))
    lines.append("Number of run with values: %s"
                 % (format_number(nrun - ncalibration_runs)))
    lines.append("Total number of run: %s" % format_number(nrun))
    lines.append('')

    # Warmups / values per run; a float means the count is an average
    # over non-uniform runs.
    nwarmup = bench._get_nwarmup()
    text = format_number(nwarmup)
    if isinstance(nwarmup, float):
        text += ' (average)'
    lines.append('Number of warmup per run: %s' % text)

    nvalue_per_run = bench._get_nvalue_per_run()
    text = format_number(nvalue_per_run)
    if isinstance(nvalue_per_run, float):
        text += ' (average)'
    lines.append('Number of value per run: %s' % text)

    # Loop iterations per value: total = outer loops x inner loops
    loops = bench.get_loops()
    inner_loops = bench.get_inner_loops()
    total_loops = loops * inner_loops
    if isinstance(total_loops, int):
        text = format_number(total_loops)
    else:
        text = "%s (average)" % total_loops

    # Only detail the outer/inner split when inner_loops is not trivially 1
    if not(isinstance(inner_loops, int) and inner_loops == 1):
        if isinstance(loops, int):
            loops = format_number(loops, 'outer-loop')
        else:
            # BUG FIX: the original omitted "% loops", so the literal
            # format string was displayed instead of the averaged count.
            loops = '%.1f outer-loops (average)' % loops

        if isinstance(inner_loops, int):
            inner_loops = format_number(inner_loops, 'inner-loop')
        else:
            inner_loops = "%.1f inner-loops (average)" % inner_loops

        text = '%s (%s x %s)' % (text, loops, inner_loops)

    lines.append("Loop iterations per value: %s" % text)
    lines.append("Total number of values: %s" % format_number(nvalue))
    lines.append('')

    # Summary table: minimum, median (+- MAD), mean (+- std dev), maximum.
    # Dispersion rows need more than 2 values to be meaningful.
    table = []
    table.append(("Minimum", bench.format_value(min(values))))

    # Median +- MAD
    median = bench.median()
    if len(values) > 2:
        median_abs_dev = bench.median_abs_dev()
        table.append(("Median +- MAD",
                      "%s +- %s"
                      % bench.format_values((median, median_abs_dev))))
    else:
        # BUG FIX: the value shown here is the median, not the mean.
        table.append(("Median", bench.format_value(median)))

    # Mean +- std dev
    mean = bench.mean()
    if len(values) > 2:
        stdev = bench.stdev()
        table.append(("Mean +- std dev",
                      "%s +- %s" % bench.format_values((mean, stdev))))
    else:
        table.append(("Mean", bench.format_value(mean)))

    table.append(("Maximum", bench.format_value(max(values))))

    # Render the table with left-aligned "key:" columns
    width = max(len(row[0]) + 1 for row in table)
    for key, value in table:
        key = (key + ':').ljust(width)
        lines.append("%s %s" % (key, value))
    lines.append('')

    def format_limit(mean, value):
        # Show the value and its signed deviation from the mean, in percent.
        return ("%s (%+.0f%% of the mean)"
                % (fmt(value), (value - mean) * 100.0 / mean))

    # Percentiles, annotated with their common names where defined
    for p in (0, 5, 25, 50, 75, 95, 100):
        text = format_limit(mean, bench.percentile(p))
        text = "%3sth percentile: %s" % (p, text)
        name = PERCENTILE_NAMES.get(p)
        if name:
            text = '%s -- %s' % (text, name)
        lines.append(text)
    lines.append('')

    # Outliers: Tukey fences at 1.5 * IQR beyond the quartiles
    q1 = bench.percentile(25)
    q3 = bench.percentile(75)
    iqr = q3 - q1
    outlier_min = (q1 - 1.5 * iqr)
    outlier_max = (q3 + 1.5 * iqr)
    noutlier = sum(not(outlier_min <= value <= outlier_max)
                   for value in values)
    bounds = bench.format_values((outlier_min, outlier_max))
    lines.append('Number of outlier (out of %s..%s): %s'
                 % (bounds[0], bounds[1], format_number(noutlier)))

    return lines