Example #1
            reason = "this is not a git repo so the previous git commit's" + \
                     " metrics cannot be loaded from git notes:"
            fix = ""
        print()
        print(str_warn('Missing Baseline Metrics') + \
                ' these metrics trivially pass because ' + reason)
        print(spacing + (' ').join(set([metric.test
                                        for metric in new_metrics])))
        if fix != "":
            print()
            print(fix)

    # Inform of how to accept metric changes.
    if (len(t.unexpected_stat_failures) > 0):
        print()
        print(str_info("Some stats have changed") + " If this is expected, " + \
            "allow changes by appending the git commit message with this:")
        print('-' * 25)
        print(
            Perf.allow_changes_string([(m.change, m.stat) for m in t.metrics]))
        print('-' * 25)

    summary(t, sys.stdout, config.no_print_summary, config.supports_colors)

    # Write perf stats if any exist or if a metrics file is specified.
    stats_metrics = [stat
                     for (_, stat, __) in t.metrics]  # type: List[PerfStat]
    if hasMetricsFile:
        print('Appending ' + str(len(stats_metrics)) + ' stats to file: ' +
              config.metrics_file)
        with open(config.metrics_file, 'a') as f:
            f.write("\n" + Perf.format_perf_stat(stats_metrics))
Example #2
        else:
            reason = "this is not a git repo so the previous git commit's" + \
                     " metrics cannot be loaded from git notes:"
            fix = ""
        print()
        print(str_warn('Missing Baseline Metrics') + \
                ' these metrics trivially pass because ' + reason)
        print(spacing + (' ').join(set([metric.test for metric in new_metrics])))
        if fix != "":
            print()
            print(fix)

    # Inform of how to accept metric changes.
    if (len(t.unexpected_stat_failures) > 0):
        print()
        print(str_info("Some stats have changed") + " If this is expected, " + \
            "allow changes by appending the git commit message with this:")
        print('-' * 25)
        print(Perf.allow_changes_string(t.metrics))
        print('-' * 25)

    summary(t, sys.stdout, config.no_print_summary, True)

    # Write perf stats if any exist or if a metrics file is specified.
    stats = [stat for (_, stat) in t.metrics]
    if hasMetricsFile:
        print('Appending ' + str(len(stats)) + ' stats to file: ' + config.metrics_file)
        with open(config.metrics_file, 'a') as file:
            file.write("\n" + Perf.format_perf_stat(stats))
    elif inside_git_repo() and any(stats):
        if is_worktree_dirty():
Example #3
    # Warn of new metrics.
    new_metrics = [metric for (change, metric) in t.metrics if change == MetricChange.NewMetric]
    if any(new_metrics):
        if canGitStatus:
            reason = 'the previous git commit doesn\'t have recorded metrics for the following tests.' + \
                  ' If the tests exist on the previous commit, then check it out and run the tests to generate the missing metrics.'
        else:
            reason = 'this is not a git repo so the previous git commit\'s metrics cannot be loaded from git notes:'
        print()
        print(str_warn('New Metrics') + ' these metrics trivially pass because ' + reason)
        print(spacing + ('\n' + spacing).join(set([metric.test for metric in new_metrics])))

    # Inform of how to accept metric changes.
    if (len(t.unexpected_stat_failures) > 0):
        print()
        print(str_info("Some stats have changed") + " If this is expected, allow changes by appending the git commit message with this:")
        print('-' * 25)
        print(Perf.allow_changes_string(t.metrics))
        print('-' * 25)

    summary(t, sys.stdout, config.no_print_summary, True)

    # Write perf stats if any exist or if a metrics file is specified.
    stats = [stat for (_, stat) in t.metrics]
    if hasMetricsFile:
        print('Appending ' + str(len(stats)) + ' stats to file: ' + config.metrics_file)
        with open(config.metrics_file, 'a') as file:
            file.write("\n" + Perf.format_perf_stat(stats))
    elif canGitStatus and any(stats):
        if is_worktree_dirty():
            print()
Example #4
                  ' If the tests exist on the previous commit, then check it out and run the tests to generate the missing metrics.'
        else:
            reason = 'this is not a git repo so the previous git commit\'s metrics cannot be loaded from git notes:'
        print()
        print(str_warn('New Metrics') +
              ' these metrics trivially pass because ' + reason)
        print(spacing + ('\n' + spacing).join(
            set(metric.test for metric in new_metrics)))

    # Inform of how to accept metric changes.
    if (len(t.unexpected_stat_failures) > 0):
        print()
        print(
            str_info("Some stats have changed") +
            " If this is expected, allow changes by appending the git commit message with this:"
        )
        print('-' * 25)
        print(Perf.allow_changes_string(t.metrics))
        print('-' * 25)

    summary(t, sys.stdout, config.no_print_summary, True)

    # Write perf stats if any exist or if a metrics file is specified.
    stats = [stat for (_, stat) in t.metrics]
    if hasMetricsFile:
        print('Appending ' + str(len(stats)) + ' stats to file: ' +
              config.metrics_file)
        with open(config.metrics_file, 'a') as file:
            file.write("\n" + Perf.format_perf_stat(stats))
Example #5
    # Warn of new metrics.
    new_metrics = [metric for (change, metric) in t.metrics if change == MetricChange.NewMetric]
    if any(new_metrics):
        if canGitStatus:
            reason = 'the previous git commit doesn\'t have recorded metrics for the following tests.' + \
                  ' If the tests exist on the previous commit, then check it out and run the tests to generate the missing metrics.'
        else:
            reason = 'this is not a git repo so the previous git commit\'s metrics cannot be loaded from git notes:'
        print()
        print(str_warn('New Metrics') + ' these metrics trivially pass because ' + reason)
        print(spacing + ('\n' + spacing).join(set([metric.test for metric in new_metrics])))

    # Inform of how to accept metric changes.
    if (len(t.unexpected_stat_failures) > 0):
        print()
        print(str_info("Some stats have changed") + " If this is expected, allow changes by appending the git commit message with this:")
        print('-' * 25)
        print(Perf.allow_changes_string(t.metrics))
        print('-' * 25)

    summary(t, sys.stdout, config.no_print_summary, True)

    # Write perf stats if any exist or if a metrics file is specified.
    stats = [stat for (_, stat) in t.metrics]
    if hasMetricsFile:
        print('Appending ' + str(len(stats)) + ' stats to file: ' + config.metrics_file)
        with open(config.metrics_file, 'a') as file:
            file.write("\n" + Perf.format_perf_stat(stats))
    elif canGitStatus and any(stats):
        Perf.append_perf_stat(stats)