Example no. 1
        config.use_threads = False
        for oneTest in aloneTests:
            if stopping():
                break
            oneTest(watcher)
    except KeyboardInterrupt:
        pass

    # flush everything before we continue
    sys.stdout.flush()

    # Warn if had to force skip perf tests (see Note force skip perf tests).
    spacing = "       "
    if forceSkipPerfTests and not args.skip_perf_tests:
        print()
        print(str_warn('Skipping All Performance Tests') + ' `git` exited with non-zero exit code.')
        print(spacing + 'Git is required because performance test results are compared with ancestor git commits\' results (stored with git notes).')
        print(spacing + 'You can still run the tests without git by specifying an output file with --metrics-file FILE.')

    # Warn of new metrics.
    new_metrics = [metric for (change, metric) in t.metrics if change == MetricChange.NewMetric]
    if any(new_metrics):
        if inside_git_repo():
            reason = 'a baseline (expected value) cannot be recovered from' + \
                ' previous git commits. This may be due to HEAD having' + \
                ' new tests or having expected changes, the presence of' + \
                ' expected changes since the last run of the tests, and/or' + \
                ' the latest test run being too old.'
            fix = 'If the tests exist on the previous' + \
                ' commit (And are configured to run with the same ways),' + \
                ' then check out that commit and run the tests to generate' + \
Example no. 2
            heading = 'Metrics: %s' % metric_name
            print()
            print(heading)
            print('-' * len(heading))
            print()
            tabulate_metrics(stats)
    else:
        print("\nNone collected.")
    print("")

    # Warn if had to force skip perf tests (see Note force skip perf tests).
    spacing = "       "
    if forceSkipPerfTests and not args.skip_perf_tests:
        print()
        print(
            str_warn('Skipping All Performance Tests') +
            ' `git` exited with non-zero exit code.')
        print(
            spacing +
            'Git is required because performance test results are compared with ancestor git commits\' results (stored with git notes).'
        )
        print(
            spacing +
            'You can still run the tests without git by specifying an output file with --metrics-file FILE.'
        )

    # Warn of new metrics.
    new_metrics = [
        metric for (change, metric, baseline) in t.metrics
        if change == MetricChange.NewMetric
    ]
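
Note that Example no. 2 destructures each entry of t.metrics as a 3-tuple (change, metric, baseline), while the other examples use a 2-tuple (change, metric); the excerpts evidently come from different revisions of the same driver. Below is a minimal, self-contained sketch of the new-metrics filter in its 3-tuple form. MetricChange, PerfMetric, and the sample data are illustrative stand-ins, not the real GHC testsuite driver definitions:

# Minimal sketch of the "new metrics" filter shown above.
# MetricChange, PerfMetric, and the sample data are assumptions
# for illustration only; the real driver's types differ.
from collections import namedtuple
from enum import Enum

class MetricChange(Enum):
    NewMetric = 'new'       # no baseline exists yet
    NoChange = 'unchanged'  # within tolerance of the baseline

PerfMetric = namedtuple('PerfMetric', ['test', 'way', 'value'])

# Entries shaped like Example no. 2: (change, metric, baseline).
metrics = [
    (MetricChange.NewMetric, PerfMetric('T123', 'normal', 4200), None),
    (MetricChange.NoChange,  PerfMetric('T456', 'normal', 1000), 1000),
]

new_metrics = [
    metric for (change, metric, baseline) in metrics
    if change == MetricChange.NewMetric
]

# Same indented-list formatting the driver uses for the warning body.
spacing = "       "
print(spacing + ('\n' + spacing).join(set(m.test for m in new_metrics)))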
Example no. 3
        for oneTest in aloneTests:
            if stopping():
                break
            oneTest(watcher)
    except KeyboardInterrupt:
        pass

    # flush everything before we continue
    sys.stdout.flush()

    # Warn if had to force skip perf tests (see Note force skip perf tests).
    spacing = "       "
    if forceSkipPerfTests and not args.skip_perf_tests:
        print()
        print(
            str_warn('Skipping All Performance Tests') +
            ' `git status` exited with non-zero exit code.')
        print(
            spacing +
            'Git is required because performance test results are compared with the previous git commit\'s results (stored with git notes).'
        )
        print(
            spacing +
            'You can still run the tests without git by specifying an output file with --metrics-file FILE.'
        )

    # Warn of new metrics.
    new_metrics = [
        metric for (change, metric) in t.metrics
        if change == MetricChange.NewMetric
    ]
Example no. 4
        config.use_threads = False
        for oneTest in aloneTests:
            if stopping():
                break
            oneTest(watcher)
    except KeyboardInterrupt:
        pass

    # flush everything before we continue
    sys.stdout.flush()

    # Warn if had to force skip perf tests (see Note force skip perf tests).
    spacing = "       "
    if forceSkipPerfTests and not args.skip_perf_tests:
        print()
        print(str_warn('Skipping All Performance Tests') + ' `git status` exited with non-zero exit code.')
        print(spacing + 'Git is required because performance test results are compared with the previous git commit\'s results (stored with git notes).')
        print(spacing + 'You can still run the tests without git by specifying an output file with --metrics-file FILE.')

    # Warn of new metrics.
    new_metrics = [metric for (change, metric) in t.metrics if change == MetricChange.NewMetric]
    if any(new_metrics):
        if canGitStatus:
            reason = 'the previous git commit doesn\'t have recorded metrics for the following tests.' + \
                  ' If the tests exist on the previous commit, then check it out and run the tests to generate the missing metrics.'
        else:
            reason = 'this is not a git repo so the previous git commit\'s metrics cannot be loaded from git notes:'
        print()
        print(str_warn('New Metrics') + ' these metrics trivially pass because ' + reason)
        print(spacing + ('\n' + spacing).join(set([metric.test for metric in new_metrics])))
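
The examples gate the warning text on inside_git_repo() or a canGitStatus flag, whose definitions are not part of these excerpts. As a rough sketch only, under the assumption that such a predicate shells out to git, it could look like this (the real driver may implement it differently):

# Hedged sketch of an inside_git_repo()-style check, assuming it
# invokes `git rev-parse`; not the real driver's implementation.
import subprocess

def inside_git_repo() -> bool:
    try:
        result = subprocess.run(
            ['git', 'rev-parse', '--is-inside-work-tree'],
            capture_output=True, text=True)
    except FileNotFoundError:
        return False  # git itself is not installed
    return result.returncode == 0 and result.stdout.strip() == 'true'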
Example no. 5
    except KeyboardInterrupt:
        pass

    # flush everything before we continue
    sys.stdout.flush()

    # Warn of new metrics.
    new_metrics = [
        metric for (change, metric) in t.metrics
        if change == MetricChange.NewMetric
    ]
    spacing = "    "
    if any(new_metrics):
        print()
        print(
            str_warn('New Metrics') +
            ' the previous git commit doesn\'t have metrics for the following tests:'
        )
        print(spacing +
              ('\n' +
               spacing).join(set([metric.test for metric in new_metrics])))

    # Inform of how to accept metric changes.
    if (len(t.unexpected_stat_failures) > 0):
        print()
        print(
            str_info("Some stats have changed") +
            " If this is expected, allow changes by appending the git commit message with this:"
        )
        print('-' * 25)
        print(Perf.allow_changes_string(t.metrics))
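
The excerpts rely on str_warn and str_info to style their headings. A plausible sketch, assuming they simply prepend an ANSI-coloured label (the real GHC driver helpers may differ, for example by only emitting colour codes when stdout is a terminal):

# Hedged sketch of str_warn/str_info as used in the examples above;
# the ANSI formatting here is an assumption, not the driver's code.
def str_warn(s: str) -> str:
    return '\033[1;31mWarning:\033[0m \033[1m%s\033[0m' % s

def str_info(s: str) -> str:
    return '\033[1;34mInfo:\033[0m \033[1m%s\033[0m' % s

print(str_warn('Skipping All Performance Tests') +
      ' `git` exited with non-zero exit code.')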