def _RunBuildStepForPerformanceTest(bisect_instance, build_string, sync_string,
                                    revision):
  if revision:
    bisect_utils.OutputAnnotationStepStart(sync_string)
    if not source_control.SyncToRevision(revision, 'gclient'):
      raise RuntimeError('Failed to sync to [%s].' % revision)
    bisect_utils.OutputAnnotationStepClosed()

  bisect_utils.OutputAnnotationStepStart(build_string)

  if bisect_utils.RunGClient(['runhooks']):
    raise RuntimeError('Failed to run gclient runhooks')

  if not bisect_instance.ObtainBuild('chromium'):
    raise RuntimeError('Failed to build (%s).' % build_string)

  bisect_utils.OutputAnnotationStepClosed()
Example #2
def _PrintConfigStep(config):
    """Prints out the given config, along with Buildbot annotations."""
    bisect_utils.OutputAnnotationStepStart('Config')
    print
    for k, v in config.iteritems():
        print '  %s : %s' % (k, v)
    print
    bisect_utils.OutputAnnotationStepClosed()
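
A minimal usage sketch for the step above; the config keys shown are illustrative assumptions, not a fixed schema.

# Hypothetical usage; the keys below are illustrative only.
sample_config = {
    'command': 'tools/perf/run_benchmark -v --browser=release sunspider',
    'metric': 'Total/Total',
    'repeat_count': '20',
}
_PrintConfigStep(sample_config)
# Emits a 'Config' annotation step listing each key/value pair.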
Example #3
def _ParseAndOutputCloudLinks(results_without_patch, results_with_patch,
                              annotations_dict):
    cloud_links_without_patch = _ParseCloudLinksFromOutput(
        results_without_patch[2])
    cloud_links_with_patch = _ParseCloudLinksFromOutput(results_with_patch[2])

    cloud_file_link = (cloud_links_without_patch['html-results'][0]
                       if cloud_links_without_patch['html-results'] else '')

    profiler_file_links_with_patch = cloud_links_with_patch['profiler']
    profiler_file_links_without_patch = cloud_links_without_patch['profiler']

    # Calculate the % difference in the means of the 2 runs (a worked sketch of
    # this formula follows this function).
    percent_diff_in_means = None
    std_err = None
    if ('mean' in results_with_patch[0]
            and 'values' in results_with_patch[0]):
        percent_diff_in_means = (results_with_patch[0]['mean'] / max(
            0.0001, results_without_patch[0]['mean'])) * 100.0 - 100.0
        std_err = math_utils.PooledStandardError([
            results_with_patch[0]['values'], results_without_patch[0]['values']
        ])

    if percent_diff_in_means is not None and std_err is not None:
        bisect_utils.OutputAnnotationStepStart(
            'Results - %.02f +- %0.02f delta' %
            (percent_diff_in_means, std_err))
        print ' %s %s %s' % (''.center(10, ' '), 'Mean'.center(
            20, ' '), 'Std. Error'.center(20, ' '))
        print ' %s %s %s' % (
            'Patch'.center(10, ' '),
            ('%.02f' % results_with_patch[0]['mean']).center(20, ' '),
            ('%.02f' % results_with_patch[0]['std_err']).center(20, ' '))
        print ' %s %s %s' % (
            'No Patch'.center(10, ' '),
            ('%.02f' % results_without_patch[0]['mean']).center(20, ' '),
            ('%.02f' % results_without_patch[0]['std_err']).center(20, ' '))
        if cloud_file_link:
            bisect_utils.OutputAnnotationStepLink('HTML Results',
                                                  cloud_file_link)
        bisect_utils.OutputAnnotationStepClosed()
    elif cloud_file_link:
        bisect_utils.OutputAnnotationStepLink('HTML Results', cloud_file_link)

    if profiler_file_links_with_patch and profiler_file_links_without_patch:
        for i in xrange(len(profiler_file_links_with_patch)):
            bisect_utils.OutputAnnotationStepLink(
                '%s[%d]' % (annotations_dict.get('profiler_link1'), i),
                profiler_file_links_with_patch[i])
        for i in xrange(len(profiler_file_links_without_patch)):
            bisect_utils.OutputAnnotationStepLink(
                '%s[%d]' % (annotations_dict.get('profiler_link2'), i),
                profiler_file_links_without_patch[i])
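
A worked sketch of the percent-difference-in-means formula used above, with made-up numbers:

# Made-up numbers to illustrate the formula above.
mean_with_patch = 105.0
mean_without_patch = 100.0
# max(0.0001, ...) guards against dividing by a zero baseline mean.
percent_diff = (mean_with_patch /
                max(0.0001, mean_without_patch)) * 100.0 - 100.0
# percent_diff == 5.0, i.e. the patched mean is 5% above the baseline.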
Example #4
def _RunPerformanceTest(config):
  """Runs a performance test with and without the current patch.

  Args:
    config: Contents of the config file, a dictionary.

  Attempts to build and run the current revision with and without the
  current patch, with the parameters passed in.
  """
  # Bisect script expects to be run from the src directory
  os.chdir(SRC_DIR)

  opts = _CreateBisectOptionsFromConfig(config)
  revisions = _ResolveRevisionsFromConfig(config)
  annotations_dict = _GetStepAnnotationStringsDict(config)
  b = bisect_perf_regression.BisectPerformanceMetrics(opts, os.getcwd())

  _RunBuildStepForPerformanceTest(b,
                                  annotations_dict.get('build1'),
                                  annotations_dict.get('sync1'),
                                  revisions[0])

  results_with_patch = _RunCommandStepForPerformanceTest(
      b, opts, True, True, annotations_dict['results_label1'],
      annotations_dict['run1'])

  bisect_utils.OutputAnnotationStepStart('Reverting Patch')
  # TODO: When this is re-written to recipes, this should use bot_update's
  # revert mechanism to fully revert the client. But for now, since we know that
  # the perf try bot currently only supports src/ and src/third_party/WebKit, we
  # simply reset those two directories.
  bisect_utils.CheckRunGit(['reset', '--hard'])
  bisect_utils.CheckRunGit(['reset', '--hard'],
                           os.path.join('third_party', 'WebKit'))
  bisect_utils.OutputAnnotationStepClosed()

  _RunBuildStepForPerformanceTest(b,
                                  annotations_dict.get('build2'),
                                  annotations_dict.get('sync2'),
                                  revisions[1])

  results_without_patch = _RunCommandStepForPerformanceTest(
      b, opts, False, True, annotations_dict['results_label2'],
      annotations_dict['run2'])

  # Find the link to the cloud stored results file.
  _ParseAndOutputCloudLinks(
      results_without_patch, results_with_patch, annotations_dict)
Example #5
def _OutputOverallResults(results):
    """Creates results step and prints results on buildbot job."""
    test_status = all(results.values())
    bisect_utils.OutputAnnotationStepStart(
        'Results - %s' % ('Passed' if test_status else 'Failed'))
    print
    print 'Results of benchmarks:'
    print
    for benchmark, result in results.iteritems():
        print '%s: %s' % (benchmark, 'Passed' if result else 'Failed')
    if not test_status:
        bisect_utils.OutputAnnotationStepFailure()
    bisect_utils.OutputAnnotationStepClosed()
    # Returns 0 for success and 1 for failure.
    return 0 if test_status else 1
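
A small sketch of how the results dict maps to the exit code; the benchmark names are made up.

# Illustrative only; benchmark names are made up.
sample_results = {'sunspider': True, 'octane': False}
exit_code = _OutputOverallResults(sample_results)
# Prints a 'Results - Failed' step, marks the step as failed because one
# benchmark did not pass, and returns 1.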
Example #6
def _RunCommandStepForPerformanceTest(bisect_instance, opts,
                                      reset_on_first_run, upload_on_last_run,
                                      results_label, run_string):
    bisect_utils.OutputAnnotationStepStart(run_string)

    results = bisect_instance.RunPerformanceTestAndParseResults(
        opts.command,
        opts.metric,
        reset_on_first_run=reset_on_first_run,
        upload_on_last_run=upload_on_last_run,
        results_label=results_label,
        allow_flakes=False)

    if results[1]:
        raise RuntimeError('Performance test failed to run (%s).' % run_string)

    bisect_utils.OutputAnnotationStepClosed()

    return results
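
The shape of the returned results tuple is not documented here; the sketch below is inferred from how callers in this file index it.

# Inferred from caller usage in this file (not a documented contract):
#   results[0] - dict of parsed metric values, e.g. 'mean', 'std_err', 'values'
#   results[1] - return code of the test command (0 on success)
#   results[2] - raw test output, later scanned for cloud-storage result links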
Example #7
def _RunBenchmark(bisect_instance, opts, bot_name, benchmark_name):
    """Runs a Telemetry benchmark."""
    bisect_utils.OutputAnnotationStepStart(benchmark_name)
    command_to_run = _GuessTelemetryTestCommand(bot_name, benchmark_name)
    args = shlex.split(command_to_run, posix=not bisect_utils.IsWindowsHost())
    output, return_code = bisect_utils.RunProcessAndRetrieveOutput(
        args, SRC_DIR)
    # A value other than 0 indicates that the test couldn't be run, and results
    # should also include an error message.
    if return_code:
        print(
            'Error: Something went wrong running the benchmark: %s. '
            'Please review the command line: %s\n\n%s' %
            (benchmark_name, command_to_run, output))
        bisect_utils.OutputAnnotationStepFailure()
    print output
    bisect_utils.OutputAnnotationStepClosed()
    # return_code is the exit status of the subprocess that ran the test
    # command; it is 0 on a successful run and non-zero otherwise.
    return return_code == 0
Example #8
def _RunBenchmarksForCommitQueue(config):
    """Runs Telemetry benchmark for the commit queue."""
    # The bisect script expects to be run from the src directory.
    os.chdir(SRC_DIR)
    # Determine the bot platform by reading the buildbot name from the
    # environment variable.
    bot_name = os.environ.get(BUILDBOT_BUILDERNAME)
    if not bot_name:
        bot_name = sys.platform
    bot_name = bot_name.split('_')[0]

    affected_benchmarks = _GetAffectedBenchmarkModuleNames()
    # Abort if no existing benchmark files were changed.
    if not affected_benchmarks:
        bisect_utils.OutputAnnotationStepStart('Results')
        print
        print(
            'There are no modifications to Telemetry benchmarks;'
            ' aborting the try job.')
        bisect_utils.OutputAnnotationStepClosed()
        return 0

    # Get the options required to create a BisectPerformanceMetrics instance.
    # Since command is a required arg in BisectPerformanceMetrics, we just
    # create a dummy command for now.
    opts = _GetConfigBasedOnPlatform(config, bot_name, test_name='')
    annotations_dict = _GetStepAnnotationStringsDict(config)
    b = bisect_perf_regression.BisectPerformanceMetrics(opts, os.getcwd())
    _RunBuildStepForPerformanceTest(b, annotations_dict.get('build1'),
                                    annotations_dict.get('sync1'), None)
    available_benchmarks = _ListAvailableBenchmarks(bot_name)
    overall_results = {}
    for affected_benchmark in affected_benchmarks:
        for benchmark in available_benchmarks:
            if (benchmark.startswith(affected_benchmark)
                    and not benchmark.endswith('reference')):
                overall_results[benchmark] = _RunBenchmark(
                    b, opts, bot_name, benchmark)

    return _OutputOverallResults(overall_results)
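
A short sketch of the prefix/suffix matching used in the loop above; the benchmark names are hypothetical.

# Hypothetical names: an affected module 'blink_perf' selects the matching
# benchmarks but skips '.reference' variants.
affected_benchmarks = ['blink_perf']
available_benchmarks = ['blink_perf.css', 'blink_perf.css.reference',
                        'blink_perf.dom', 'octane']
selected = [b for a in affected_benchmarks for b in available_benchmarks
            if b.startswith(a) and not b.endswith('reference')]
# selected == ['blink_perf.css', 'blink_perf.dom']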
Example #9
def _OutputFailedResults(text_to_print):
    bisect_utils.OutputAnnotationStepStart('Results - Failed')
    print
    print text_to_print
    print
    bisect_utils.OutputAnnotationStepClosed()
Example #10
def _RunPerformanceTest(config, path_to_file):
    """Runs a performance test with and without the current patch.

  Args:
    config: Contents of the config file, a dictionary.
    path_to_file: Path to the bisect-perf-regression.py script.

  Attempts to build and run the current revision with and without the
  current patch, with the parameters passed in.
  """
    # Bisect script expects to be run from the src directory
    os.chdir(os.path.join(path_to_file, '..'))

    bisect_utils.OutputAnnotationStepStart('Building With Patch')

    opts = _CreateBisectOptionsFromConfig(config)
    b = bisect.BisectPerformanceMetrics(None, opts)

    if bisect_utils.RunGClient(['runhooks']):
        raise RuntimeError('Failed to run gclient runhooks')

    if not b.BuildCurrentRevision('chromium'):
        raise RuntimeError('Patched version failed to build.')

    bisect_utils.OutputAnnotationStepClosed()
    bisect_utils.OutputAnnotationStepStart('Running With Patch')

    results_with_patch = b.RunPerformanceTestAndParseResults(
        opts.command,
        opts.metric,
        reset_on_first_run=True,
        results_label='Patch')

    if results_with_patch[1]:
        raise RuntimeError('Patched version failed to run performance test.')

    bisect_utils.OutputAnnotationStepClosed()

    bisect_utils.OutputAnnotationStepStart('Reverting Patch')
    # TODO: When this is re-written to recipes, this should use bot_update's
    # revert mechanism to fully revert the client. But for now, since we know that
    # the perf trybot currently only supports src/ and src/third_party/WebKit, we
    # simply reset those two directories.
    bisect_utils.CheckRunGit(['reset', '--hard'])
    bisect_utils.CheckRunGit(['reset', '--hard'],
                             os.path.join('third_party', 'WebKit'))
    bisect_utils.OutputAnnotationStepClosed()

    bisect_utils.OutputAnnotationStepStart('Building Without Patch')

    if bisect_utils.RunGClient(['runhooks']):
        raise RuntimeError('Failed to run gclient runhooks')

    if not b.BuildCurrentRevision('chromium'):
        raise RuntimeError('Unpatched version failed to build.')

    bisect_utils.OutputAnnotationStepClosed()
    bisect_utils.OutputAnnotationStepStart('Running Without Patch')

    results_without_patch = b.RunPerformanceTestAndParseResults(
        opts.command,
        opts.metric,
        upload_on_last_run=True,
        results_label='ToT')

    if results_without_patch[1]:
        raise RuntimeError('Unpatched version failed to run performance test.')

    # Find the link to the cloud-stored results file (a small extraction sketch
    # follows this function).
    output = results_without_patch[2]
    cloud_file_link = [
        t for t in output.splitlines()
        if 'storage.googleapis.com/chromium-telemetry/html-results/' in t
    ]
    if cloud_file_link:
        # What we're getting here is basically "View online at http://..." so parse
        # out just the url portion.
        cloud_file_link = cloud_file_link[0]
        cloud_file_link = [
            t for t in cloud_file_link.split(' ')
            if 'storage.googleapis.com/chromium-telemetry/html-results/' in t
        ]
        assert cloud_file_link, "Couldn't parse url from output."
        cloud_file_link = cloud_file_link[0]
    else:
        cloud_file_link = ''

    # Calculate the % difference in the means of the 2 runs.
    percent_diff_in_means = None
    std_err = None
    if ('mean' in results_with_patch[0]
            and 'values' in results_with_patch[0]):
        percent_diff_in_means = (results_with_patch[0]['mean'] / max(
            0.0001, results_without_patch[0]['mean'])) * 100.0 - 100.0
        std_err = math_utils.PooledStandardError([
            results_with_patch[0]['values'], results_without_patch[0]['values']
        ])

    bisect_utils.OutputAnnotationStepClosed()
    if percent_diff_in_means is not None and std_err is not None:
        bisect_utils.OutputAnnotationStepStart(
            'Results - %.02f +- %0.02f delta' %
            (percent_diff_in_means, std_err))
        print ' %s %s %s' % (''.center(10, ' '), 'Mean'.center(
            20, ' '), 'Std. Error'.center(20, ' '))
        print ' %s %s %s' % (
            'Patch'.center(10, ' '),
            ('%.02f' % results_with_patch[0]['mean']).center(20, ' '),
            ('%.02f' % results_with_patch[0]['std_err']).center(20, ' '))
        print ' %s %s %s' % (
            'No Patch'.center(10, ' '),
            ('%.02f' % results_without_patch[0]['mean']).center(20, ' '),
            ('%.02f' % results_without_patch[0]['std_err']).center(20, ' '))
        if cloud_file_link:
            bisect_utils.OutputAnnotationStepLink('HTML Results',
                                                  cloud_file_link)
        bisect_utils.OutputAnnotationStepClosed()
    elif cloud_file_link:
        bisect_utils.OutputAnnotationStepLink('HTML Results', cloud_file_link)
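
A small standalone sketch of the cloud-link extraction performed in _RunPerformanceTest above; the output line and URL are made up.

# Made-up output line to illustrate the link extraction above.
sample_output = ('View online at https://storage.googleapis.com/'
                 'chromium-telemetry/html-results/example')
lines = [t for t in sample_output.splitlines()
         if 'storage.googleapis.com/chromium-telemetry/html-results/' in t]
url = [t for t in lines[0].split(' ')
       if 'storage.googleapis.com/chromium-telemetry/html-results/' in t][0]
# url == 'https://storage.googleapis.com/chromium-telemetry/html-results/example'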