def _RunPerformanceTest(config):
  """Builds and runs a perf test against the patched and unpatched tree.

  Builds and runs the current revision first with the patch applied, then
  reverts src/ and src/third_party/WebKit and repeats the build/run, finally
  emitting links to any cloud-stored result files.

  Args:
    config: Contents of the config file, a dictionary.
  """
  # The bisect script assumes the src directory is the working directory.
  os.chdir(SRC_DIR)

  options = _CreateBisectOptionsFromConfig(config)
  revision_pair = _ResolveRevisionsFromConfig(config)
  annotations = _GetStepAnnotationStringsDict(config)
  metrics = bisect_perf_regression.BisectPerformanceMetrics(
      options, os.getcwd())

  # Patched run: sync/build the first revision, then execute the test.
  _RunBuildStepForPerformanceTest(
      metrics, annotations.get('build1'), annotations.get('sync1'),
      revision_pair[0])
  results_with_patch = _RunCommandStepForPerformanceTest(
      metrics, options, True, True, annotations['results_label1'],
      annotations['run1'])

  bisect_utils.OutputAnnotationStepStart('Reverting Patch')
  # TODO: When this is re-written to recipes, this should use bot_update's
  # revert mechanism to fully revert the client. But for now, since we know
  # that the perf try bot currently only supports src/ and
  # src/third_party/WebKit, we simply reset those two directories.
  bisect_utils.CheckRunGit(['reset', '--hard'])
  bisect_utils.CheckRunGit(['reset', '--hard'],
                           os.path.join('third_party', 'WebKit'))
  bisect_utils.OutputAnnotationStepClosed()

  # Unpatched run: same steps against the second revision.
  _RunBuildStepForPerformanceTest(
      metrics, annotations.get('build2'), annotations.get('sync2'),
      revision_pair[1])
  results_without_patch = _RunCommandStepForPerformanceTest(
      metrics, options, False, True, annotations['results_label2'],
      annotations['run2'])

  # Find the link to the cloud stored results file.
  _ParseAndOutputCloudLinks(
      results_without_patch, results_with_patch, annotations)
# Example #2 (score: 0)
def _GetModifiedFilesFromPatch(cwd=None):
    """Returns the files touched by the currently applied patch.

    Runs `git diff --name-only HEAD~1` and splits the output on whitespace.

    Args:
      cwd: Optional directory in which to run git; defaults to the current
          working directory.

    Returns:
      A list of modified file paths reported by git.
    """
    diff_args = ['diff', '--no-ext-diff', '--name-only', 'HEAD~1']
    return bisect_utils.CheckRunGit(diff_args, cwd=cwd).split()
# Example #3 (score: 0)
def _RunPerformanceTest(config, path_to_file):
    """Runs a performance test with and without the current patch.

    Attempts to build and run the current revision with and without the
    current patch, with the parameters passed in, then reports the percent
    difference in means between the two runs.

    Args:
      config: Contents of the config file, a dictionary.
      path_to_file: Path to the bisect-perf-regression.py script.

    Raises:
      RuntimeError: If gclient runhooks fails, either build fails, or either
          performance test run fails.
    """
    # Bisect script expects to be run from the src directory.
    os.chdir(os.path.join(path_to_file, '..'))

    bisect_utils.OutputAnnotationStepStart('Building With Patch')

    opts = _CreateBisectOptionsFromConfig(config)
    b = bisect.BisectPerformanceMetrics(None, opts)

    if bisect_utils.RunGClient(['runhooks']):
        raise RuntimeError('Failed to run gclient runhooks')

    if not b.BuildCurrentRevision('chromium'):
        raise RuntimeError('Patched version failed to build.')

    bisect_utils.OutputAnnotationStepClosed()
    bisect_utils.OutputAnnotationStepStart('Running With Patch')

    results_with_patch = b.RunPerformanceTestAndParseResults(
        opts.command,
        opts.metric,
        reset_on_first_run=True,
        results_label='Patch')

    # Index 1 of the results tuple is a nonzero error code on failure.
    if results_with_patch[1]:
        raise RuntimeError('Patched version failed to run performance test.')

    bisect_utils.OutputAnnotationStepClosed()

    bisect_utils.OutputAnnotationStepStart('Reverting Patch')
    # TODO: When this is re-written to recipes, this should use bot_update's
    # revert mechanism to fully revert the client. But for now, since we know
    # that the perf trybot currently only supports src/ and
    # src/third_party/WebKit, we simply reset those two directories.
    bisect_utils.CheckRunGit(['reset', '--hard'])
    bisect_utils.CheckRunGit(['reset', '--hard'],
                             os.path.join('third_party', 'WebKit'))
    bisect_utils.OutputAnnotationStepClosed()

    bisect_utils.OutputAnnotationStepStart('Building Without Patch')

    if bisect_utils.RunGClient(['runhooks']):
        raise RuntimeError('Failed to run gclient runhooks')

    if not b.BuildCurrentRevision('chromium'):
        raise RuntimeError('Unpatched version failed to build.')

    bisect_utils.OutputAnnotationStepClosed()
    bisect_utils.OutputAnnotationStepStart('Running Without Patch')

    results_without_patch = b.RunPerformanceTestAndParseResults(
        opts.command,
        opts.metric,
        upload_on_last_run=True,
        results_label='ToT')

    if results_without_patch[1]:
        raise RuntimeError('Unpatched version failed to run performance test.')

    # Find the link to the cloud stored results file.
    output = results_without_patch[2]
    cloud_file_link = [
        t for t in output.splitlines()
        if 'storage.googleapis.com/chromium-telemetry/html-results/' in t
    ]
    if cloud_file_link:
        # What we're getting here is basically "View online at http://..." so
        # parse out just the url portion.
        cloud_file_link = cloud_file_link[0]
        cloud_file_link = [
            t for t in cloud_file_link.split(' ')
            if 'storage.googleapis.com/chromium-telemetry/html-results/' in t
        ]
        assert cloud_file_link, "Couldn't parse url from output."
        cloud_file_link = cloud_file_link[0]
    else:
        cloud_file_link = ''

    # Calculate the % difference in the means of the 2 runs. Guard BOTH
    # result dicts: the original only checked the patched results, so a
    # missing 'mean'/'values' in the unpatched results raised KeyError below.
    # ('in' replaces the Python-2-only dict.has_key.)
    percent_diff_in_means = None
    std_err = None
    if ('mean' in results_with_patch[0]
            and 'values' in results_with_patch[0]
            and 'mean' in results_without_patch[0]
            and 'values' in results_without_patch[0]):
        # max() guards against division by a zero (or tiny) unpatched mean.
        percent_diff_in_means = (results_with_patch[0]['mean'] / max(
            0.0001, results_without_patch[0]['mean'])) * 100.0 - 100.0
        std_err = math_utils.PooledStandardError([
            results_with_patch[0]['values'], results_without_patch[0]['values']
        ])

    bisect_utils.OutputAnnotationStepClosed()
    if percent_diff_in_means is not None and std_err is not None:
        bisect_utils.OutputAnnotationStepStart(
            'Results - %.02f +- %0.02f delta' %
            (percent_diff_in_means, std_err))
        # Parenthesized single-argument form prints identically under
        # Python 2 and is valid under Python 3.
        print(' %s %s %s' % (''.center(10, ' '), 'Mean'.center(
            20, ' '), 'Std. Error'.center(20, ' ')))
        print(' %s %s %s' % (
            'Patch'.center(10, ' '),
            ('%.02f' % results_with_patch[0]['mean']).center(20, ' '),
            ('%.02f' % results_with_patch[0]['std_err']).center(20, ' ')))
        print(' %s %s %s' % (
            'No Patch'.center(10, ' '),
            ('%.02f' % results_without_patch[0]['mean']).center(20, ' '),
            ('%.02f' % results_without_patch[0]['std_err']).center(20, ' ')))
        if cloud_file_link:
            bisect_utils.OutputAnnotationStepLink('HTML Results',
                                                  cloud_file_link)
        bisect_utils.OutputAnnotationStepClosed()
    elif cloud_file_link:
        bisect_utils.OutputAnnotationStepLink('HTML Results', cloud_file_link)