def _RunPerformanceTest(config):
  """Runs a performance test with and without the current patch.

  Args:
    config: Contents of the config file, a dictionary.

  Attempts to build and run the current revision with and without the
  current patch, with the parameters passed in.
  """
  # Bisect script expects to be run from the src directory
  os.chdir(SRC_DIR)

  opts = _CreateBisectOptionsFromConfig(config)
  revisions = _ResolveRevisionsFromConfig(config)
  annotations_dict = _GetStepAnnotationStringsDict(config)
  b = bisect_perf_regression.BisectPerformanceMetrics(opts, os.getcwd())

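  # Build and sync the first revision; the patch under test is still applied
  # at this point.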
  _RunBuildStepForPerformanceTest(b,
                                  annotations_dict.get('build1'),
                                  annotations_dict.get('sync1'),
                                  revisions[0])

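  # Run the performance test and collect results with the patch applied.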
  results_with_patch = _RunCommandStepForPerformanceTest(
      b, opts, True, True, annotations_dict['results_label1'],
      annotations_dict['run1'])

  bisect_utils.OutputAnnotationStepStart('Reverting Patch')
  # TODO: When this is rewritten to use recipes, this should use bot_update's
  # revert mechanism to fully revert the client. For now, since the perf try
  # bot only supports src/ and src/third_party/WebKit, we simply reset those
  # two directories.
  bisect_utils.CheckRunGit(['reset', '--hard'])
  bisect_utils.CheckRunGit(['reset', '--hard'],
                           os.path.join('third_party', 'WebKit'))
  bisect_utils.OutputAnnotationStepClosed()

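  # Rebuild at the second configured revision now that the patch has been
  # reverted.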
  _RunBuildStepForPerformanceTest(b,
                                  annotations_dict.get('build2'),
                                  annotations_dict.get('sync2'),
                                  revisions[1])

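  # Run the performance test again to collect the unpatched results.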
  results_without_patch = _RunCommandStepForPerformanceTest(
      b, opts, False, True, annotations_dict['results_label2'],
      annotations_dict['run2'])

  # Find the link to the cloud stored results file.
  _ParseAndOutputCloudLinks(
      results_without_patch, results_with_patch, annotations_dict)


def _RunBenchmarksForCommitQueue(config):
    """Runs Telemetry benchmark for the commit queue."""
    os.chdir(SRC_DIR)
    # To determine the bot platform by reading buildbot name from environment
    # variable.
    bot_name = os.environ.get(BUILDBOT_BUILDERNAME)
    if not bot_name:
        bot_name = sys.platform
    bot_name = bot_name.split('_')[0]

  affected_benchmarks = _GetAffectedBenchmarkModuleNames()
  # Abort if the patch does not modify any existing benchmark files.
  if not affected_benchmarks:
    bisect_utils.OutputAnnotationStepStart('Results')
    print('')
    print('There are no modifications to Telemetry benchmarks,'
          ' aborting the try job.')
    bisect_utils.OutputAnnotationStepClosed()
    return 0

  # Get the options required to create a BisectPerformanceMetrics instance.
  # Since command is a required arg in BisectPerformanceMetrics, we just create
  # a dummy command for now.
  opts = _GetConfigBasedOnPlatform(config, bot_name, test_name='')
  annotations_dict = _GetStepAnnotationStringsDict(config)
  b = bisect_perf_regression.BisectPerformanceMetrics(opts, os.getcwd())
  _RunBuildStepForPerformanceTest(b, annotations_dict.get('build1'),
                                  annotations_dict.get('sync1'), None)
  available_benchmarks = _ListAvailableBenchmarks(bot_name)
  overall_results = {}
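  # Run each available benchmark whose name matches an affected benchmark
  # module, skipping 'reference' variants.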
  for affected_benchmark in affected_benchmarks:
    for benchmark in available_benchmarks:
      if (benchmark.startswith(affected_benchmark)
          and not benchmark.endswith('reference')):
        overall_results[benchmark] = _RunBenchmark(
            b, opts, bot_name, benchmark)

  return _OutputOverallResults(overall_results)