Example #1
def execute_benchmark(benchmark,
                      isolated_out_dir,
                      args,
                      rest_args,
                      is_reference,
                      stories=None):
    start = time.time()
    # While we are transitioning between the chartjson and histogram set
    # output formats, we need to determine which format to look for, or see
    # whether it was already passed in, in which case that format applies to
    # all benchmarks in this run.
    is_histograms = append_output_format(args, rest_args)
    # Insert the benchmark name right after the run_benchmark entry, which is
    # the first element of rest_args. The output format and smoke test mode
    # also need to be appended.
    per_benchmark_args = (rest_args[:1] + [benchmark] + rest_args[1:])
    benchmark_name = benchmark
    if is_reference:
        # Parse out the browser flag and replace it with the reference build
        # so the benchmark runs against the reference build as well.
        browser_index = 0
        for arg in per_benchmark_args:
            if "browser" in arg:
                break
            browser_index += 1
        per_benchmark_args[browser_index] = '--browser=reference'
        # Now we need to add in the rest of the reference build args
        per_benchmark_args.append('--max-failures=5')
        per_benchmark_args.append('--output-trace-tag=_ref')
        benchmark_name = benchmark + '.reference'

    # If we are only running a subset of stories, add the begin and end
    # indices.
    if stories:
        if 'begin' in stories:
            per_benchmark_args.append(
                '--experimental-story-shard-begin-index=%d' %
                stories['begin'])
        if 'end' in stories:
            per_benchmark_args.append(
                '--experimental-story-shard-end-index=%d' % stories['end'])

    # We don't care exactly what these are. In particular, the perf results
    # could be any format (chartjson, legacy, histogram). We just pass these
    # through, and expose these as results for this task.
    rc, perf_results, json_test_results, benchmark_log = (
        run_telemetry_benchmark_as_googletest.run_benchmark(
            args, per_benchmark_args, is_histograms))

    write_results(benchmark_name, perf_results, json_test_results,
                  benchmark_log, isolated_out_dir, False)

    print_duration('executing benchmark %s' % benchmark_name, start)
    return rc
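
This variant delegates output-format selection to an append_output_format helper defined elsewhere in the module. Below is a minimal sketch of what such a helper could look like, assuming behavior inferred from the comments above and from the inline logic in the later variant; the real helper may differ.

def append_output_format(args, rest_args):
    # If the caller already passed --output-format, that format applies to
    # every benchmark in this run; just report whether it is the histogram
    # set.
    if args.output_format:
        return any('histograms' in fmt for fmt in args.output_format)
    # Otherwise append a default (assumption: chartjson, matching the
    # fallback in the per-benchmark variant later in this file).
    rest_args.append('--output-format=chartjson')
    return False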
Example #2
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--isolated-script-test-output', required=True)
    parser.add_argument('--isolated-script-test-chartjson-output',
                        required=False)
    parser.add_argument('--isolated-script-test-perf-output', required=False)
    parser.add_argument('--isolated-script-test-filter',
                        type=str,
                        required=False)
    parser.add_argument('--xvfb', help='Start xvfb.', action='store_true')
    parser.add_argument('--output-format', action='append')
    parser.add_argument('--builder', required=True)
    parser.add_argument(
        '--bot',
        required=True,
        help='Bot ID to use to determine which tests to run. Will'
        ' use //tools/perf/core/benchmark_sharding_map.json'
        ' with this as a key to determine which benchmarks'
        ' to execute')

    args, rest_args = parser.parse_known_args()
    # --output-format uses action='append', so it is None when not passed.
    for output_format in args.output_format or []:
        rest_args.append('--output-format=' + output_format)
    isolated_out_dir = os.path.dirname(args.isolated_script_test_output)

    with open(sharding_map_path()) as f:
        sharding_map = json.load(f)
    sharding = sharding_map[args.builder][args.bot]['benchmarks']
    return_code = 0

    for benchmark in sharding:
        # Insert the benchmark name right after the run_benchmark entry, which
        # is the first element of rest_args.
        per_benchmark_args = rest_args[:1] + [benchmark] + rest_args[1:]
        # We don't care exactly what these are. In particular, the perf results
        # could be any format (chartjson, legacy, histogram). We just pass these
        # through, and expose these as results for this task.
        rc, perf_results, json_test_results = (
            run_telemetry_benchmark_as_googletest.run_benchmark(
                args, per_benchmark_args))

        return_code = return_code or rc
        benchmark_path = os.path.join(isolated_out_dir, benchmark)
        os.makedirs(benchmark_path)
        with open(os.path.join(benchmark_path, 'perf_results.json'), 'w') as f:
            json.dump(perf_results, f)
        with open(os.path.join(benchmark_path, 'test_results.json'), 'w') as f:
            json.dump(json_test_results, f)

    return return_code
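
main() indexes the sharding map first by builder and then by bot ID. Here is an illustrative sketch of the nesting that the lookup sharding_map[args.builder][args.bot]['benchmarks'] expects; the builder, bot, and benchmark names are made up.

# Illustrative only: names are hypothetical, but the nesting matches the
# lookup performed in main() above.
example_sharding_map = {
    'example-builder': {
        'example-bot-1': {
            'benchmarks': ['speedometer2', 'octane'],
        },
    },
}
benchmarks = (
    example_sharding_map['example-builder']['example-bot-1']['benchmarks'])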
def execute_benchmark(benchmark, isolated_out_dir,
                      args, rest_args, is_reference):
  # While we are transitioning between the chartjson and histogram set output
  # formats, we need to determine which format to look for.
  # We need to append it both to args and to the per-benchmark args so the
  # run_benchmark call knows which format it is dealing with and triggers the
  # benchmark correctly.
  is_histograms = benchmark in BENCHMARKS_TO_OUTPUT_HISTOGRAMS
  if is_histograms:
    output_format = '--output-format=histograms'
  else:
    output_format = '--output-format=chartjson'
  # The benchmark needs to run twice: once on the browser under test and once
  # on the reference build. Insert the benchmark name right after the
  # run_benchmark entry and append the output format.
  per_benchmark_args = (rest_args[:1] + [benchmark]
                        + rest_args[1:] + [output_format])
  benchmark_path = None
  if is_reference:
    # Parse out the browser flag and replace it with the reference build
    # so the benchmark runs against the reference build as well.
    browser_index = 0
    for arg in per_benchmark_args:
      if "browser" in arg:
        break
      browser_index += 1
    per_benchmark_args[browser_index] = '--browser=reference'
    # Now we need to add in the rest of the reference build args
    per_benchmark_args.append('--max-failures=5')
    per_benchmark_args.append('--output-trace-tag=_ref')
    benchmark_path = os.path.join(isolated_out_dir, benchmark + '.reference')
  else:
    benchmark_path = os.path.join(isolated_out_dir, benchmark)

  # We don't care exactly what these are. In particular, the perf results
  # could be any format (chartjson, legacy, histogram). We just pass these
  # through, and expose these as results for this task.
  rc, perf_results, json_test_results = (
      run_telemetry_benchmark_as_googletest.run_benchmark(
          args, per_benchmark_args, is_histograms))

  os.makedirs(benchmark_path)
  with open(os.path.join(benchmark_path, 'perf_results.json'), 'w') as f:
    json.dump(perf_results, f)
  with open(os.path.join(benchmark_path, 'test_results.json'), 'w') as f:
    json.dump(json_test_results, f)
  return rc
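
The is_reference branch above rewrites the browser flag in place and appends the reference-only flags. Pulled out as a standalone helper for illustration; the helper name is hypothetical, while the flag values match the code above.

def as_reference_args(per_benchmark_args):
  # Mirror of the is_reference branch: swap the --browser flag for the
  # reference build and add the reference-build-only flags.
  ref_args = list(per_benchmark_args)
  for i, arg in enumerate(ref_args):
    if 'browser' in arg:
      ref_args[i] = '--browser=reference'
      break
  ref_args.extend(['--max-failures=5', '--output-trace-tag=_ref'])
  return ref_args

# For example, ['run_benchmark', 'octane', '--browser=release'] becomes
# ['run_benchmark', 'octane', '--browser=reference', '--max-failures=5',
#  '--output-trace-tag=_ref'].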
def execute_benchmark(benchmark, isolated_out_dir, args, rest_args,
                      is_reference):
    # While we are transitioning between the chartjson and histogram set
    # output formats, we need to determine which format to look for, or see
    # whether it was already passed in, in which case that format applies to
    # all benchmarks in this run.
    is_histograms = append_output_format(benchmark, args, rest_args)
    # Insert the benchmark name right after the run_benchmark entry, which is
    # the first element of rest_args. The output format also needs to be
    # appended.
    per_benchmark_args = (rest_args[:1] + [benchmark] + rest_args[1:])
    benchmark_name = benchmark
    if is_reference:
        # Parse out the browser flag and replace it with the reference build
        # so the benchmark runs against the reference build as well.
        browser_index = 0
        for arg in per_benchmark_args:
            if "browser" in arg:
                break
            browser_index += 1
        per_benchmark_args[browser_index] = '--browser=reference'
        # Now we need to add in the rest of the reference build args
        per_benchmark_args.append('--max-failures=5')
        per_benchmark_args.append('--output-trace-tag=_ref')
        benchmark_name = benchmark + '.reference'

    # We don't care exactly what these are. In particular, the perf results
    # could be any format (chartjson, legacy, histogram). We just pass these
    # through, and expose these as results for this task.
    rc, perf_results, json_test_results = (
        run_telemetry_benchmark_as_googletest.run_benchmark(
            args, per_benchmark_args, is_histograms))

    write_results(benchmark_name, perf_results, json_test_results,
                  isolated_out_dir, False)
    return rc
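
Both Example #1 and the variant above delegate file output to a write_results helper that is not shown here. Below is a minimal sketch of such a helper, based on the inline os.makedirs/json.dump code in the middle variant; the signature follows the call site above, except that the trailing boolean flag (and the benchmark_log argument used in Example #1) are not defined in this file and are omitted.

import json
import os


def write_results(benchmark_name, perf_results, json_test_results,
                  isolated_out_dir):
    # Sketch only: mirrors the inline result-writing code in the variant that
    # does not use write_results.
    benchmark_path = os.path.join(isolated_out_dir, benchmark_name)
    os.makedirs(benchmark_path)
    with open(os.path.join(benchmark_path, 'perf_results.json'), 'w') as f:
        json.dump(perf_results, f)
    with open(os.path.join(benchmark_path, 'test_results.json'), 'w') as f:
        json.dump(json_test_results, f)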