Example 1
def generate_telemetry_tests(tester_config, benchmarks, benchmark_sharding_map,
                             use_whitelist):
    isolated_scripts = []
    # First determine the browser that you need based on the tester
    browser_name = ''
    if tester_config['platform'] == 'android':
        browser_name = 'android-chromium'
    elif (tester_config['platform'] == 'win'
          and tester_config['target_bits'] == 64):
        browser_name = 'release_x64'
    else:
        browser_name = 'release'

    num_shards = len(tester_config['swarming_dimensions'][0]['device_ids'])
    current_shard = 0
    for benchmark in benchmarks:
        # First figure out the swarming dimensions this test needs to be
        # triggered on. For each set of dimensions, the test is triggered on
        # only one of the devices.
        swarming_dimensions = []
        for dimension in tester_config['swarming_dimensions']:
            device_affinity = None
            if benchmark_sharding_map:
                sharding_map = benchmark_sharding_map.get(
                    str(num_shards), None)
                if not sharding_map and not use_whitelist:
                    raise Exception(
                        'Invalid number of shards, generate new sharding map')
                if use_whitelist:
                    device_affinity = current_shard
                else:
                    device_affinity = sharding_map.get(benchmark.Name(), None)
            else:
                # No sharding map was provided, default to legacy device
                # affinity algorithm
                device_affinity = bot_utils.GetDeviceAffinity(
                    num_shards, benchmark.Name())
            if device_affinity is None:
                raise Exception('Device affinity for benchmark %s not found' %
                                benchmark.Name())
            swarming_dimensions.append(
                get_swarming_dimension(dimension, device_affinity))

        test = generate_telemetry_test(swarming_dimensions, benchmark.Name(),
                                       browser_name)
        isolated_scripts.append(test)
        # Now create another executable for this benchmark on the reference browser
        reference_test = generate_telemetry_test(swarming_dimensions,
                                                 benchmark.Name(), 'reference')
        isolated_scripts.append(reference_test)
        if current_shard == (num_shards - 1):
            current_shard = 0
        else:
            current_shard += 1

    return isolated_scripts
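
The get_swarming_dimension helper called above is not part of this excerpt. Judging from Example 3 below, which inlines equivalent logic, it picks the concrete device for the computed shard out of dimension['device_ids']. A minimal sketch under that assumption (any field other than 'device_ids' and 'id' is a guess) could look like this:

def get_swarming_dimension(dimension, device_affinity):
    # Sketch only: copy the dimension, drop the full device list, and keep just
    # the single device id this shard is pinned to. The real helper may carry
    # over or rename additional fields.
    complete_dimension = dict(dimension)
    device_ids = complete_dimension.pop('device_ids')
    complete_dimension['id'] = device_ids[device_affinity]
    return complete_dimension
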
Example 2
def shard_benchmarks(num_shards, all_benchmarks):
  benchmark_to_shard_dict = {}
  shard_execution_times = [0] * num_shards
  sorted_benchmark_list, new_benchmarks = get_sorted_benchmark_list_by_time(
    all_benchmarks)
  # Iterate over the sorted benchmarks in reverse order and add each one to
  # the currently smallest bucket.
  for benchmark in sorted_benchmark_list:
    # Find current smallest bucket
    min_index = shard_execution_times.index(min(shard_execution_times))
    benchmark_to_shard_dict[benchmark[0].Name()] = min_index
    shard_execution_times[min_index] += benchmark[1]
  # For all the benchmarks that didn't have avg run times, use the default
  # device affinity algorithm
  for benchmark in new_benchmarks:
    device_affinity = bot_utils.GetDeviceAffinity(
        num_shards, benchmark.Name())
    benchmark_to_shard_dict[benchmark.Name()] = device_affinity
  return benchmark_to_shard_dict
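
shard_benchmarks is a greedy balancing pass: benchmarks with a known average run time are handed out one at a time to whichever shard currently has the smallest accumulated run time. The self-contained sketch below reproduces that idea with made-up (name, run time) pairs in place of real benchmark objects:

def greedy_shard(timed_benchmarks, num_shards):
    # timed_benchmarks: iterable of (name, avg_run_time) pairs.
    shard_times = [0] * num_shards
    assignment = {}
    # Place the longest benchmarks first, each on the currently least
    # loaded shard.
    for name, run_time in sorted(timed_benchmarks, key=lambda p: p[1],
                                 reverse=True):
        min_index = shard_times.index(min(shard_times))
        assignment[name] = min_index
        shard_times[min_index] += run_time
    return assignment

# Made-up timings, three shards:
print(greedy_shard([('smoothness', 300), ('page_cycler', 200),
                    ('startup', 120), ('memory', 90)], 3))
# {'smoothness': 0, 'page_cycler': 1, 'startup': 2, 'memory': 2}
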
Example 3
def generate_telemetry_tests(tester_config, benchmarks):
  isolated_scripts = []
  # First determine the browser that you need based on the tester
  browser_name = ''
  if tester_config['platform'] == 'android':
    browser_name = 'android-chromium'
  elif (tester_config['platform'] == 'win'
        and tester_config['target_bits'] == 64):
    browser_name = 'release_x64'
  else:
    browser_name = 'release'

  for benchmark in benchmarks:
    # First figure out the swarming dimensions this test needs to be
    # triggered on. For each set of dimensions, the test is triggered on
    # only one of the devices.
    swarming_dimensions = []
    for dimension in tester_config['swarming_dimensions']:
      device_affinity = bot_utils.GetDeviceAffinity(
          len(dimension['device_ids']), benchmark.Name())

      device_id = dimension['device_ids'][device_affinity]
      # The id is unique within the swarming pool, so it is the only
      # identifier needed to select the bot that runs the test.
      swarming_dimensions.append({
        'id': device_id
      })

    test = generate_telemetry_test(
      swarming_dimensions, benchmark.Name(), browser_name)
    isolated_scripts.append(test)
    # Now create another executable for this benchmark on the reference browser
    reference_test = generate_telemetry_test(
      swarming_dimensions, benchmark.Name(), 'reference')
    isolated_scripts.append(reference_test)

  return isolated_scripts
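
Every example above relies on bot_utils.GetDeviceAffinity to map a benchmark name to a stable shard index, so the same benchmark always lands on the same device. The usual way to get such a mapping is to hash the name and reduce it modulo the shard count; the sketch below shows that pattern (the concrete hash used by bot_utils is an assumption here):

import hashlib

def get_device_affinity(num_shards, base_name):
    # Deterministic name -> shard index: hash the benchmark name and take the
    # result modulo the shard count. sha1 is used purely for illustration.
    digest = hashlib.sha1(base_name.encode('utf-8')).hexdigest()
    return int(digest, 16) % num_shards

# Stable across runs as long as the shard count stays the same.
print(get_device_affinity(5, 'smoothness.top_25_smooth'))
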
Example 4
def _GetJsonBenchmarkList(possible_browser, possible_reference_browser,
                          benchmark_classes, num_shards):
    """Returns a list of all enabled benchmarks in a JSON format expected by
  buildbots.

  JSON format:
  { "version": <int>,
    "steps": {
      <string>: {
        "device_affinity": <int>,
        "cmd": <string>,
        "perf_dashboard_id": <string>,
      },
      ...
    }
  }
  """
    # TODO(charliea): Remove this once we have more power perf bots.
    only_run_battor_benchmarks = False
    print 'Environment variables: ', os.environ
    if os.environ.get('BUILDBOT_BUILDERNAME') in GOOD_POWER_PERF_BOT_WHITELIST:
        only_run_battor_benchmarks = True

    output = {'version': 1, 'steps': {}}
    for benchmark_class in benchmark_classes:
        # Filter out benchmarks in tools/perf/contrib/ directory
        # This is a terrible hack but we should no longer need this
        # _GetJsonBenchmarkList once all the perf bots are moved to swarming
        # (crbug.com/715565)
        if ('contrib' in os.path.abspath(
                sys.modules[benchmark_class.__module__].__file__)):
            continue

        if not _IsBenchmarkEnabled(benchmark_class, possible_browser):
            continue

        base_name = benchmark_class.Name()
        # TODO(charliea): Remove this once we have more power perf bots.
        # Only run battor power benchmarks to reduce the cycle time of this bot.
        # TODO(rnephew): Enable media.* and power.* tests when Mac BattOr issue
        # is solved.
        if only_run_battor_benchmarks and not base_name.startswith('battor'):
            continue
        base_cmd = [
            sys.executable,
            os.path.realpath(sys.argv[0]), '-v', '--output-format=chartjson',
            '--upload-results', base_name
        ]
        perf_dashboard_id = base_name

        device_affinity = bot_utils.GetDeviceAffinity(num_shards, base_name)

        output['steps'][base_name] = {
            'cmd': ' '.join(
                base_cmd + ['--browser=%s' % possible_browser.browser_type]),
            'device_affinity': device_affinity,
            'perf_dashboard_id': perf_dashboard_id,
        }
        if (possible_reference_browser and _IsBenchmarkEnabled(
                benchmark_class, possible_reference_browser)):
            output['steps'][base_name + '.reference'] = {
                'cmd': ' '.join(
                    base_cmd + ['--browser=reference',
                                '--output-trace-tag=_ref']),
                'device_affinity': device_affinity,
                'perf_dashboard_id': perf_dashboard_id,
            }

    return json.dumps(output, indent=2, sort_keys=True)
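
To make the output format from the docstring concrete, the fragment below builds a single hypothetical step by hand and serializes it the same way; the benchmark name, command line, and affinity value are invented for illustration:

import json

output = {
    'version': 1,
    'steps': {
        'smoothness.top_25_smooth': {
            'cmd': ('run_benchmark -v --output-format=chartjson '
                    '--upload-results smoothness.top_25_smooth '
                    '--browser=release'),
            'device_affinity': 3,
            'perf_dashboard_id': 'smoothness.top_25_smooth',
        },
    },
}
print(json.dumps(output, indent=2, sort_keys=True))
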