Example #1
0
 def testConfigImport(self):
     """A YAML file named by benchmark_config_file is loaded as the user config."""
     flags_patcher = mock.patch(configs.__name__ + '.FLAGS')
     self.addCleanup(flags_patcher.stop)
     flags_mock = flags_patcher.start()
     flags_mock.configure_mock(benchmark_config_file='test_import.yml')
     loaded_config = configs.GetUserConfig()
     self.assertEqual(loaded_config['flags']['num_vms'], 3)
 def testConfigOverrideFlag(self):
    """Entries from --config_override are merged into the user config."""
    flags_patcher = mock.patch(configs.__name__ + '.FLAGS')
    self.addCleanup(flags_patcher.stop)
    flags_mock = flags_patcher.start()
    overrides = [
        'a.vm_groups.default.vm_count=5',
        'a.flags.flag=value']
    flags_mock.configure_mock(benchmark_config_file=None,
                              config_override=overrides)
    merged = configs.GetUserConfig()
    self.assertEqual(merged['a']['vm_groups']['default']['vm_count'], 5)
    self.assertEqual(merged['a']['flags']['flag'], 'value')
def GetBenchmarksFromFlags():
    """Returns a list of benchmarks to run based on the benchmarks flag.

    If no benchmarks (or sets) are specified, this will return the standard set.
    If multiple sets or mixes of sets and benchmarks are specified, this will
    return the union of all sets and individual benchmarks.

    Returns:
      A list of (benchmark_module, config_dict) tuples.

    Raises:
      ValueError: If a requested benchmark is not valid for the current
        os_type.
    """
    user_config = configs.GetUserConfig()
    benchmark_config_list = _GetBenchmarksFromUserConfig(user_config)
    # If the user config already names benchmarks and --benchmarks was not
    # explicitly passed on the command line, the config file wins.
    if benchmark_config_list and not FLAGS['benchmarks'].present:
        return benchmark_config_list

    # Collect explicitly named benchmarks plus the members of any named sets.
    benchmark_names = set()
    for benchmark in FLAGS.benchmarks:
        if benchmark in BENCHMARK_SETS:
            benchmark_names |= set(BENCHMARK_SETS[benchmark][BENCHMARK_LIST])
        else:
            benchmark_names.add(benchmark)

    # Expand sets that contain other sets. `expanded` guards against cycles;
    # the inner `break` restarts iteration because the set was just mutated.
    expanded = set()
    did_expansion = True
    while did_expansion:
        did_expansion = False
        for benchmark_name in benchmark_names:
            if benchmark_name in BENCHMARK_SETS:
                did_expansion = True
                benchmark_names.remove(benchmark_name)
                if benchmark_name not in expanded:
                    expanded.add(benchmark_name)
                    benchmark_names |= set(
                        BENCHMARK_SETS[benchmark_name][BENCHMARK_LIST])
                break

    valid_benchmarks = _GetValidBenchmarks()

    # Create a list of (module, config) tuples to return.
    benchmark_config_list = []
    for benchmark_name in benchmark_names:
        if benchmark_name in valid_benchmarks:
            benchmark_module = valid_benchmarks[benchmark_name]
            # Bug fix: use a local variable instead of rebinding
            # `user_config`. The original rebound the full user config to a
            # single benchmark's sub-config on the first iteration, so every
            # later benchmark looked its config up in the wrong dict.
            benchmark_config = user_config.get(benchmark_name, {})
            benchmark_config_list.append((benchmark_module, benchmark_config))
        else:
            raise ValueError('Benchmark "%s" not valid on os_type "%s"' %
                             (benchmark_name, FLAGS.os_type))

    return benchmark_config_list
def GetBenchmarksFromFlags():
  """Returns a list of benchmarks to run based on the benchmarks flag.

  If no benchmarks (or sets) are specified, this will return the standard set.
  If multiple sets or mixes of sets and benchmarks are specified, this will
  return the union of all sets and individual benchmarks.

  Returns:
    A list of (benchmark_module, config_dict) tuples, one per flag-axis
    combination expanded from each benchmark's flag matrix / flag zip.

  Raises:
    ValueError: If a requested benchmark is not valid for the current os_type.
  """
  # NOTE: this code is Python 2 (dict.iteritems, itertools.izip).
  user_config = configs.GetUserConfig()
  benchmark_config_list = _GetBenchmarksFromUserConfig(user_config)
  # If the user config already names benchmarks and --benchmarks was not
  # explicitly passed, the config file wins.
  if benchmark_config_list and not FLAGS['benchmarks'].present:
    return benchmark_config_list

  # Collect explicitly named benchmarks plus the members of any named sets.
  benchmark_names = set()
  for benchmark in FLAGS.benchmarks:
    if benchmark in BENCHMARK_SETS:
      benchmark_names |= set(BENCHMARK_SETS[benchmark][BENCHMARK_LIST])
    else:
      benchmark_names.add(benchmark)

  # Expand recursive sets
  # `expanded` guards against set cycles; the `break` restarts iteration over
  # benchmark_names because the set was just mutated mid-iteration.
  expanded = set()
  did_expansion = True
  while did_expansion:
    did_expansion = False
    for benchmark_name in benchmark_names:
      if (benchmark_name in BENCHMARK_SETS):
        did_expansion = True
        benchmark_names.remove(benchmark_name)
        if (benchmark_name not in expanded):
            expanded.add(benchmark_name)
            benchmark_names |= set(BENCHMARK_SETS[
                benchmark_name][BENCHMARK_LIST])
        break

  valid_benchmarks = _GetValidBenchmarks()

  # create a list of module, config tuples to return
  benchmark_config_list = []
  for benchmark_name in benchmark_names:
    benchmark_config = user_config.get(benchmark_name, {})
    # A config entry may rename the benchmark via a 'name' key; the module is
    # looked up under the effective name.
    benchmark_name = benchmark_config.get('name', benchmark_name)
    benchmark_module = valid_benchmarks.get(benchmark_name)

    if benchmark_module is None:
      raise ValueError('Benchmark "%s" not valid on os_type "%s"' %
                       (benchmark_name, FLAGS.os_type))


    # We need to remove the 'flag_matrix', 'flag_matrix_defs', 'flag_zip',
    # 'flag_zip_defs', and 'flag_matrix_filters' keys from the config
    # dictionary since they aren't actually part of the config spec and will
    # cause errors if they are left in.
    # The --flag_matrix / --flag_zip flags override the names chosen in the
    # per-benchmark config.
    flag_matrix_name = benchmark_config.pop(
        'flag_matrix', None)
    flag_matrix_name = FLAGS.flag_matrix or flag_matrix_name
    flag_matrix = benchmark_config.pop(
        'flag_matrix_defs', {}).get(flag_matrix_name, {})
    flag_matrix_filter = benchmark_config.pop(
        'flag_matrix_filters', {}).get(flag_matrix_name)
    flag_zip_name = benchmark_config.pop(
        'flag_zip', None)
    flag_zip_name = FLAGS.flag_zip or flag_zip_name
    flag_zip = benchmark_config.pop(
        'flag_zip_defs', {}).get(flag_zip_name, {})

    # Zipped axes are advanced in lockstep (one config per index); crossed
    # axes are combined as a full cartesian product below.
    zipped_axes = []
    crossed_axes = []
    if flag_zip:
      flag_axes = []
      for flag, values in flag_zip.iteritems():
        flag_axes.append([{flag: v} for v in values])

      _AssertZipAxesHaveSameLength(flag_axes)

      for flag_config in itertools.izip(*flag_axes):
        config = _GetConfigForAxis(benchmark_config, flag_config)
        zipped_axes.append((benchmark_module, config))

      # The zipped flag dicts become a single axis of the cross product.
      crossed_axes.append([benchmark_tuple[1]['flags'] for
                           benchmark_tuple in zipped_axes])

    for flag, values in flag_matrix.iteritems():
      crossed_axes.append([{flag: v} for v in values])

    for flag_config in itertools.product(*crossed_axes):
      config = _GetConfigForAxis(benchmark_config, flag_config)
      # NOTE(review): eval() runs a config-supplied filter expression with
      # the config flags as locals — acceptable only because benchmark
      # configs are trusted input.
      if (flag_matrix_filter and not eval(
          flag_matrix_filter, {}, config['flags'])):
          continue
      benchmark_config_list.append((benchmark_module, config))

  return benchmark_config_list
Example #5
0
def GetBenchmarksFromFlags():
    """Returns a list of benchmarks to run based on the benchmarks flag.

    If no benchmarks (or sets) are specified, this will return the standard
    set. If multiple sets or mixes of sets and benchmarks are specified, this
    will return the union of all sets and individual benchmarks.

    Returns:
      A list of (benchmark_module, config_dict) tuples, one per flag-axis
      combination expanded from each benchmark's flag matrix / flag zip.

    Raises:
      ValueError: when benchmark_name is not valid for os_type supplied
    """
    # NOTE: this code is Python 2 (dict.iteritems, itertools.izip).
    user_config = configs.GetUserConfig()
    benchmark_config_list = _GetBenchmarksFromUserConfig(user_config)
    # If the user config already names benchmarks and --benchmarks was not
    # explicitly passed, the config file wins.
    if benchmark_config_list and not FLAGS['benchmarks'].present:
        return benchmark_config_list

    # Breadth-style set expansion via a work queue; benchmark_set records
    # every name already seen so set cycles cannot loop forever.
    benchmark_queue = collections.deque(FLAGS.benchmarks)
    benchmark_names = []
    benchmark_set = set()

    while benchmark_queue:
        benchmark = benchmark_queue.popleft()
        if benchmark in benchmark_set:
            continue
        benchmark_set.add(benchmark)
        if benchmark in BENCHMARK_SETS:
            # extendleft pushes each member onto the left end in turn, so the
            # set's members are processed next (in reversed list order).
            benchmark_queue.extendleft(
                BENCHMARK_SETS[benchmark][BENCHMARK_LIST])
        else:
            benchmark_names.append(benchmark)

    valid_benchmarks = _GetValidBenchmarks()

    # create a list of module, config tuples to return
    benchmark_config_list = []
    for benchmark_name in benchmark_names:
        benchmark_config = user_config.get(benchmark_name, {})
        # A config entry may rename the benchmark via a 'name' key; the
        # module is looked up under the effective name.
        benchmark_name = benchmark_config.get('name', benchmark_name)
        benchmark_module = valid_benchmarks.get(benchmark_name)

        if benchmark_module is None:
            raise ValueError('Benchmark "%s" not valid on os_type "%s"' %
                             (benchmark_name, FLAGS.os_type))

        # The --flag_matrix / --flag_zip flags override the names chosen in
        # the per-benchmark config.
        flag_matrix_name = (FLAGS.flag_matrix
                            or benchmark_config.get('flag_matrix', None))
        flag_zip_name = (FLAGS.flag_zip
                         or benchmark_config.get('flag_zip', None))
        _AssertFlagMatrixAndZipDefsExist(benchmark_config, flag_matrix_name,
                                         flag_zip_name)

        # We need to remove the 'flag_matrix', 'flag_matrix_defs', 'flag_zip',
        # 'flag_zip_defs', and 'flag_matrix_filters' keys from the config
        # dictionary since they aren't actually part of the config spec and will
        # cause errors if they are left in.
        benchmark_config.pop('flag_matrix', None)
        benchmark_config.pop('flag_zip', None)

        flag_matrix = benchmark_config.pop('flag_matrix_defs',
                                           {}).get(flag_matrix_name, {})
        flag_matrix_filter = benchmark_config.pop('flag_matrix_filters',
                                                  {}).get(
                                                      flag_matrix_name, {})
        flag_zip = benchmark_config.pop('flag_zip_defs',
                                        {}).get(flag_zip_name, {})

        # Zipped axes are advanced in lockstep (one config per index);
        # crossed axes are combined as a full cartesian product below.
        zipped_axes = []
        crossed_axes = []
        if flag_zip:
            flag_axes = []
            for flag, values in flag_zip.iteritems():
                flag_axes.append([{flag: v} for v in values])

            _AssertZipAxesHaveSameLength(flag_axes)

            for flag_config in itertools.izip(*flag_axes):
                config = _GetConfigForAxis(benchmark_config, flag_config)
                zipped_axes.append((benchmark_module, config))

            # The zipped flag dicts become a single axis of the cross product.
            crossed_axes.append([
                benchmark_tuple[1]['flags'] for benchmark_tuple in zipped_axes
            ])

        # sorted() makes the axis (and hence output) order deterministic.
        for flag, values in sorted(flag_matrix.iteritems()):
            crossed_axes.append([{flag: v} for v in values])

        for flag_config in itertools.product(*crossed_axes):
            config = _GetConfigForAxis(benchmark_config, flag_config)
            # NOTE(review): eval() runs a config-supplied filter expression
            # with the config flags as locals — acceptable only because
            # benchmark configs are trusted input.
            if (flag_matrix_filter
                    and not eval(flag_matrix_filter, {}, config['flags'])):
                continue
            benchmark_config_list.append((benchmark_module, config))

    return benchmark_config_list