def setUp(self):
    super(FlagsDecoderTestCase, self).setUp()
    self._decoder = benchmark_config_spec.FlagsDecoder(option=_OPTION)
    self._flag_values = flags.FlagValues()
    flags.DEFINE_integer('test_flag',
                         0,
                         'Test flag.',
                         flag_values=self._flag_values)
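
A minimal sketch of a test method that could follow the setUp above, assuming FlagsDecoder.Decode takes a (value, component_name, flag_values) triple as in the call sites below and returns a dict-like mapping of flag names to flag objects; the method name and the 'test_component' label are placeholders, not part of the original suite.

def testDecodeOverridesDefault(self):
    # Hypothetical test. Assumes the decoded result maps flag names to flag
    # objects whose .value reflects config-supplied overrides; the exact
    # return shape of FlagsDecoder.Decode may differ.
    result = self._decoder.Decode(
        {'test_flag': 5}, 'test_component', self._flag_values)
    self.assertEqual(5, result['test_flag'].value)
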
def _CreateBenchmarkRunList():
    """Create a list of configs and states for each benchmark run to be scheduled.

  Returns:
    list of (args, run_status_list) pairs. Contains one pair per benchmark run
    to be scheduled, including multiple pairs for benchmarks that will be run
    multiple times. args is a tuple of the first five arguments to pass to
    RunBenchmark, and run_status_list is a list of strings in the order of
    [benchmark_name, benchmark_uid, benchmark_status].
  """
    result = []
    benchmark_tuple_list = benchmark_sets.GetBenchmarksFromFlags()
    total_benchmarks = len(benchmark_tuple_list)
    benchmark_counts = collections.defaultdict(itertools.count)
    for i, benchmark_tuple in enumerate(benchmark_tuple_list):
        # Construct benchmark config object.
        benchmark_module, user_config = benchmark_tuple
        name = benchmark_module.BENCHMARK_NAME
        expected_os_types = (os_types.WINDOWS_OS_TYPES
                             if FLAGS.os_type in os_types.WINDOWS_OS_TYPES else
                             os_types.LINUX_OS_TYPES)
        merged_flags = benchmark_config_spec.FlagsDecoder().Decode(
            user_config.get('flags'), 'flags', FLAGS)
        with flag_util.FlagDictSubstitution(FLAGS, lambda: merged_flags):
            config_dict = benchmark_module.GetConfig(user_config)
        config_spec_class = getattr(benchmark_module,
                                    'BENCHMARK_CONFIG_SPEC_CLASS',
                                    benchmark_config_spec.BenchmarkConfigSpec)
        config = config_spec_class(name,
                                   expected_os_types=expected_os_types,
                                   flag_values=FLAGS,
                                   **config_dict)

        # Assign a unique ID to each benchmark run. This differs even between two
        # runs of the same benchmark within a single PKB run.
        uid = name + str(next(benchmark_counts[name]))

        # Optional step to check flag values and verify files exist.
        check_prereqs = getattr(benchmark_module, 'CheckPrerequisites', None)
        if check_prereqs:
            try:
                with config.RedirectFlags(FLAGS):
                    check_prereqs()
            except:
                logging.exception('Prerequisite check failed for %s', name)
                raise

        result.append(((benchmark_module, i + 1, total_benchmarks, config,
                        uid), [name, uid, benchmark_status.SKIPPED]))

    return result
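
A sketch of how the returned pairs might be consumed, assuming RunBenchmark accepts the five packed positional arguments (the real caller may pass more) and that benchmark_status defines SUCCEEDED and FAILED alongside the SKIPPED value used above; the loop itself is illustrative, not part of the original module.

for args, run_status_list in _CreateBenchmarkRunList():
    name, uid = run_status_list[0], run_status_list[1]
    try:
        # Assumed call shape: the docstring above only guarantees that args
        # holds the first five RunBenchmark arguments.
        RunBenchmark(*args)
        run_status_list[2] = benchmark_status.SUCCEEDED
    except Exception:
        run_status_list[2] = benchmark_status.FAILED
        logging.exception('Benchmark run %s (%s) failed.', name, uid)
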
Example #3
def _CreateBenchmarkSpecs():
    """Create a list of BenchmarkSpecs for each benchmark run to be scheduled.

  Returns:
    A list of BenchmarkSpecs.
  """
    specs = []
    benchmark_tuple_list = benchmark_sets.GetBenchmarksFromFlags()
    benchmark_counts = collections.defaultdict(itertools.count)
    for benchmark_module, user_config in benchmark_tuple_list:
        # Construct benchmark config object.
        name = benchmark_module.BENCHMARK_NAME
        expected_os_types = (os_types.WINDOWS_OS_TYPES
                             if FLAGS.os_type in os_types.WINDOWS_OS_TYPES else
                             os_types.LINUX_OS_TYPES)
        merged_flags = benchmark_config_spec.FlagsDecoder().Decode(
            user_config.get('flags'), 'flags', FLAGS)
        with flag_util.FlagDictSubstitution(FLAGS, lambda: merged_flags):
            config_dict = benchmark_module.GetConfig(user_config)
        config_spec_class = getattr(benchmark_module,
                                    'BENCHMARK_CONFIG_SPEC_CLASS',
                                    benchmark_config_spec.BenchmarkConfigSpec)
        config = config_spec_class(name,
                                   expected_os_types=expected_os_types,
                                   flag_values=FLAGS,
                                   **config_dict)

        # Assign a unique ID to each benchmark run. This differs even between two
        # runs of the same benchmark within a single PKB run.
        uid = name + str(next(benchmark_counts[name]))

        # Optional step to check flag values and verify files exist.
        check_prereqs = getattr(benchmark_module, 'CheckPrerequisites', None)
        if check_prereqs:
            try:
                with config.RedirectFlags(FLAGS):
                    check_prereqs(config)
            except:
                logging.exception('Prerequisite check failed for %s', name)
                raise

        specs.append(
            benchmark_spec.BenchmarkSpec.GetBenchmarkSpec(
                benchmark_module, config, uid))

    return specs
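
Both helpers build per-benchmark run indices with collections.defaultdict(itertools.count), which is what keeps each uid unique even when the same benchmark is scheduled several times in one PKB run. A standalone illustration of that pattern (benchmark names here are illustrative only):

import collections
import itertools

benchmark_counts = collections.defaultdict(itertools.count)

# Each key lazily gets its own counter, so repeated runs of one benchmark are
# numbered 0, 1, 2, ... while other benchmarks start again at 0.
uids = [name + str(next(benchmark_counts[name]))
        for name in ['iperf', 'iperf', 'fio', 'iperf']]
print(uids)  # ['iperf0', 'iperf1', 'fio0', 'iperf2']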