def testFlagChangesAreNotReflectedInConfigDict(self):
  """Mutating a flag inside OverrideFlags must leave the override dict intact."""
  flag_values = flags.FlagValues()
  flags.DEFINE_integer('test_flag', 0, 'Test flag.', flag_values=flag_values)
  flag_values([sys.argv[0]])
  # Build the override mapping as a literal rather than assigning key-by-key.
  override_dict = {'test_flag': 1}
  # Before entering the context: default value, flag not explicitly present.
  self.assertFlagState(flag_values, 0, False)
  self.assertEqual(override_dict['test_flag'], 1)
  with flag_util.OverrideFlags(flag_values, override_dict):
    # The override is applied and marked present.
    self.assertFlagState(flag_values, 1, True)
    # Change the live flag; the source dict must not observe this write.
    flag_values.test_flag = 2
    self.assertFlagState(flag_values, 2, True)
    self.assertEqual(override_dict['test_flag'], 1)
def testReadAndWrite(self):
  """Overrides apply inside the context and are fully undone on exit."""
  flag_values = flags.FlagValues()
  flags.DEFINE_integer('test_flag', 0, 'Test flag.', flag_values=flag_values)
  flag_values([sys.argv[0]])
  # Build the override mapping as a literal rather than assigning key-by-key.
  override_dict = {'test_flag': 1}
  # Initial state: default value, flag not explicitly present.
  self.assertFlagState(flag_values, 0, False)
  self.assertEqual(override_dict['test_flag'], 1)
  with flag_util.OverrideFlags(flag_values, override_dict):
    # Inside the context the override value is active and marked present.
    self.assertFlagState(flag_values, 1, True)
    self.assertEqual(override_dict['test_flag'], 1)
  # Exiting restores the original default and presence.
  self.assertFlagState(flag_values, 0, False)
  self.assertEqual(override_dict['test_flag'], 1)
  # Writes after the context behave normally and never touch the dict.
  flag_values.test_flag = 3
  self.assertFlagState(flag_values, 3, False)
  self.assertEqual(override_dict['test_flag'], 1)
def _CreateBenchmarkSpecs():
  """Create a list of BenchmarkSpecs for each benchmark run to be scheduled.

  For every (benchmark module, user config) pair selected by the current
  flags, builds the benchmark's config spec (with any per-benchmark flag
  overrides applied while calling GetConfig), assigns a PKB-run-unique ID,
  and optionally runs the module's CheckPrerequisites hook.

  Returns:
    A list of BenchmarkSpecs.

  Raises:
    Whatever a benchmark's CheckPrerequisites raises; the failure is logged
    before being re-raised.
  """
  specs = []
  benchmark_tuple_list = benchmark_sets.GetBenchmarksFromFlags()
  # One independent counter per benchmark name, used to build unique run IDs.
  benchmark_counts = collections.defaultdict(itertools.count)
  for benchmark_module, user_config in benchmark_tuple_list:
    # Construct benchmark config object.
    name = benchmark_module.BENCHMARK_NAME
    expected_os_types = (
        os_types.WINDOWS_OS_TYPES
        if FLAGS.os_type in os_types.WINDOWS_OS_TYPES
        else os_types.LINUX_OS_TYPES)
    # Apply any user-supplied per-benchmark flag overrides only while the
    # benchmark computes its config.
    with flag_util.OverrideFlags(FLAGS, user_config.get('flags')):
      config_dict = benchmark_module.GetConfig(user_config)
    config_spec_class = getattr(benchmark_module,
                                'BENCHMARK_CONFIG_SPEC_CLASS',
                                benchmark_config_spec.BenchmarkConfigSpec)
    config = config_spec_class(name, expected_os_types=expected_os_types,
                               flag_values=FLAGS, **config_dict)

    # Assign a unique ID to each benchmark run. This differs even between two
    # runs of the same benchmark within a single PKB run.
    uid = name + str(next(benchmark_counts[name]))

    # Optional step to check flag values and verify files exist.
    check_prereqs = getattr(benchmark_module, 'CheckPrerequisites', None)
    if check_prereqs:
      try:
        with config.RedirectFlags(FLAGS):
          check_prereqs(config)
      # Explicit BaseException instead of PEP 8-discouraged bare `except:`;
      # identical semantics — log every failure (including interrupts) and
      # always re-raise.
      except BaseException:
        logging.exception('Prerequisite check failed for %s', name)
        raise

    specs.append(
        benchmark_spec.BenchmarkSpec.GetBenchmarkSpec(
            benchmark_module, config, uid))

  return specs