Example No. 1
@contextlib.contextmanager
def PatchFlags(mock_flags=None):
  """Patches read and write access to perfkitbenchmarker.flags.FLAGS.

  By patching the underlying FlagValues instance, this method affects all
  modules that have read FLAGS from perfkitbenchmarker.flags. For example, a
  module my_module.py may have the code
      from perfkitbenchmarker import flags
      FLAGS = flags.FLAGS
      ...
      def Func():
        my_flag = FLAGS['cloud']
        my_value = FLAGS.cloud
        FLAGS.cloud = my_override_value
  While the PatchFlags context manager is in effect, calling my_module.Func()
  causes my_flag and my_value to be read from mock_flags rather than from an
  actual FlagValues instance. Similarly, the write to FLAGS.cloud sets
  mock_flags.cloud to my_override_value.

  Args:
    mock_flags: None or MockFlags. If provided, the source of mocked flag
        values. If not provided, a new MockFlags object will be used.

  Yields:
    MockFlags. Either mock_flags or the newly created MockFlags value.
  """
  mock_flags = mock_flags or MockFlags()
  with flag_util.FlagDictSubstitution(FLAGS, mock_flags.FlagDict):
    yield mock_flags
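As a quick orientation, here is a hedged sketch of how a test might drive
PatchFlags; my_module is the hypothetical module from the docstring above, and
the flag name is illustrative only:

with PatchFlags() as mock_flags:
  mock_flags.cloud = 'GCP'  # Seed the value that my_module.Func() will read.
  my_module.Func()          # FLAGS['cloud'] and FLAGS.cloud resolve to 'GCP'.
# After the block exits, FLAGS is backed by the real FlagValues again.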
Example No. 2
def PatchTestCaseFlags(testcase):
    """Patches access to perfkitbenchmarker.flags.FLAGS for a TestCase.

  Similar to PatchFlags, but only needs to be called once during a test method
  or its setUp method, and remains in effect for the rest of the test method.

  Args:
    testcase: unittest.TestCase. The current test. A cleanup method is
        registered to undo the patch after this test completes.

  Returns:
    MockFlags. The mocked FlagValues object.
  """
  # TODO(ferneyhough): This is a convenience for a number of unittests that
  # use both mock_flags and real flags. When using real flags in unittests,
  # the flags must be marked as parsed before attempting to read any of them.
  # The best way to use flags in unittests will likely change due to
  # b/116248223, and this module may no longer be necessary.
  FLAGS.mark_as_parsed()

  mock_flags = MockFlags()
  substitution = flag_util.FlagDictSubstitution(FLAGS, mock_flags.FlagDict)
  substitution.__enter__()
  testcase.addCleanup(substitution.__exit__)
  return mock_flags
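A sketch of the intended call pattern, assuming a standard unittest.TestCase
and that MockFlags accepts plain attribute assignment; the flag name and test
class are illustrative:

import unittest


class MyBenchmarkTest(unittest.TestCase):

  def setUp(self):
    super(MyBenchmarkTest, self).setUp()
    # Patch FLAGS once; the registered cleanup undoes it after this test.
    self.mock_flags = PatchTestCaseFlags(self)
    self.mock_flags.cloud = 'GCP'

  def testReadsMockedFlag(self):
    # Code under test that reads FLAGS.cloud now sees the mocked value.
    self.assertEqual('GCP', self.mock_flags.cloud)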
Example No. 3
  def testReadAndWrite(self):
    flag_values = flags.FlagValues()
    flags.DEFINE_integer('test_flag',
                         0,
                         'Test flag.',
                         flag_values=flag_values)
    flag_values([sys.argv[0]])
    flag_values_copy = copy.deepcopy(flag_values)
    flag_values_copy.test_flag = 1
    self.assertFlagState(flag_values, 0, False)
    self.assertFlagState(flag_values_copy, 1, False)
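    # Older python-gflags FlagValues objects expose their flag dict via a
    # public FlagDict() method; newer absl-py versions use the private
    # _flags() accessor instead, so pick whichever is available.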
    if hasattr(flag_values_copy, '_flags'):
      flag_dict_func = flag_values_copy._flags
    else:
      flag_dict_func = flag_values_copy.FlagDict
    with flag_util.FlagDictSubstitution(flag_values, flag_dict_func):
      self.assertFlagState(flag_values, 1, False)
      self.assertFlagState(flag_values_copy, 1, False)
      flag_values.test_flag = 2
      flag_values['test_flag'].present += 1
      self.assertFlagState(flag_values, 2, True)
      self.assertFlagState(flag_values_copy, 2, True)
    self.assertFlagState(flag_values, 0, False)
    self.assertFlagState(flag_values_copy, 2, True)
    flag_values.test_flag = 3
    self.assertFlagState(flag_values, 3, False)
    self.assertFlagState(flag_values_copy, 2, True)
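The assertFlagState helper is not reproduced on this page. A plausible
reconstruction, assuming it simply checks the flag's current value and treats
its present counter as a boolean:

  def assertFlagState(self, flag_values, value, present):
    """Asserts that test_flag has the given value and presence state."""
    self.assertEqual(value, flag_values.test_flag)
    self.assertEqual(value, flag_values['test_flag'].value)
    self.assertEqual(present, bool(flag_values['test_flag'].present))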
Example No. 4
  @contextlib.contextmanager
  def RedirectFlags(self, flag_values):
    """Redirects flag reads and writes to the benchmark-specific flags object.

    Args:
      flag_values: flags.FlagValues object. Within the enclosed code block,
          reads and writes to this object are redirected to self.flags.
    """
    with flag_util.FlagDictSubstitution(flag_values, lambda: self.flags):
      yield
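A sketch of how a caller might use RedirectFlags, mirroring the
CheckPrerequisites pattern in the later examples; config is assumed to be a
BenchmarkConfigSpec instance and _CheckCloud is an illustrative helper:

def _CheckCloud():
  # Hypothetical prerequisite check that reads the global FLAGS object.
  if FLAGS.cloud not in ('GCP', 'AWS', 'Azure'):
    raise ValueError('Unsupported cloud: %s' % FLAGS.cloud)

with config.RedirectFlags(FLAGS):
  _CheckCloud()  # Sees config.flags, including per-benchmark overrides.
# FLAGS behaves normally again once the block exits.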
Example No. 5
  @contextlib.contextmanager
  def RedirectGlobalFlags(self):
    """Redirects flag reads and writes to the benchmark-specific flags object.

    Within the enclosed code block, reads and writes to the flags.FLAGS object
    are redirected to a copy that has been merged with config-provided flag
    overrides specific to this benchmark run.
    """
    with flag_util.FlagDictSubstitution(FLAGS, lambda: self.config.flags):
      yield
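flag_util.FlagDictSubstitution itself is not shown on this page. The following
is only a minimal sketch of the idea, written against the old python-gflags
behavior in which attribute reads and writes on a FlagValues object go through
its FlagDict() accessor; the real PKB implementation may differ in detail:

import contextlib


@contextlib.contextmanager
def _flag_dict_substitution(flag_values, flag_dict_func):
  """Temporarily redirects flag_values' flag dict to flag_dict_func()."""
  # Shadow the class-level FlagDict() accessor with the substitute callable.
  # Writing through __dict__ avoids FlagValues' custom __setattr__ logic.
  original = flag_values.__dict__.get('FlagDict')
  flag_values.__dict__['FlagDict'] = flag_dict_func
  try:
    yield
  finally:
    if original is None:
      del flag_values.__dict__['FlagDict']
    else:
      flag_values.__dict__['FlagDict'] = original

Note that Example No. 2 registers the real class's __exit__ directly with
addCleanup, so the actual FlagDictSubstitution evidently accepts an __exit__
call without the usual exception arguments.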
Example No. 6
def _CreateBenchmarkRunList():
    """Create a list of configs and states for each benchmark run to be scheduled.

  Returns:
    list of (args, run_status_list) pairs. Contains one pair per benchmark run
    to be scheduled, including multiple pairs for benchmarks that will be run
    multiple times. args is a tuple of the first five arguments to pass to
    RunBenchmark, and run_status_list is a list of strings in the order of
    [benchmark_name, benchmark_uid, benchmark_status].
  """
    result = []
    benchmark_tuple_list = benchmark_sets.GetBenchmarksFromFlags()
    total_benchmarks = len(benchmark_tuple_list)
    benchmark_counts = collections.defaultdict(itertools.count)
    for i, benchmark_tuple in enumerate(benchmark_tuple_list):
        # Construct benchmark config object.
        benchmark_module, user_config = benchmark_tuple
        name = benchmark_module.BENCHMARK_NAME
        expected_os_types = (os_types.WINDOWS_OS_TYPES
                             if FLAGS.os_type in os_types.WINDOWS_OS_TYPES else
                             os_types.LINUX_OS_TYPES)
        merged_flags = benchmark_config_spec.FlagsDecoder().Decode(
            user_config.get('flags'), 'flags', FLAGS)
        with flag_util.FlagDictSubstitution(FLAGS, lambda: merged_flags):
            config_dict = benchmark_module.GetConfig(user_config)
        config_spec_class = getattr(benchmark_module,
                                    'BENCHMARK_CONFIG_SPEC_CLASS',
                                    benchmark_config_spec.BenchmarkConfigSpec)
        config = config_spec_class(name,
                                   expected_os_types=expected_os_types,
                                   flag_values=FLAGS,
                                   **config_dict)

        # Assign a unique ID to each benchmark run. This differs even between two
        # runs of the same benchmark within a single PKB run.
        uid = name + str(next(benchmark_counts[name]))

        # Optional step to check flag values and verify files exist.
        check_prereqs = getattr(benchmark_module, 'CheckPrerequisites', None)
        if check_prereqs:
            try:
                with config.RedirectFlags(FLAGS):
                    check_prereqs()
            except:
                logging.exception('Prerequisite check failed for %s', name)
                raise

        result.append(((benchmark_module, i + 1, total_benchmarks, config,
                        uid), [name, uid, benchmark_status.SKIPPED]))

    return result
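A sketch of how the returned pairs might be consumed, assuming a RunBenchmark
function that takes the five positional arguments described in the docstring
and that benchmark_status also defines SUCCEEDED and FAILED alongside the
SKIPPED constant used above; the dispatch logic here is illustrative, not
PKB's actual scheduler:

for args, run_status_list in _CreateBenchmarkRunList():
    try:
        # args is (benchmark_module, sequence_number, total_benchmarks, config, uid).
        RunBenchmark(*args)
        run_status_list[2] = benchmark_status.SUCCEEDED
    except Exception:
        logging.exception('Benchmark %s failed.', run_status_list[0])
        run_status_list[2] = benchmark_status.FAILED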
Example No. 7
def _CreateBenchmarkSpecs():
    """Create a list of BenchmarkSpecs for each benchmark run to be scheduled.

  Returns:
    A list of BenchmarkSpecs.
  """
    specs = []
    benchmark_tuple_list = benchmark_sets.GetBenchmarksFromFlags()
    benchmark_counts = collections.defaultdict(itertools.count)
    for benchmark_module, user_config in benchmark_tuple_list:
        # Construct benchmark config object.
        name = benchmark_module.BENCHMARK_NAME
        expected_os_types = (os_types.WINDOWS_OS_TYPES
                             if FLAGS.os_type in os_types.WINDOWS_OS_TYPES else
                             os_types.LINUX_OS_TYPES)
        merged_flags = benchmark_config_spec.FlagsDecoder().Decode(
            user_config.get('flags'), 'flags', FLAGS)
        with flag_util.FlagDictSubstitution(FLAGS, lambda: merged_flags):
            config_dict = benchmark_module.GetConfig(user_config)
        config_spec_class = getattr(benchmark_module,
                                    'BENCHMARK_CONFIG_SPEC_CLASS',
                                    benchmark_config_spec.BenchmarkConfigSpec)
        config = config_spec_class(name,
                                   expected_os_types=expected_os_types,
                                   flag_values=FLAGS,
                                   **config_dict)

        # Assign a unique ID to each benchmark run. This differs even between two
        # runs of the same benchmark within a single PKB run.
        uid = name + str(next(benchmark_counts[name]))

        # Optional step to check flag values and verify files exist.
        check_prereqs = getattr(benchmark_module, 'CheckPrerequisites', None)
        if check_prereqs:
            try:
                with config.RedirectFlags(FLAGS):
                    check_prereqs(config)
            except:
                logging.exception('Prerequisite check failed for %s', name)
                raise

        specs.append(
            benchmark_spec.BenchmarkSpec.GetBenchmarkSpec(
                benchmark_module, config, uid))

    return specs
Example No. 8
  def _DecodeAndInit(self, component_full_name, config, decoders, flag_values):
    """Initializes spec attributes from provided config option values.

    Args:
      component_full_name: string. Fully qualified name of the configurable
          component containing the config options.
      config: dict mapping option name string to option value.
      decoders: OrderedDict mapping option name string to ConfigOptionDecoder.
      flag_values: flags.FlagValues. Runtime flags that may override provided
          config option values. These flags have already been applied to the
          current config, but they may be passed to the decoders for propagation
          to deeper spec constructors.
    """
    # Decode benchmark-specific flags first and use them while decoding the
    # rest of the BenchmarkConfigSpec's options.
    decoders = decoders.copy()
    self.flags = decoders.pop('flags').Decode(config.pop('flags', None),
                                              component_full_name, flag_values)
    with flag_util.FlagDictSubstitution(flag_values, lambda: self.flags):
      super(BenchmarkConfigSpec, self)._DecodeAndInit(
          component_full_name, config, decoders, flag_values)
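To make the ordering concrete, a hedged sketch of a config that exercises this
path; the option names (vm_groups, vm_count) and the benchmark name are
illustrative and may not match the real BenchmarkConfigSpec schema exactly:

config = {
    'flags': {'cloud': 'GCP'},  # Decoded first, then substituted into FLAGS.
    'vm_groups': {'default': {'vm_count': 2}},
}
spec = BenchmarkConfigSpec('my_benchmark', flag_values=FLAGS, **config)
# While the remaining options (vm_groups here) are decoded, any reads of FLAGS
# inside nested spec constructors see the merged per-benchmark flag values.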