Code example #1
  def GetBenchmarkSpec(cls, benchmark_module, config, uid):
    """Unpickles or creates a BenchmarkSpec and returns it.

    Args:
      benchmark_module: The benchmark module object.
      config: BenchmarkConfigSpec. The configuration for the benchmark.
      uid: An identifier unique to this run of the benchmark even if the same
          benchmark is run multiple times with different configs.

    Returns:
      A BenchmarkSpec object.
    """
    if stages.PROVISION in FLAGS.run_stage:
      return cls(benchmark_module, config, uid)

    try:
      with open(cls._GetPickleFilename(uid), 'rb') as pickle_file:
        spec = pickle.load(pickle_file)
    except Exception as e:  # pylint: disable=broad-except
      logging.error('Unable to unpickle spec file for benchmark %s.',
                    benchmark_module.BENCHMARK_NAME)
      raise e
    # Always let the spec be deleted after being unpickled so that
    # it's possible to run cleanup even if cleanup has already run.
    spec.deleted = False
    spec.status = benchmark_status.SKIPPED
    context.SetThreadBenchmarkSpec(spec)
    return spec
Code example #2
  def GetSpecFromFile(cls, name, config):
    """Unpickles the spec and returns it.

    Args:
      name: The name of the benchmark (and the name of the pickled file).
      config: BenchmarkConfigSpec. The benchmark configuration to use while
          running the current stage.

    Returns:
      A BenchmarkSpec object.
    """
    file_name = '%s/%s' % (vm_util.GetTempDir(), name)
    try:
      with open(file_name, 'rb') as pickle_file:
        spec = pickle.load(pickle_file)
    except Exception as e:  # pylint: disable=broad-except
      logging.error('Unable to unpickle spec file for benchmark %s.', name)
      raise e
    spec.config = config
    # Always let the spec be deleted after being unpickled so that
    # it's possible to run cleanup even if cleanup has already run.
    spec.deleted = False
    context.SetThreadBenchmarkSpec(spec)
    return spec
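
The save side of this round trip is not shown on this page; examples #7 and #14 only call spec.Pickle(). A minimal sketch of what that step could look like, assuming the same temp-directory naming convention as GetSpecFromFile above (the standalone PickleSpecToFile helper is hypothetical, not part of PerfKitBenchmarker):

import pickle

from perfkitbenchmarker import vm_util


def PickleSpecToFile(spec, name):
  """Pickles a BenchmarkSpec to the temp dir so GetSpecFromFile can reload it.

  Illustrative only: the real BenchmarkSpec exposes this as its Pickle method.
  """
  file_name = '%s/%s' % (vm_util.GetTempDir(), name)
  with open(file_name, 'wb') as pickle_file:
    pickle.dump(spec, pickle_file)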
Code example #3
  def __init__(self, benchmark_config, benchmark_name, benchmark_uid):
    """Initialize a BenchmarkSpec object.

    Args:
      benchmark_config: BenchmarkConfigSpec. The configuration for the
          benchmark.
      benchmark_name: string. Name of the benchmark.
      benchmark_uid: An identifier unique to this run of the benchmark even
          if the same benchmark is run multiple times with different configs.
    """
    self.config = benchmark_config
    self.name = benchmark_name
    self.uid = benchmark_uid
    self.vms = []
    self.networks = {}
    self.firewalls = {}
    self.networks_lock = threading.Lock()
    self.firewalls_lock = threading.Lock()
    self.vm_groups = {}
    self.deleted = False
    self.file_name = os.path.join(vm_util.GetTempDir(), self.uid)
    self.uuid = str(uuid.uuid4())
    self.always_call_cleanup = False

    # Set the current thread's BenchmarkSpec object to this one.
    context.SetThreadBenchmarkSpec(self)
Code example #4
    def _DoWork(_):
      self.assertEqual(benchmark_spec, context.GetThreadBenchmarkSpec())
      new_benchmark_spec = mock.MagicMock()
      context.SetThreadBenchmarkSpec(new_benchmark_spec)
      self.assertNotEqual(benchmark_spec, context.GetThreadBenchmarkSpec())
      self.assertEqual(new_benchmark_spec,
                       context.GetThreadBenchmarkSpec())
Code example #5
    def doAzureDiskTest(self, storage_type, disk_type, machine_type,
                        goal_media, goal_replication, goal_legacy_disk_type):
        with mock.patch(azure_disk.__name__ + '.FLAGS') as disk_flags:
            disk_flags.azure_storage_type = storage_type
            disk_spec = disk.BaseDiskSpec(_COMPONENT,
                                          disk_size=2,
                                          disk_type=disk_type)

            context.SetThreadBenchmarkSpec(
                benchmark_spec.BenchmarkSpec({}, 'name', 'uid'))

            vm_spec = virtual_machine.BaseVmSpec('test_vm_spec.AZURE',
                                                 zone='East US 2',
                                                 machine_type=machine_type)
            vm = azure_virtual_machine.DebianBasedAzureVirtualMachine(vm_spec)

            azure_disk.AzureDisk.Create = mock.Mock()
            azure_disk.AzureDisk.Attach = mock.Mock()
            vm.CreateScratchDisk(disk_spec)

            self.assertEqual(
                vm.scratch_disks[0].metadata, {
                    disk.MEDIA: goal_media,
                    disk.REPLICATION: goal_replication,
                    disk.LEGACY_DISK_TYPE: goal_legacy_disk_type
                })
Code example #6
  def __init__(self, benchmark_config, benchmark_name, benchmark_uid):
    """Initialize a BenchmarkSpec object.

    Args:
      benchmark_config: A Python dictionary representation of the configuration
        for the benchmark. For a complete explanation, see
        perfkitbenchmarker/configs/__init__.py.
      benchmark_name: string. Name of the benchmark.
      benchmark_uid: An identifier unique to this run of the benchmark even
        if the same benchmark is run multiple times with different configs.
    """
    # TODO(skschneider): Build the BenchmarkConfigSpec outside of this method,
    # and pass it in.
    self.config = benchmark_config_spec.BenchmarkConfigSpec(
        benchmark_name, flag_values=FLAGS, **benchmark_config)
    self.name = benchmark_name
    self.uid = benchmark_uid
    self.vms = []
    self.networks = {}
    self.firewalls = {}
    self.networks_lock = threading.Lock()
    self.firewalls_lock = threading.Lock()
    self.vm_groups = {}
    self.deleted = False
    self.file_name = os.path.join(vm_util.GetTempDir(), self.uid)
    self.uuid = str(uuid.uuid4())
    self.always_call_cleanup = False

    # Set the current thread's BenchmarkSpec object to this one.
    context.SetThreadBenchmarkSpec(self)
Code example #7
def RunBenchmark(spec, collector):
  """Runs a single benchmark and adds the results to the collector.

  Args:
    spec: The BenchmarkSpec object with run information.
    collector: The SampleCollector object to add samples to.
  """
  spec.status = benchmark_status.FAILED
  # Modify the logger prompt for messages logged within this function.
  label_extension = '{}({}/{})'.format(spec.name, spec.sequence_number,
                                       spec.total_benchmarks)
  context.SetThreadBenchmarkSpec(spec)
  log_context = log_util.GetThreadLogContext()
  with log_context.ExtendLabel(label_extension):
    with spec.RedirectGlobalFlags():
      end_to_end_timer = timing_util.IntervalTimer()
      detailed_timer = timing_util.IntervalTimer()
      try:
        with end_to_end_timer.Measure('End to End'):
          if stages.PROVISION in FLAGS.run_stage:
            DoProvisionPhase(spec, detailed_timer)

          if stages.PREPARE in FLAGS.run_stage:
            DoPreparePhase(spec, detailed_timer)

          if stages.RUN in FLAGS.run_stage:
            DoRunPhase(spec, collector, detailed_timer)

          if stages.CLEANUP in FLAGS.run_stage:
            DoCleanupPhase(spec, detailed_timer)

          if stages.TEARDOWN in FLAGS.run_stage:
            DoTeardownPhase(spec, detailed_timer)

        # Add timing samples.
        if (FLAGS.run_stage == stages.STAGES and
            timing_util.EndToEndRuntimeMeasurementEnabled()):
          collector.AddSamples(
              end_to_end_timer.GenerateSamples(), spec.name, spec)
        if timing_util.RuntimeMeasurementsEnabled():
          collector.AddSamples(
              detailed_timer.GenerateSamples(), spec.name, spec)

      except:
        # Resource cleanup (below) can take a long time. Log the error to give
        # immediate feedback, then re-throw.
        logging.exception('Error during benchmark %s', spec.name)
        # If the particular benchmark requests us to always call cleanup, do it
        # here.
        if stages.CLEANUP in FLAGS.run_stage and spec.always_call_cleanup:
          DoCleanupPhase(spec, detailed_timer)
        raise
      finally:
        if stages.TEARDOWN in FLAGS.run_stage:
          spec.Delete()
        events.benchmark_end.send(benchmark_spec=spec)
        # Pickle spec to save final resource state.
        spec.Pickle()
  spec.status = benchmark_status.SUCCEEDED
Code example #8
  def __init__(self, benchmark_module, benchmark_config, benchmark_uid):
    """Initialize a BenchmarkSpec object.

    Args:
      benchmark_module: The benchmark module object.
      benchmark_config: BenchmarkConfigSpec. The configuration for the
          benchmark.
      benchmark_uid: An identifier unique to this run of the benchmark even
          if the same benchmark is run multiple times with different configs.
    """
    self.config = benchmark_config
    self.name = benchmark_module.BENCHMARK_NAME
    self.uid = benchmark_uid
    self.status = benchmark_status.SKIPPED
    self.failed_substatus = None
    self.status_detail = None
    BenchmarkSpec.total_benchmarks += 1
    self.sequence_number = BenchmarkSpec.total_benchmarks
    self.vms = []
    self.networks = {}
    self.firewalls = {}
    self.networks_lock = threading.Lock()
    self.firewalls_lock = threading.Lock()
    self.vm_groups = {}
    self.container_specs = benchmark_config.container_specs or {}
    self.container_registry = None
    self.deleted = False
    self.uuid = '%s-%s' % (FLAGS.run_uri, uuid.uuid4())
    self.always_call_cleanup = False
    self.spark_service = None
    self.dpb_service = None
    self.container_cluster = None
    self.relational_db = None
    self.tpus = []
    self.tpu_groups = {}
    self.edw_service = None
    self.nfs_service = None
    self.smb_service = None
    self.app_groups = {}
    self._zone_index = 0
    self.capacity_reservations = []
    self.placement_group_specs = benchmark_config.placement_group_specs or {}
    self.placement_groups = {}
    self.vms_to_boot = (
        self.config.vm_groups if self.config.relational_db is None else
        relational_db.VmsToBoot(self.config.relational_db.vm_groups))

    # Modules can't be pickled, but functions can, so we store the functions
    # necessary to run the benchmark.
    self.BenchmarkPrepare = benchmark_module.Prepare
    self.BenchmarkRun = benchmark_module.Run
    self.BenchmarkCleanup = benchmark_module.Cleanup

    # Set the current thread's BenchmarkSpec object to this one.
    context.SetThreadBenchmarkSpec(self)
Code example #9
  def testPropagation(self):
    benchmark_spec = mock.MagicMock()
    context.SetThreadBenchmarkSpec(benchmark_spec)

    def _DoWork(_):
      self.assertEqual(benchmark_spec, context.GetThreadBenchmarkSpec())
      new_benchmark_spec = mock.MagicMock()
      context.SetThreadBenchmarkSpec(new_benchmark_spec)
      self.assertNotEqual(benchmark_spec, context.GetThreadBenchmarkSpec())
      self.assertEqual(new_benchmark_spec,
                       context.GetThreadBenchmarkSpec())

    vm_util.RunThreaded(_DoWork, list(range(10)))
Code example #10
  def __init__(self, benchmark_module, benchmark_config, benchmark_uid):
    """Initialize a BenchmarkSpec object.

    Args:
      benchmark_module: The benchmark module object.
      benchmark_config: BenchmarkConfigSpec. The configuration for the
          benchmark.
      benchmark_uid: An identifier unique to this run of the benchmark even
          if the same benchmark is run multiple times with different configs.
    """
    self.config = benchmark_config
    self.name = benchmark_module.BENCHMARK_NAME
    self.uid = benchmark_uid
    self.status = benchmark_status.SKIPPED
    BenchmarkSpec.total_benchmarks += 1
    self.sequence_number = BenchmarkSpec.total_benchmarks
    self.vms = []
    self.networks = {}
    self.firewalls = {}
    self.networks_lock = threading.Lock()
    self.firewalls_lock = threading.Lock()
    self.vm_groups = {}
    self.deleted = False
    self.uuid = '%s-%s' % (FLAGS.run_uri, uuid.uuid4())
    self.always_call_cleanup = False
    self.spark_service = None
    self.dpb_service = None
    self.container_cluster = None
    self.managed_relational_db = None
    self.cloud_tpu = None

    self._zone_index = 0

    # Modules can't be pickled, but functions can, so we store the functions
    # necessary to run the benchmark.
    self.BenchmarkPrepare = benchmark_module.Prepare
    self.BenchmarkRun = benchmark_module.Run
    self.BenchmarkCleanup = benchmark_module.Cleanup

    # Set the current thread's BenchmarkSpec object to this one.
    context.SetThreadBenchmarkSpec(self)
Code example #11
    def doAwsDiskTest(self, disk_type, machine_type, goal_media,
                      goal_replication, goal_legacy_disk_type):
        disk_spec = aws_disk.AwsDiskSpec(_COMPONENT,
                                         disk_size=2,
                                         disk_type=disk_type)

        context.SetThreadBenchmarkSpec(
            benchmark_spec.BenchmarkSpec({}, 'name', 'uid'))

        vm_spec = virtual_machine.BaseVmSpec('test_vm_spec.AWS',
                                             zone='us-east-1a',
                                             machine_type=machine_type)
        vm = aws_virtual_machine.DebianBasedAwsVirtualMachine(vm_spec)

        vm.CreateScratchDisk(disk_spec)

        self.assertEqual(
            vm.scratch_disks[0].metadata, {
                disk.MEDIA: goal_media,
                disk.REPLICATION: goal_replication,
                disk.LEGACY_DISK_TYPE: goal_legacy_disk_type
            })
Code example #12
def _ExecuteThreadCall(target_arg_tuple, call_id, queue, parent_log_context,
                       parent_benchmark_spec):
  """Function invoked in another thread by RunParallelThreads.

  Executes a specified function call and captures the traceback upon exception.

  Args:
    target_arg_tuple: (target, args, kwargs) tuple containing the function to
        call and the arguments to pass it.
    call_id: int. Index corresponding to the call in the thread_params argument
        of RunParallelThreads.
    queue: Queue. Receives a ThreadCallResult.
    parent_log_context: ThreadLogContext of the parent thread.
    parent_benchmark_spec: BenchmarkSpec of the parent thread.
  """
  target, args, kwargs = target_arg_tuple
  try:
    log_context = log_util.ThreadLogContext(parent_log_context)
    log_util.SetThreadLogContext(log_context)
    context.SetThreadBenchmarkSpec(parent_benchmark_spec)
    queue.put(ThreadCallResult(call_id, target(*args, **kwargs), None))
  except:
    queue.put(ThreadCallResult(call_id, None, traceback.format_exc()))
Code example #13
  def CopyToCurrentThread(self):
    """Sets the thread context of the current thread."""
    log_util.SetThreadLogContext(log_util.ThreadLogContext(self.log_context))
    context.SetThreadBenchmarkSpec(self.benchmark_spec)
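
CopyToCurrentThread is typically paired with a snapshot taken on the parent thread. A minimal usage sketch follows, assuming the parent thread already has a log context and a benchmark spec set (as in the examples above); the ThreadContextSnapshot class name and the worker wiring are illustrative, only the method body is taken from the example:

import threading

from perfkitbenchmarker import context
from perfkitbenchmarker import log_util


class ThreadContextSnapshot(object):
  """Captures the calling thread's log context and benchmark spec."""

  def __init__(self):
    # Snapshot the parent thread's state at construction time.
    self.log_context = log_util.GetThreadLogContext()
    self.benchmark_spec = context.GetThreadBenchmarkSpec()

  def CopyToCurrentThread(self):
    """Sets the thread context of the current thread."""
    log_util.SetThreadLogContext(log_util.ThreadLogContext(self.log_context))
    context.SetThreadBenchmarkSpec(self.benchmark_spec)


def _Worker(snapshot):
  # Re-establish the parent's context so code in this thread sees the same
  # BenchmarkSpec via context.GetThreadBenchmarkSpec() and logs with the
  # parent's labels.
  snapshot.CopyToCurrentThread()


snapshot = ThreadContextSnapshot()  # taken on the parent thread
worker = threading.Thread(target=_Worker, args=(snapshot,))
worker.start()
worker.join()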
Code example #14
File: pkb.py  Project: shaokaiyang/PerfKitBenchmarker
def RunBenchmark(spec, collector):
  """Runs a single benchmark and adds the results to the collector.

  Args:
    spec: The BenchmarkSpec object with run information.
    collector: The SampleCollector object to add samples to.
  """

  # Since there are issues with handling SIGINT/KeyboardInterrupt (see
  # further discussion in _BackgroundProcessTaskManager), this mechanism is
  # provided for defense in depth to force-skip pending runs after SIGINT.
  for f in _SKIP_PENDING_RUNS_CHECKS:
    if f():
      logging.warning('Skipping benchmark.')
      return

  spec.status = benchmark_status.FAILED
  current_run_stage = stages.PROVISION
  # Modify the logger prompt for messages logged within this function.
  label_extension = '{}({}/{})'.format(
      spec.name, spec.sequence_number, spec.total_benchmarks)
  context.SetThreadBenchmarkSpec(spec)
  log_context = log_util.GetThreadLogContext()
  with log_context.ExtendLabel(label_extension):
    with spec.RedirectGlobalFlags():
      end_to_end_timer = timing_util.IntervalTimer()
      detailed_timer = timing_util.IntervalTimer()
      try:
        with end_to_end_timer.Measure('End to End'):
          if stages.PROVISION in FLAGS.run_stage:
            DoProvisionPhase(spec, detailed_timer)

          if stages.PREPARE in FLAGS.run_stage:
            current_run_stage = stages.PREPARE
            DoPreparePhase(spec, detailed_timer)

          if stages.RUN in FLAGS.run_stage:
            current_run_stage = stages.RUN
            DoRunPhase(spec, collector, detailed_timer)

          if stages.CLEANUP in FLAGS.run_stage:
            current_run_stage = stages.CLEANUP
            DoCleanupPhase(spec, detailed_timer)

          if stages.TEARDOWN in FLAGS.run_stage:
            current_run_stage = stages.TEARDOWN
            DoTeardownPhase(spec, detailed_timer)

        # Add timing samples.
        if (FLAGS.run_stage == stages.STAGES and
            timing_util.EndToEndRuntimeMeasurementEnabled()):
          collector.AddSamples(
              end_to_end_timer.GenerateSamples(), spec.name, spec)
        if timing_util.RuntimeMeasurementsEnabled():
          collector.AddSamples(
              detailed_timer.GenerateSamples(), spec.name, spec)

        # Add resource related samples.
        collector.AddSamples(spec.GetSamples(), spec.name, spec)

      except Exception as e:
        # Log specific type of failure, if known
        # TODO(dlott) Move to exception chaining with Python3 support
        if (isinstance(e, errors.Benchmarks.InsufficientCapacityCloudFailure)
            or 'InsufficientCapacityCloudFailure' in str(e)):
          spec.failed_substatus = (
              benchmark_status.FailedSubstatus.INSUFFICIENT_CAPACITY)
          spec.status_detail = str(e)
        elif (isinstance(e, errors.Benchmarks.QuotaFailure)
              or 'QuotaFailure' in str(e)):
          spec.failed_substatus = benchmark_status.FailedSubstatus.QUOTA
          spec.status_detail = str(e)

        # Resource cleanup (below) can take a long time. Log the error to give
        # immediate feedback, then re-throw.
        logging.exception('Error during benchmark %s', spec.name)
        if FLAGS.create_failed_run_samples:
          collector.AddSamples(MakeFailedRunSample(spec, str(e),
                                                   current_run_stage),
                               spec.name,
                               spec)
        # If the particular benchmark requests us to always call cleanup, do it
        # here.
        if stages.CLEANUP in FLAGS.run_stage and spec.always_call_cleanup:
          DoCleanupPhase(spec, detailed_timer)
        raise
      finally:
        # Deleting resources should happen first so any errors with publishing
        # don't prevent teardown.
        if stages.TEARDOWN in FLAGS.run_stage:
          spec.Delete()
        if FLAGS.publish_after_run:
          collector.PublishSamples()
        events.benchmark_end.send(benchmark_spec=spec)
        # Pickle spec to save final resource state.
        spec.Pickle()
  spec.status = benchmark_status.SUCCEEDED
Code example #15
  def testSetGet(self):
    benchmark_spec = mock.MagicMock()
    context.SetThreadBenchmarkSpec(benchmark_spec)
    self.assertEqual(benchmark_spec, context.GetThreadBenchmarkSpec())
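
All of the examples above revolve around a per-thread registry: SetThreadBenchmarkSpec stores a spec for the calling thread and GetThreadBenchmarkSpec reads it back, which is why worker threads (examples #4, #9, #12, #13) must explicitly copy the parent's spec. A minimal, self-contained sketch of that pattern, assuming a threading.local-backed store (an illustration of the idea, not the actual perfkitbenchmarker.context implementation):

import threading


class _ThreadData(threading.local):
  """Per-thread storage; each thread gets its own benchmark_spec attribute."""

  def __init__(self):
    super(_ThreadData, self).__init__()
    self.benchmark_spec = None


_thread_data = _ThreadData()


def SetThreadBenchmarkSpec(benchmark_spec):
  """Associates a BenchmarkSpec with the calling thread."""
  _thread_data.benchmark_spec = benchmark_spec


def GetThreadBenchmarkSpec():
  """Returns the BenchmarkSpec set on the calling thread, or None."""
  return _thread_data.benchmark_spec


if __name__ == '__main__':
  SetThreadBenchmarkSpec('main-spec')

  def _Check():
    # A new thread starts with an empty slot, independent of the main thread.
    assert GetThreadBenchmarkSpec() is None

  t = threading.Thread(target=_Check)
  t.start()
  t.join()
  assert GetThreadBenchmarkSpec() == 'main-spec'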