def testFlagOverride(self):
     config_spec = benchmark_config_spec.BenchmarkConfigSpec(
         NAME,
         flag_values=FLAGS,
         flags={'benchmark_spec_test_flag': 1},
         vm_groups={})
     spec = benchmark_spec.BenchmarkSpec(mock.MagicMock(), config_spec, UID)
     self.assertEqual(FLAGS.benchmark_spec_test_flag, 0)
     with spec.RedirectGlobalFlags():
         self.assertEqual(FLAGS.benchmark_spec_test_flag, 1)
         FLAGS.benchmark_spec_test_flag = 2
         self.assertEqual(FLAGS.benchmark_spec_test_flag, 2)
     self.assertEqual(FLAGS.benchmark_spec_test_flag, 0)
    def testBackgroundWorkloadVanillaConfig(self):
        """ Test that nothing happens with the vanilla config """
        with mock_flags.PatchFlags() as mocked_flags:
            self.setupCommonFlags(mocked_flags)
            mocked_flags.background_cpu_threads = None
            config = configs.LoadConfig(ping_benchmark.BENCHMARK_CONFIG, {},
                                        NAME)
            spec = benchmark_spec.BenchmarkSpec(config, NAME, UID)
            spec.ConstructVirtualMachines()

            for vm in spec.vms:
                self.assertIsNone(vm.background_cpu_threads)
            self._CheckVMFromSpec(spec, 0)
    def setUp(self):
        super(_DiskMetadataTestCase, self).setUp()
        self.addCleanup(context.SetThreadBenchmarkSpec, None)

        p = mock.patch(vm_util.__name__ + '.GetTempDir',
                       return_value='/tmp/dir')
        p.start()
        self.addCleanup(p.stop)

        config_spec = benchmark_config_spec.BenchmarkConfigSpec(
            _BENCHMARK_NAME, flag_values=FLAGS, vm_groups={})
        self.benchmark_spec = benchmark_spec.BenchmarkSpec(
            mock.MagicMock(), config_spec, _BENCHMARK_UID)
Example #4
 def setUp(self):
   self._mocked_flags = mock_flags.PatchTestCaseFlags(self)
   self._mocked_flags.cloud = providers.GCP
   self._mocked_flags.gcloud_path = 'test_gcloud'
   self._mocked_flags.os_type = os_types.DEBIAN
   self._mocked_flags.run_uri = 'aaaaaa'
   self._mocked_flags.gcp_instance_metadata = []
   self._mocked_flags.gcp_instance_metadata_from_file = []
   # Creating a VM object causes network objects to be added to the current
   # thread's benchmark spec. Create such a benchmark spec for these tests.
   self.addCleanup(context.SetThreadBenchmarkSpec, None)
   config_spec = benchmark_config_spec.BenchmarkConfigSpec(
       _BENCHMARK_NAME, flag_values=self._mocked_flags, vm_groups={})
   self._benchmark_spec = benchmark_spec.BenchmarkSpec(
       config_spec, _BENCHMARK_NAME, _BENCHMARK_UID)
    def testStaticVms(self):
        config = configs.LoadConfig(STATIC_VM_CONFIG, {}, NAME)
        spec = benchmark_spec.BenchmarkSpec(config, NAME, UID)
        spec.ConstructVirtualMachines()

        self.assertEqual(len(spec.vms), 4)

        vm0 = spec.vm_groups['group1'][0]
        vm1, vm2, vm3 = spec.vm_groups['group2']

        self.assertIsInstance(vm0, gce_vm.GceVirtualMachine)
        self.assertIsInstance(vm1, static_vm.StaticVirtualMachine)
        self.assertIsInstance(vm2, static_vm.RhelBasedStaticVirtualMachine)
        self.assertIsInstance(vm3, gce_vm.GceVirtualMachine)

        self.assertEqual(vm2.disk_specs[0].mount_point, '/scratch')
 def testWindowsVMCausesError(self):
     """ windows vm with background_cpu_threads raises exception """
     with mock_flags.PatchFlags() as mocked_flags:
         self.setupCommonFlags(mocked_flags)
         mocked_flags.background_cpu_threads = 1
         mocked_flags.os_type = benchmark_spec.WINDOWS
         config = configs.LoadConfig(ping_benchmark.BENCHMARK_CONFIG, {},
                                     NAME)
         spec = benchmark_spec.BenchmarkSpec(config, NAME, UID)
         spec.ConstructVirtualMachines()
         with self.assertRaises(Exception):
             spec.Prepare()
         with self.assertRaises(Exception):
             spec.StartBackgroundWorkload()
         with self.assertRaises(Exception):
             spec.StopBackgroundWorkload()
Example #7
 def testBenchmarkSpecLoadsProvider(self):
   p = mock.patch(providers.__name__ + '.LoadProvider')
   p.start()
   self.addCleanup(p.stop)
   config = {
       'vm_groups': {
           'group1': {
               'cloud': 'AWS',
               'vm_count': 0,
               'vm_spec': {'AWS': {}}
           }
       }
   }
   spec = benchmark_spec.BenchmarkSpec(config, 'name', 'uid')
   spec.ConstructVirtualMachines()
   providers.LoadProvider.assert_called_with('aws')
def assertDiskMounts(benchmark_config, mount_point):
  """Test whether a disk mounts in a given configuration.

  Sets up a virtual machine following benchmark_config and then tests
  whether the path mount_point contains a working disk by trying to
  create a file there. Returns nothing if file creation works;
  otherwise raises an exception.

  Args:
    benchmark_config: a dict in the format of
      benchmark_spec.BenchmarkSpec. The config must specify exactly
      one virtual machine.
    mount_point: a path, represented as a string.

  Raises:
    RemoteCommandError if it cannot create a file at mount_point and
    verify that the file exists.

    AssertionError if benchmark_config does not specify exactly one
    virtual machine.
  """

  assert len(benchmark_config['vm_groups']) == 1
  vm_group = next(iter(benchmark_config['vm_groups'].values()))
  assert vm_group.get('num_vms', 1) == 1
  m = mock.MagicMock()
  m.BENCHMARK_NAME = _BENCHMARK_NAME
  config_spec = benchmark_config_spec.BenchmarkConfigSpec(
      _BENCHMARK_NAME, flag_values=flags.FLAGS, **benchmark_config)
  spec = benchmark_spec.BenchmarkSpec(
      m, config_spec, _BENCHMARK_UID)
  with spec.RedirectGlobalFlags():
    try:
      spec.ConstructVirtualMachines()
      spec.Provision()

      vm = spec.vms[0]

      test_file_path = os.path.join(mount_point, 'test_file')
      vm.RemoteCommand('touch %s' % test_file_path)

      # This will raise RemoteCommandError if the test file does not
      # exist.
      vm.RemoteCommand('test -e %s' % test_file_path)

    finally:
      spec.Delete()
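
For reference, a call to the helper above might look like the sketch below. The single-VM config dict is illustrative only: the group name, machine type, zone, and disk settings are placeholders chosen to match the config format used elsewhere in these examples, not a ready-to-run configuration.

# Hypothetical single-VM config in the dict format described in the docstring;
# all concrete values below (group name, machine type, zone, disk settings)
# are placeholders.
_SCRATCH_CONFIG = {
    'vm_groups': {
        'default': {
            'cloud': 'GCP',
            'vm_spec': {'GCP': {'machine_type': 'n1-standard-2',
                                'zone': 'us-central1-a'}},
            'disk_spec': {'GCP': {'disk_type': 'pd-ssd',
                                  'disk_size': 2,
                                  'mount_point': '/scratch'}},
        }
    }
}

# Returns None on success; raises RemoteCommandError if the file check fails,
# or AssertionError if the config does not describe exactly one VM.
assertDiskMounts(_SCRATCH_CONFIG, '/scratch')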
Example #9
    def setUp(self):
        super(AwsVirtualMachineTestCase, self).setUp()
        FLAGS.cloud = providers.AWS
        FLAGS.os_type = os_types.DEBIAN
        FLAGS.run_uri = 'aaaaaa'
        FLAGS.temp_dir = 'tmp'
        p = mock.patch('perfkitbenchmarker.providers.aws.'
                       'util.IssueRetryableCommand')
        p.start()
        self.addCleanup(p.stop)
        p2 = mock.patch('perfkitbenchmarker.vm_util.IssueCommand')
        p2.start()
        self.addCleanup(p2.stop)

        # VM Creation depends on there being a BenchmarkSpec.
        config_spec = benchmark_config_spec.BenchmarkConfigSpec(
            _BENCHMARK_NAME, flag_values=FLAGS, vm_groups={})
        self.spec = benchmark_spec.BenchmarkSpec(mock.MagicMock(), config_spec,
                                                 _BENCHMARK_UID)
        self.addCleanup(context.SetThreadBenchmarkSpec, None)

        self.vm = aws_virtual_machine.AwsVirtualMachine(
            aws_virtual_machine.AwsVmSpec('test_vm_spec.AWS',
                                          zone='us-east-1a',
                                          machine_type='c3.large',
                                          spot_price=123.45))
        self.vm.id = 'i-foo'
        self.vm.image = 'ami-12345'
        self.vm.client_token = '00000000-1111-2222-3333-444444444444'
        network_mock = mock.MagicMock()
        network_mock.subnet = mock.MagicMock(id='subnet-id')
        placement_group = mock.MagicMock()
        placement_group.name = 'placement_group_name'
        placement_group.strategy = 'cluster'
        network_mock.placement_group = placement_group
        self.vm.network = network_mock
        self.vm.placement_group = placement_group

        self.response = self.open_json_data('aws-describe-instance.json')
        self.sir_response = self.open_json_data(
            'aws-describe-spot-instance-requests.json')
        self.vm.network.is_static = False
        self.vm.network.regional_network.vpc.default_security_group_id = 'sg-1234'
    def setUp(self):
        super(GCEVMFlagsTestCase, self).setUp()
        FLAGS.cloud = providers.GCP
        FLAGS.gcloud_path = 'test_gcloud'
        FLAGS.run_uri = 'aaaaaa'
        FLAGS.gcp_instance_metadata = []
        FLAGS.gcp_instance_metadata_from_file = []
        # Creating a VM object causes network objects to be added to the current
        # thread's benchmark spec. Create such a benchmark spec for these tests.
        self.addCleanup(context.SetThreadBenchmarkSpec, None)
        config_spec = benchmark_config_spec.BenchmarkConfigSpec(
            _BENCHMARK_NAME, flag_values=FLAGS, vm_groups={})
        self._benchmark_spec = benchmark_spec.BenchmarkSpec(
            mock.MagicMock(), config_spec, _BENCHMARK_UID)

        get_tmp_dir_mock = mock.patch(vm_util.__name__ + '.GetTempDir',
                                      return_value='TempDir')
        get_tmp_dir_mock.start()
        self.addCleanup(get_tmp_dir_mock.stop)
Example #11
def DoPreparePhase(benchmark, info, name, timer):
    """Performs the Prepare phase of benchmark execution.

    Args:
      benchmark: The benchmark module.
      info: The dict returned by the benchmark module's GetInfo function.
      name: A string containing the benchmark name.
      timer: An IntervalTimer that measures the start and stop times of resource
        provisioning and the benchmark module's Prepare function.

    Returns:
      The BenchmarkSpec created for the benchmark.
    """
    logging.info('Preparing benchmark %s', name)
    with timer.Measure('Resource Provisioning'):
        spec = benchmark_spec.BenchmarkSpec(info)
        spec.Prepare()
    with timer.Measure('Benchmark Prepare'):
        benchmark.Prepare(spec)
    return spec
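
A call site for this info-dict variant might look like the short sketch below. It assumes `benchmark` is a module that still exposes the older GetInfo()/Prepare() interface (as in the older RunBenchmark example later in this section) and reuses timing_util.IntervalTimer from the surrounding code.

# Sketch only: `benchmark` stands for any module exposing GetInfo() and
# Prepare(); the benchmark name is read from the info dict it returns.
info = benchmark.GetInfo()
timer = timing_util.IntervalTimer()
spec = DoPreparePhase(benchmark, info, info['name'], timer)
# `spec` now references the provisioned BenchmarkSpec, and the timer's
# GenerateSamples() output records how long provisioning and Prepare took.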
Example #12
    def doAwsDiskTest(self, disk_type, machine_type, goal_media,
                      goal_replication, goal_legacy_disk_type):
        disk_spec = aws_disk.AwsDiskSpec(_COMPONENT,
                                         disk_size=2,
                                         disk_type=disk_type)

        context.SetThreadBenchmarkSpec(
            benchmark_spec.BenchmarkSpec({}, 'name', 'uid'))

        vm_spec = virtual_machine.BaseVmSpec('test_vm_spec.AWS',
                                             zone='us-east-1a',
                                             machine_type=machine_type)
        vm = aws_virtual_machine.DebianBasedAwsVirtualMachine(vm_spec)

        vm.CreateScratchDisk(disk_spec)

        self.assertEqual(
            vm.scratch_disks[0].metadata, {
                disk.MEDIA: goal_media,
                disk.REPLICATION: goal_replication,
                disk.LEGACY_DISK_TYPE: goal_legacy_disk_type
            })
Example #13
    def setUp(self):
        mocked_flags = mock_flags.PatchTestCaseFlags(self)
        mocked_flags.cloud = providers.AWS
        mocked_flags.os_type = os_types.DEBIAN
        mocked_flags.run_uri = 'aaaaaa'
        mocked_flags.temp_dir = 'tmp'
        p = mock.patch('perfkitbenchmarker.providers.aws.'
                       'util.IssueRetryableCommand')
        p.start()
        self.addCleanup(p.stop)
        p2 = mock.patch('perfkitbenchmarker.vm_util.IssueCommand')
        p2.start()
        self.addCleanup(p2.stop)

        # VM Creation depends on there being a BenchmarkSpec.
        config_spec = benchmark_config_spec.BenchmarkConfigSpec(
            _BENCHMARK_NAME, flag_values=mocked_flags, vm_groups={})
        self.spec = benchmark_spec.BenchmarkSpec(mock.MagicMock(), config_spec,
                                                 _BENCHMARK_UID)
        self.addCleanup(context.SetThreadBenchmarkSpec, None)

        self.vm = aws_virtual_machine.AwsVirtualMachine(
            aws_virtual_machine.AwsVmSpec('test_vm_spec.AWS',
                                          zone='us-east-1a',
                                          machine_type='c3.large',
                                          spot_price=123.45))
        self.vm.id = 'i-foo'
        self.vm.image = 'ami-12345'
        self.vm.client_token = '00000000-1111-2222-3333-444444444444'
        network_mock = mock.MagicMock()
        network_mock.subnet = mock.MagicMock(id='subnet-id')
        placement_group = mock.MagicMock()
        placement_group.name = 'placement_group_name'
        network_mock.placement_group = placement_group
        self.vm.network = network_mock

        self.response = self.openJsonData('aws-describe-instance.json')
        self.sir_response = self.openJsonData(
            'aws-describe-spot-instance-requests.json')
  def setUp(self):
    self.saved_flag_values = flagsaver.save_flag_values()
    self.patches = []

    vm_prefix = linux_virtual_machine.__name__ + '.BaseLinuxMixin'
    self.patches.append(
        mock.patch(vm_prefix + '.FormatDisk'))
    self.patches.append(
        mock.patch(vm_prefix + '.MountDisk'))
    self.patches.append(
        mock.patch(
            util.__name__ + '.GetDefaultProject', return_value='test_project'))

    # Patch subprocess.Popen to make sure we don't issue any commands to spin up
    # resources.
    self.patches.append(mock.patch('subprocess.Popen'))
    self.patches.append(
        mock.patch(vm_util.__name__ + '.GetTempDir', return_value='/tmp/dir'))

    self._PatchCloudSpecific()

    for p in self.patches:
      p.start()
      self.addCleanup(p.stop)

    # We need the disk class mocks to return new mocks each time they are
    # called. Otherwise all "disks" instantiated will be the same object.
    self._GetDiskClass().side_effect = (
        lambda *args, **kwargs: mock.MagicMock(is_striped=False))

    # VM Creation depends on there being a BenchmarkSpec.
    config_spec = benchmark_config_spec.BenchmarkConfigSpec(
        _BENCHMARK_NAME, flag_values=FLAGS, vm_groups={})
    self.spec = benchmark_spec.BenchmarkSpec(mock.MagicMock(), config_spec,
                                             _BENCHMARK_UID)
    self.addCleanup(context.SetThreadBenchmarkSpec, None)
    self.addCleanup(flagsaver.restore_flag_values, self.saved_flag_values)
Example #15
def _GetBenchmarkSpec(benchmark_config, benchmark_name, benchmark_uid):
    """Creates a BenchmarkSpec or loads one from a file.

    During the provision stage, creates a BenchmarkSpec from the provided
    configuration. During any later stage, loads the BenchmarkSpec that was
    created during the provision stage from a file.

    Args:
      benchmark_config: A Python dictionary representation of the configuration
        for the benchmark. For a complete explanation, see
        perfkitbenchmarker/configs/__init__.py.
      benchmark_name: string. Name of the benchmark.
      benchmark_uid: string. Identifies a specific run of a benchmark.

    Returns:
      The created or loaded BenchmarkSpec.
    """
    if FLAGS.run_stage in (STAGE_ALL, STAGE_PROVISION):
        return benchmark_spec.BenchmarkSpec(benchmark_config, benchmark_name,
                                            benchmark_uid)
    else:
        # TODO(skschneider): Build BenchmarkConfigSpec before RunBenchmark.
        config = benchmark_config_spec.BenchmarkConfigSpec(benchmark_name,
                                                           flag_values=FLAGS,
                                                           **benchmark_config)
        try:
            return benchmark_spec.BenchmarkSpec.GetSpecFromFile(
                benchmark_uid, config)
        except IOError:
            if FLAGS.run_stage == STAGE_PREPARE:
                logging.error(
                    'We were unable to load the BenchmarkSpec. This may be related '
                    'to two additional run stages which have recently been added. '
                    'Please make sure to run the stage "provision" before "prepare". '
                    'Similarly, make sure to run "teardown" after "cleanup".')
            raise
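
As a rough usage sketch, a caller that has already loaded a benchmark config can pass it straight through and let the run stage decide whether to build or reload the spec; the benchmark name and uid below are placeholders.

# Placeholder name and uid; configs.LoadConfig and ping_benchmark.BENCHMARK_CONFIG
# are used the same way in the test examples elsewhere in this section.
# Depending on FLAGS.run_stage this either builds a fresh BenchmarkSpec or
# reloads the one pickled during the "provision" stage.
benchmark_config = configs.LoadConfig(ping_benchmark.BENCHMARK_CONFIG, {}, 'ping')
spec = _GetBenchmarkSpec(benchmark_config, 'ping', 'ping0')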
Example #16
    def testPDSSD(self):
        config = {
            'vm_groups': {
                'vm_group_1': {
                    'cloud': 'GCP',
                    'vm_spec': {
                        'GCP': {
                            'machine_type': 'test_machine_type',
                        }
                    },
                    'disk_spec': {
                        'GCP': {
                            'disk_type': 'remote_ssd',
                            'disk_size': 2,
                        }
                    }
                }
            }
        }

        spec = benchmark_spec.BenchmarkSpec(config, 'name', 'uid')
        spec.ConstructVirtualMachines()

        self.assertEqual(spec.vms[0].disk_specs[0].disk_type, 'pd-ssd')
Example #17
    def testEBSStandard(self):
        config = {
            'vm_groups': {
                'vm_group_1': {
                    'cloud': 'AWS',
                    'vm_spec': {
                        'AWS': {
                            'zone': 'us-east-1a'
                        }
                    },
                    'disk_spec': {
                        'AWS': {
                            'disk_type': 'standard',
                            'disk_size': 2
                        }
                    }
                }
            }
        }

        spec = benchmark_spec.BenchmarkSpec(config, 'name', 'uid')
        spec.ConstructVirtualMachines()

        self.assertEqual(spec.vms[0].disk_specs[0].disk_type, 'standard')
    def testRun(self):
        FLAGS.run_uri = '12345678'
        FLAGS.zone = ['us-central1-a']
        remote_command = mock.PropertyMock(return_value=(self.iperf_csv_text,
                                                         ''))
        client = mock.Mock(RemoteCommand=remote_command,
                           machine_type='n2-standard-2')
        server = mock.Mock(internal_ip='10.0.0.1',
                           machine_type='n2-standard-4')
        benchmark_module = mock.Mock(BENCHMARK_NAME='cloud_harmony_iperf')
        benchmark_config = mock.Mock(vm_groups={},
                                     relational_db=mock.Mock(vm_groups={}))
        spec = benchmark_spec.BenchmarkSpec(benchmark_module, benchmark_config,
                                            'abcdefg')
        spec.vm_groups = {'client': [client], 'server': [server]}
        results = cloudharmony_iperf_benchmark._Run(spec)
        client.RobustRemoteCommand.assert_called_with(
            'iperf/run.sh  '
            f'--iperf_server 10.0.0.1:{cloudharmony_iperf_benchmark._PORT} '
            '--meta_compute_service Google Compute Engine '
            '--meta_compute_service_id google:compute '
            '--meta_instance_id n2-standard-2 '
            '--meta_provider Google Cloud Platform '
            '--meta_provider_id google '
            '--meta_region us-central1 '
            '--meta_zone us-central1-a '
            '--meta_test_id 12345678 '
            '--iperf_server_instance_id n2-standard-4 '
            '--iperf_server_region us-central1-a '
            '--iperf_time 10 '
            '--iperf_warmup 0 '
            '--iperf_test TCP '
            '--iperf_parallel 1 '
            '--tcp_bw_file /tmp/tcpbw '
            '--wkhtml_xvfb '
            '--verbose')
        self.assertLen(results, 2)

        expected_gartner_metadata = [{
            'bandwidth_direction': 'up',
            'bandwidth_max': 9894.384,
            'bandwidth_mean': 9735.5,
            'bandwidth_median': 9888.76,
            'bandwidth_min': 8349.13,
            'bandwidth_p10': 8349.13,
            'bandwidth_p25': 9888.001,
            'bandwidth_p75': 9889.2,
            'bandwidth_p90': 9889.8,
            'bandwidth_stdev': 487,
            'benchmark_version': 1.0,
            'collectd_rrd': '',
            'cpu_client': 34.346987,
            'cpu_server': 2.880331,
            'iperf_bandwidth': '',
            'iperf_cmd': 'iperf3 -c 10.240.0.14 -J -i 1',
            'iperf_concurrency': 1,
            'iperf_interval': 1,
            'iperf_len': '',
            'iperf_mss': '',
            'iperf_nodelay': '',
            'iperf_num': '',
            'iperf_parallel': 1,
            'iperf_reverse': '',
            'iperf_server': '10.240.0.14',
            'iperf_server_instance_id': 'n1-standard-2',
            'iperf_server_os': 'Ubuntu 18.04.5 LTS',
            'iperf_server_provider': '',
            'iperf_server_provider_id': 'google',
            'iperf_server_region': 'us-central1',
            'iperf_server_service': '',
            'iperf_server_service_id': 'google:compute',
            'iperf_time': 10,
            'iperf_tos': '',
            'iperf_udp': '',
            'iperf_version': '3.1.3',
            'iperf_warmup': 0,
            'iperf_window': '',
            'iperf_zerocopy': '',
            'iteration': 1,
            'jitter_max': '',
            'jitter_mean': '',
            'jitter_median': '',
            'jitter_min': '',
            'jitter_p10': '',
            'jitter_p25': '',
            'jitter_p75': '',
            'jitter_p90': '',
            'jitter_stdev': '',
            'loss_max': '',
            'loss_mean': '',
            'loss_median': '',
            'loss_min': '',
            'loss_p10': '',
            'loss_p25': '',
            'loss_p75': '',
            'loss_p90': '',
            'loss_stdev': '',
            'meta_compute_service': '',
            'meta_compute_service_id': 'google:compute',
            'meta_cpu': 'Intel Xeon 2.30GHz',
            'meta_cpu_cache': '46080 KB',
            'meta_cpu_cores': 2,
            'meta_cpu_speed': 2300.0,
            'meta_hostname': 'pkb-3b246db4-0',
            'meta_instance_id': 'n1-standard-2',
            'meta_memory': '7 GB',
            'meta_memory_gb': 7,
            'meta_memory_mb': 7457,
            'meta_os': 'Ubuntu 18.04.5 LTS',
            'meta_provider': '',
            'meta_provider_id': 'google',
            'meta_region': 'us-central1',
            'meta_resource_id': '',
            'meta_run_group_id': '',
            'meta_run_id': '',
            'meta_test_id': '3b246db4',
            'report_pdf': '',
            'report_zip': '',
            'same_instance_id': 1,
            'same_os': 1,
            'same_provider': 1,
            'same_region': 1,
            'same_service': 1,
            'test_started': '2020-10-30 00:33:51',
            'test_stopped': '2020-10-30 00:34:01',
            'transfer': 11605.6
        }, {
            'bandwidth_direction': 'down',
            'bandwidth_max': 9910.723,
            'bandwidth_mean': 9730.44,
            'bandwidth_median': 9866.29,
            'bandwidth_min': 8349.13,
            'bandwidth_p10': 8349.13,
            'bandwidth_p25': 9888.001,
            'bandwidth_p75': 9889.76,
            'bandwidth_p90': 9889.8,
            'bandwidth_stdev': 487,
            'benchmark_version': 1.0,
            'collectd_rrd': '',
            'cpu_client': 34.346987,
            'cpu_server': 2.880331,
            'iperf_bandwidth': '',
            'iperf_cmd': 'iperf3 -c 10.240.0.14 -J -i 1',
            'iperf_concurrency': 1,
            'iperf_interval': 1,
            'iperf_len': '',
            'iperf_mss': '',
            'iperf_nodelay': '',
            'iperf_num': '',
            'iperf_parallel': 1,
            'iperf_reverse': '',
            'iperf_server': '10.240.0.14',
            'iperf_server_instance_id': 'n1-standard-2',
            'iperf_server_os': 'Ubuntu 18.04.5 LTS',
            'iperf_server_provider': '',
            'iperf_server_provider_id': 'google',
            'iperf_server_region': 'us-central1',
            'iperf_server_service': '',
            'iperf_server_service_id': 'google:compute',
            'iperf_time': 10,
            'iperf_tos': '',
            'iperf_udp': '',
            'iperf_version': '3.1.3',
            'iperf_warmup': 0,
            'iperf_window': '',
            'iperf_zerocopy': '',
            'iteration': 1,
            'jitter_max': '',
            'jitter_mean': '',
            'jitter_median': '',
            'jitter_min': '',
            'jitter_p10': '',
            'jitter_p25': '',
            'jitter_p75': '',
            'jitter_p90': '',
            'jitter_stdev': '',
            'loss_max': '',
            'loss_mean': '',
            'loss_median': '',
            'loss_min': '',
            'loss_p10': '',
            'loss_p25': '',
            'loss_p75': '',
            'loss_p90': '',
            'loss_stdev': '',
            'meta_compute_service': '',
            'meta_compute_service_id': 'google:compute',
            'meta_cpu': 'Intel Xeon 2.30GHz',
            'meta_cpu_cache': '46080 KB',
            'meta_cpu_cores': 2,
            'meta_cpu_speed': 2300.0,
            'meta_hostname': 'pkb-4c137ef3-0',
            'meta_instance_id': 'n1-standard-2',
            'meta_memory': '7 GB',
            'meta_memory_gb': 7,
            'meta_memory_mb': 7457,
            'meta_os': 'Ubuntu 18.04.5 LTS',
            'meta_provider': '',
            'meta_provider_id': 'google',
            'meta_region': 'us-central1',
            'meta_resource_id': '',
            'meta_run_group_id': '',
            'meta_run_id': '',
            'meta_test_id': '4c137ef3',
            'report_pdf': '',
            'report_zip': '',
            'same_instance_id': 1,
            'same_os': 1,
            'same_provider': 1,
            'same_region': 1,
            'same_service': 1,
            'test_started': '2021-02-25 4:20:12',
            'test_stopped': '2021-02-25 4:22:01',
            'transfer': 11605.5
        }]

        self.assertListEqual(expected_gartner_metadata, results)
 def setUp(self):
     super(GceNetworkTest, self).setUp()
     # These tests need a BenchmarkSpec in the current thread's context.
     config_spec = benchmark_config_spec.BenchmarkConfigSpec(
         'cluster_boot', flag_values=FLAGS)
     benchmark_spec.BenchmarkSpec(mock.Mock(), config_spec, 'uid')
Example #20
 def _CreateBenchmarkSpec(self, benchmark_config_yaml):
     config = configs.LoadConfig(benchmark_config_yaml, {}, NAME)
     config_spec = benchmark_config_spec.BenchmarkConfigSpec(
         NAME, flag_values=FLAGS, **config)
     return benchmark_spec.BenchmarkSpec(ping_benchmark, config_spec, UID)
Example #21
 def _CreateBenchmarkSpecFromConfigDict(self, config_dict, benchmark_name):
   config_spec = benchmark_config_spec.BenchmarkConfigSpec(
       benchmark_name, flag_values=self._mocked_flags, **config_dict)
   return benchmark_spec.BenchmarkSpec(config_spec, benchmark_name, UID)
Example #22
 def _CreateBenchmarkSpec(self, benchmark_config_yaml):
     config = configs.LoadConfig(benchmark_config_yaml, {}, NAME)
     config_spec = benchmark_config_spec.BenchmarkConfigSpec(
         NAME, flag_values=self._mocked_flags, **config)
     return benchmark_spec.BenchmarkSpec(config_spec, NAME, UID)
Example #23
 def setUp(self):
     # VM Creation depends on there being a BenchmarkSpec.
     self.spec = benchmark_spec.BenchmarkSpec({}, 'name', 'benchmark_uid')
     self.addCleanup(context.SetThreadBenchmarkSpec, None)
Example #24
def RunBenchmark(benchmark, collector, sequence_number, total_benchmarks,
                 benchmark_config, benchmark_uid):
    """Runs a single benchmark and adds the results to the collector.

    Args:
      benchmark: The benchmark module to be run.
      collector: The SampleCollector object to add samples to.
      sequence_number: The sequence number of when the benchmark was started
        relative to the other benchmarks in the suite.
      total_benchmarks: The total number of benchmarks in the suite.
      benchmark_config: The config to run the benchmark with.
      benchmark_uid: An identifier unique to this run of the benchmark even
        if the same benchmark is run multiple times with different configs.
    """
    benchmark_name = benchmark.BENCHMARK_NAME

    # Modify the logger prompt for messages logged within this function.
    label_extension = '{}({}/{})'.format(benchmark_name, sequence_number,
                                         total_benchmarks)
    log_context = log_util.GetThreadLogContext()
    with log_context.ExtendLabel(label_extension):
        # Optional prerequisite checking.
        check_prereqs = getattr(benchmark, 'CheckPrerequisites', None)
        if check_prereqs:
            try:
                check_prereqs()
            except:
                logging.exception('Prerequisite check failed for %s',
                                  benchmark_name)
                raise

        end_to_end_timer = timing_util.IntervalTimer()
        detailed_timer = timing_util.IntervalTimer()
        spec = None
        try:
            with end_to_end_timer.Measure('End to End'):
                if FLAGS.run_stage in [STAGE_ALL, STAGE_PROVISION]:
                    # It is important to create the spec outside of DoProvisionPhase
                    # because if DoPreparePhase raises an exception, we still need
                    # a reference to the spec in order to delete it in the "finally"
                    # section below.
                    spec = benchmark_spec.BenchmarkSpec(
                        benchmark_config, benchmark_name, benchmark_uid)
                    spec.ConstructVirtualMachines()
                    DoProvisionPhase(benchmark_name, spec, detailed_timer)
                else:
                    try:
                        spec = benchmark_spec.BenchmarkSpec.GetSpecFromFile(
                            benchmark_uid)
                    except IOError:
                        if FLAGS.run_stage == STAGE_PREPARE:
                            logging.error(
                                'We were unable to load the BenchmarkSpec. This may be '
                                'related to two additional run stages which have recently '
                                'been added. Please make sure to run the stage "provision" '
                                'before "prepare". Similarly, make sure to run "teardown" '
                                'after "cleanup".')
                        raise

                if FLAGS.run_stage in [STAGE_ALL, STAGE_PREPARE]:
                    DoPreparePhase(benchmark, benchmark_name, spec,
                                   detailed_timer)

                if FLAGS.run_stage in [STAGE_ALL, STAGE_RUN]:
                    DoRunPhase(benchmark, benchmark_name, spec, collector,
                               detailed_timer)

                if FLAGS.run_stage in [STAGE_ALL, STAGE_CLEANUP]:
                    DoCleanupPhase(benchmark, benchmark_name, spec,
                                   detailed_timer)

                if FLAGS.run_stage in [STAGE_ALL, STAGE_TEARDOWN]:
                    DoTeardownPhase(benchmark_name, spec, detailed_timer)

            # Add samples for any timed interval that was measured.
            include_end_to_end = timing_util.EndToEndRuntimeMeasurementEnabled()
            include_runtimes = timing_util.RuntimeMeasurementsEnabled()
            include_timestamps = timing_util.TimestampMeasurementsEnabled()
            if FLAGS.run_stage == STAGE_ALL:
                collector.AddSamples(
                    end_to_end_timer.GenerateSamples(
                        include_runtime=include_end_to_end or include_runtimes,
                        include_timestamps=include_timestamps), benchmark_name,
                    spec)
            collector.AddSamples(
                detailed_timer.GenerateSamples(include_runtimes,
                                               include_timestamps),
                benchmark_name, spec)

        except:
            # Resource cleanup (below) can take a long time. Log the error to give
            # immediate feedback, then re-throw.
            logging.exception('Error during benchmark %s', benchmark_name)
            # If the particular benchmark requests us to always call cleanup, do it
            # here.
            if (FLAGS.run_stage in [STAGE_ALL, STAGE_CLEANUP] and spec
                    and spec.always_call_cleanup):
                DoCleanupPhase(benchmark, benchmark_name, spec, detailed_timer)
            raise
        finally:
            if spec:
                if FLAGS.run_stage in [STAGE_ALL, STAGE_TEARDOWN]:
                    spec.Delete()
                # Pickle spec to save final resource state.
                spec.PickleSpec()
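
By way of illustration, a driver that walks a list of (module, config) pairs could invoke this version roughly as sketched below; the `benchmark_runs` list, the collector construction, and the uid scheme are assumptions and not part of the function above.

# Hypothetical driver loop: `benchmark_runs` is a list of (module, config_dict)
# pairs and `collector` is an already-constructed SampleCollector.
total = len(benchmark_runs)
for seq, (benchmark, config) in enumerate(benchmark_runs, start=1):
    uid = '%s%d' % (benchmark.BENCHMARK_NAME, seq)
    RunBenchmark(benchmark, collector, seq, total, config, uid)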
Example #25
 def makeSpec(self, yaml_benchmark_config=ping_benchmark.BENCHMARK_CONFIG):
   config = configs.LoadConfig(yaml_benchmark_config, {}, NAME)
   spec = benchmark_spec.BenchmarkSpec(config, NAME, UID)
   spec.ConstructVirtualMachines()
   return spec
 def setUp(self):
   self.addCleanup(context.SetThreadBenchmarkSpec, None)
   config_spec = benchmark_config_spec.BenchmarkConfigSpec(
       _BENCHMARK_NAME, flag_values=mock_flags.MockFlags(), vm_groups={})
   self.benchmark_spec = benchmark_spec.BenchmarkSpec(
       mock.MagicMock(), config_spec, _BENCHMARK_UID)
Example #27
def RunBenchmark(benchmark, collector, sequence_number, total_benchmarks):
    """Runs a single benchmark and adds the results to the collector.

    Args:
      benchmark: The benchmark module to be run.
      collector: The SampleCollector object to add samples to.
      sequence_number: The sequence number of when the benchmark was started
        relative to the other benchmarks in the suite.
      total_benchmarks: The total number of benchmarks in the suite.
    """
    benchmark_info = benchmark.GetInfo()
    if not ValidateBenchmarkInfo(benchmark_info):
        return
    benchmark_name = benchmark_info['name']

    # Modify the logger prompt for messages logged within this function.
    label_extension = '{}({}/{})'.format(benchmark_name, sequence_number,
                                         total_benchmarks)
    log_context = log_util.GetThreadLogContext()
    with log_context.ExtendLabel(label_extension):
        # Optional prerequisite checking.
        check_prereqs = getattr(benchmark, 'CheckPrerequisites', None)
        if check_prereqs:
            try:
                check_prereqs()
            except:
                logging.exception('Prerequisite check failed for %s',
                                  benchmark_name)
                raise

        end_to_end_timer = timing_util.IntervalTimer()
        detailed_timer = timing_util.IntervalTimer()
        spec = None
        try:
            with end_to_end_timer.Measure('End to End'):
                if FLAGS.run_stage in [STAGE_ALL, STAGE_PREPARE]:
                    # It is important to create the spec outside of DoPreparePhase
                    # because if DoPreparePhase raises an exception, we still need
                    # a reference to the spec in order to delete it in the "finally"
                    # section below.
                    spec = benchmark_spec.BenchmarkSpec(benchmark_info)
                    DoPreparePhase(benchmark, benchmark_name, spec,
                                   detailed_timer)
                else:
                    spec = benchmark_spec.BenchmarkSpec.GetSpecFromFile(
                        benchmark_name)

                if FLAGS.run_stage in [STAGE_ALL, STAGE_RUN]:
                    DoRunPhase(benchmark, benchmark_name, spec, collector,
                               detailed_timer)

                if FLAGS.run_stage in [STAGE_ALL, STAGE_CLEANUP]:
                    DoCleanupPhase(benchmark, benchmark_name, spec,
                                   detailed_timer)

            # Add samples for any timed interval that was measured.
            include_end_to_end = timing_util.EndToEndRuntimeMeasurementEnabled()
            include_runtimes = timing_util.RuntimeMeasurementsEnabled()
            include_timestamps = timing_util.TimestampMeasurementsEnabled()
            if FLAGS.run_stage == STAGE_ALL:
                collector.AddSamples(
                    end_to_end_timer.GenerateSamples(
                        include_runtime=include_end_to_end or include_runtimes,
                        include_timestamps=include_timestamps), benchmark_name,
                    spec)
            collector.AddSamples(
                detailed_timer.GenerateSamples(include_runtimes,
                                               include_timestamps),
                benchmark_name, spec)

        except Exception:
            # Resource cleanup (below) can take a long time. Log the error to give
            # immediate feedback, then re-throw.
            logging.exception('Error during benchmark %s', benchmark_name)
            # If the particular benchmark requests us to always call cleanup, do it
            # here.
            if (FLAGS.run_stage in [STAGE_ALL, STAGE_CLEANUP] and spec
                    and spec.always_call_cleanup):
                DoCleanupPhase(benchmark, benchmark_name, spec, detailed_timer)
            raise
        finally:
            if spec:
                if FLAGS.run_stage in [STAGE_ALL, STAGE_CLEANUP]:
                    spec.Delete()
                # Pickle spec to save final resource state.
                spec.PickleSpec()
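
The older four-argument variant above can be driven the same way, minus the per-run config and uid; again, how `benchmarks` and `collector` are obtained is left to the caller.

# Minimal driver sketch for the four-argument RunBenchmark.
total = len(benchmarks)
for seq, benchmark in enumerate(benchmarks, start=1):
    RunBenchmark(benchmark, collector, seq, total)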
 def _CreateBenchmarkSpecFromConfigDict(self, config_dict, benchmark_name):
     config_spec = benchmark_config_spec.BenchmarkConfigSpec(
         benchmark_name, flag_values=FLAGS, **config_dict)
     benchmark_module = next((b for b in linux_benchmarks.BENCHMARKS
                              if b.BENCHMARK_NAME == benchmark_name))
     return benchmark_spec.BenchmarkSpec(benchmark_module, config_spec, UID)
 def _CreateBenchmarkSpecFromConfigDict(self, config_dict, benchmark_name):
   config_spec = benchmark_config_spec.BenchmarkConfigSpec(
       benchmark_name, flag_values=FLAGS, **config_dict)
   return benchmark_spec.BenchmarkSpec(mock.MagicMock(), config_spec, UID)
Example #30
 def testBackgroundWorkloadConfigBadIp(self):
   """ Check that the config with invalid ip type gets an error """
   config = configs.LoadConfig(CONFIG_WITH_BACKGROUND_NETWORK_BAD_IPFLAG,
                               {}, NAME)
   with self.assertRaises(errors.Config.InvalidValue):
     benchmark_spec.BenchmarkSpec(config, NAME, UID)