def setUp(self):
    """Configure the flags a multistream object-storage run reads."""
    flags = mock_flags.PatchTestCaseFlags(self)
    flags.object_storage_scenario = 'api_multistream'
    flags.object_storage_multistream_objects_per_stream = 100
    flags.object_storage_object_sizes = {'1KB': '100%'}
    flags.object_storage_multistream_num_streams = 10
def setUp(self):
    """Patch flags, GCE network/firewall lookups, CPU count, and temp dir."""
    self.mock_flags = mock_flags.PatchTestCaseFlags(self)
    self.mock_flags.gcp_instance_metadata_from_file = ''
    self.mock_flags.gcp_instance_metadata = ''
    self.mock_flags.gcloud_path = 'gcloud'

    network_patcher = mock.patch(
        gce_virtual_machine.__name__ + '.gce_network.GceNetwork.GetNetwork')
    self.mock_get_network = network_patcher.start()
    self.addCleanup(network_patcher.stop)

    firewall_patcher = mock.patch(
        gce_virtual_machine.__name__ + '.gce_network.GceFirewall.GetFirewall')
    self.mock_get_firewall = firewall_patcher.start()
    self.addCleanup(firewall_patcher.stop)

    self.spec = gce_virtual_machine.GceVmSpec(
        _COMPONENT, machine_type='fake-machine-type')

    num_cpus_patcher = mock.patch(
        gce_virtual_machine.__name__ + '.linux_vm.BaseLinuxMixin._GetNumCpus')
    self.mock_get_num_cpus = num_cpus_patcher.start()
    self.addCleanup(num_cpus_patcher.stop)

    tmp_dir_patcher = mock.patch(vm_util.__name__ + '.GetTempDir')
    tmp_dir_patcher.start()
    self.addCleanup(tmp_dir_patcher.stop)
def setUp(self):
    """Reset call tracking, set GCP/Debian flags, and register cleanup."""
    self.last_call = 0
    super(TestBackgroundWorkloadFramework, self).setUp()
    self.mocked_flags = mock_flags.PatchTestCaseFlags(self)
    self.mocked_flags.os_type = os_types.DEBIAN
    self.mocked_flags.cloud = providers.GCP
    # Undo any benchmark spec attached to this thread once the test ends.
    self.addCleanup(context.SetThreadBenchmarkSpec, None)
def setUp(self):
    """Pin the hbase version flags and stub out urllib2.urlopen."""
    self.mock_flags = mock_flags.PatchTestCaseFlags(self)
    self.mock_flags['hbase_use_stable'].parse(False)
    self.mock_flags['hbase_version'].parse('1.3.2.1')
    urlopen_patcher = mock.patch.object(urllib2, 'urlopen')
    self.mock_url_open = urlopen_patcher.start()
    self.addCleanup(urlopen_patcher.stop)
def setUp(self):
    """Select the openblas math library and load the sample HPCC output."""
    self.flags = mock_flags.PatchTestCaseFlags(self)
    self.flags.hpcc_math_library = 'openblas'
    sample_path = os.path.join(
        os.path.dirname(__file__), '../data', 'hpcc-sample.txt')
    with open(sample_path) as sample_file:
        self.contents = sample_file.read()
def setUp(self):
    """Configure single-VM, single-stream multistream flag defaults."""
    flags = mock_flags.PatchTestCaseFlags(self)
    flags.object_storage_multistream_objects_per_stream = 100
    flags.object_storage_object_sizes = {'1KB': '100%'}
    flags.object_storage_streams_per_vm = 1
    flags.num_vms = 1
    flags.object_storage_object_naming_scheme = 'sequential_by_stream'
def setUp(self):
    """Patch the AWS network module and wire a fake network into it."""
    self.issue_cmd = mock.Mock()
    self.aws_network_spec = self._CreatePatched(aws_network, 'AwsNetwork')
    self.mock_flags = mock_flags.PatchTestCaseFlags(self)
    fake_network = mock.Mock()
    fake_network.subnet.id = 'subnet1'
    fake_network.vpc.default_security_group_id = 'group1'
    self.aws_network_spec.GetNetworkFromNetworkSpec.return_value = fake_network
def setUp(self):
    """Disable retry timeouts and stub RemoteHostCommand on a LinuxVM."""
    self.mocked_flags = mock_flags.PatchTestCaseFlags(self)
    # Zero timeout so @retry-decorated calls do not actually wait.
    self.mocked_flags['default_timeout'].parse(0)
    remote_patcher = mock.patch.object(LinuxVM, 'RemoteHostCommand')
    self.remote_command = remote_patcher.start()
    self.addCleanup(remote_patcher.stop)
    # Two queued-up successful (stdout, stderr, retcode) results.
    self.remote_command.side_effect = [('', None, 0), ('', None, 0)]
    self.vm = LinuxVM()
def runTest(self, sysctl, calls):
    """Run DoSysctls under the given --sysctl value and verify remote calls.

    Args:
      sysctl: value parsed into the 'sysctl' flag.
      calls: expected list of mock.call() objects made via vm.RemoteCommand().
    """
    self.mocked_flags = mock_flags.PatchTestCaseFlags(self)
    self.mocked_flags['sysctl'].parse(sysctl)
    vm = LinuxVM()
    with mock.patch.object(vm, 'RemoteCommand') as remote_command:
        vm.DoSysctls()
        self.assertEqual(remote_command.call_args_list, calls)
def setUp(self):
    """Point a dstat collector at the bundled sample CSV for one role."""
    self.flags = mock_flags.PatchTestCaseFlags(self)
    data_dir = os.path.join(os.path.dirname(__file__), '..', 'data')
    result_path = os.path.join(data_dir, 'dstat-result.csv')
    self.collector = dstat._DStatCollector(output_directory=data_dir)
    self.collector._role_mapping['test_vm0'] = result_path
    events.TracingEvent.events = []
    self.samples = []
def testNoSysctl(self):
    """An empty --sysctl list must issue no remote commands."""
    self.mocked_flags = mock_flags.PatchTestCaseFlags(self)
    self.mocked_flags.sysctl = []
    vm = LinuxVM()
    with mock.patch.object(vm, 'RemoteCommand') as remote_command:
        vm.DoSysctls()
        self.assertEqual(remote_command.call_args_list, [])
def setUp(self):
    """Set GCP/Debian flags, stub project lookup, and register cleanups."""
    super(TestBackgroundNetworkWorkload, self).setUp()
    self.mocked_flags = mock_flags.PatchTestCaseFlags(self)
    self.mocked_flags.os_type = os_types.DEBIAN
    self.mocked_flags.cloud = providers.GCP
    project_patcher = patch(util.__name__ + '.GetDefaultProject')
    project_patcher.start()
    self.addCleanup(project_patcher.stop)
    self.addCleanup(context.SetThreadBenchmarkSpec, None)
def setUp(self):
    """Set GCP/Debian flags, stub project lookup, and register cleanups."""
    self._mocked_flags = mock_flags.PatchTestCaseFlags(self)
    self._mocked_flags.cloud = providers.GCP
    self._mocked_flags.os_type = os_types.DEBIAN
    self._mocked_flags.temp_dir = 'tmp'
    project_patcher = patch(util.__name__ + '.GetDefaultProject')
    project_patcher.start()
    # Cleanups run LIFO: the patcher stops before the spec is cleared.
    self.addCleanup(context.SetThreadBenchmarkSpec, None)
    self.addCleanup(project_patcher.stop)
def setUp(self):
    """Snapshot and clear the static VM pool; patch temp dir and flags."""
    self._initial_pool = StaticVirtualMachine.vm_pool
    StaticVirtualMachine.vm_pool.clear()
    tmp_dir_patcher = mock.patch(vm_util.__name__ + '.GetTempDir')
    tmp_dir_patcher.start()
    self.addCleanup(tmp_dir_patcher.stop)
    flags = mock_flags.PatchTestCaseFlags(self)
    flags.image = 'test_image'
    flags.os_type = 'debian'
def setUp(self):
    """Stash a two-record multistream JSON result and configure flags."""
    # Two 1-byte uploads on the same stream, 5 seconds apart.
    self.raw_result = """ [{"latency": 0.1, "operation": "upload", "stream_num": 1, "start_time": 5.0, "size": 1}, {"latency": 0.1, "operation": "upload", "stream_num": 1, "start_time": 10.0, "size": 1}]"""  # noqa: line too long
    flags = mock_flags.PatchTestCaseFlags(self)
    flags.object_storage_scenario = 'api_multistream'
    flags.object_storage_multistream_objects_per_stream = 100
    flags.object_storage_object_sizes = '1B'
    flags.object_storage_multistream_num_streams = 10
def setUp(self):
    """Set gcloud flags and build a mocked managed relational DB spec."""
    self.flags = mock_flags.PatchTestCaseFlags(self)
    self.flags.project = ''
    self.flags.run_uri = '123'
    self.flags.gcloud_path = 'gcloud'
    spec_attrs = self.createMySQLSpecDict()
    self.mock_db_spec = mock.Mock(
        spec=benchmark_config_spec._ManagedRelationalDbSpec)
    self.mock_db_spec.configure_mock(**spec_attrs)
def setUp(self):
    """Set gcloud/TPU flags and build a mocked TPU group spec."""
    self.flags = mock_flags.PatchTestCaseFlags(self)
    self.flags.run_uri = '123'
    self.flags.project = ''
    self.flags.tpu_cores_per_donut = 8
    self.flags.gcloud_path = 'gcloud'
    spec_attrs = self.CreateTpuSpecDict()
    self.mock_tpu_spec = mock.Mock(spec=benchmark_config_spec._TpuGroupSpec)
    self.mock_tpu_spec.configure_mock(**spec_attrs)
def setUp(self):
    """Reset call tracking, patch flags and project lookup, add cleanups."""
    self.last_call = 0
    super(TestBackgroundWorkloadFramework, self).setUp()
    self.mocked_flags = mock_flags.PatchTestCaseFlags(self)
    self.mocked_flags.os_type = os_types.DEBIAN
    self.mocked_flags.cloud = providers.GCP
    self.mocked_flags.temp_dir = 'tmp'
    self.addCleanup(context.SetThreadBenchmarkSpec, None)
    project_patcher = mock.patch(util.__name__ + '.GetDefaultProject')
    project_patcher.start()
    self.addCleanup(project_patcher.stop)
def setUp(self):
    """Patch flags and temp dir, then build a minimal benchmark spec."""
    self.mock_flags = mock_flags.PatchTestCaseFlags(self)
    self.addCleanup(context.SetThreadBenchmarkSpec, None)
    tmp_dir_patcher = mock.patch(vm_util.__name__ + '.GetTempDir')
    tmp_dir_patcher.start()
    self.addCleanup(tmp_dir_patcher.stop)
    config_spec = benchmark_config_spec.BenchmarkConfigSpec(
        _BENCHMARK_NAME, flag_values=self.mock_flags, vm_groups={})
    self.benchmark_spec = benchmark_spec.BenchmarkSpec(
        mock.MagicMock(), config_spec, _BENCHMARK_UID)
def setUp(self):
    """Create a CloudRedis instance against a fully mocked spec."""
    flags = mock_flags.PatchTestCaseFlags(self)
    flags.project = 'project'
    flags.redis_failover_style = cloud_redis.Failover.FAILOVER_NONE
    redis_spec = mock.Mock(spec=benchmark_config_spec._CloudRedisSpec)
    redis_spec.redis_name = 'foobar'
    redis_spec.redis_tier = 'tier'
    redis_spec.redis_size_gb = 5
    redis_spec.redis_version = 'version'
    redis_spec.client_vm = mock.Mock()
    with mock_flags.PatchFlags(flags):
        self.redis = gcp_cloud_redis.CloudRedis(redis_spec)
def testSysctl(self):
    """Two sysctl settings are combined into one 'sudo sysctl -w' call."""
    self.mocked_flags = mock_flags.PatchTestCaseFlags(self)
    self.mocked_flags.sysctl = [
        'vm.dirty_background_ratio=10', 'vm.dirty_ratio=25'
    ]
    vm = LinuxVM()
    with mock.patch.object(vm, 'RemoteCommand') as remote_command:
        vm.DoSysctls()
        self.assertEqual(remote_command.call_args_list, [
            mock.call('sudo sysctl -w vm.dirty_background_ratio=10 '
                      'vm.dirty_ratio=25')
        ])
def setUp(self):
    """Patch flags, GCE network/firewall lookups, and the temp dir."""
    self.mock_flags = mock_flags.PatchTestCaseFlags(self)

    network_patcher = mock.patch(
        gce_virtual_machine.__name__ + '.gce_network.GceNetwork.GetNetwork')
    self.mock_get_network = network_patcher.start()
    self.addCleanup(network_patcher.stop)

    firewall_patcher = mock.patch(
        gce_virtual_machine.__name__ + '.gce_network.GceFirewall.GetFirewall')
    self.mock_get_firewall = firewall_patcher.start()
    self.addCleanup(firewall_patcher.stop)

    tmp_dir_patcher = mock.patch(vm_util.__name__ + '.GetTempDir')
    tmp_dir_patcher.start()
    self.addCleanup(tmp_dir_patcher.stop)
def testRunCountTest(self):
    """Prepare/Run/Cleanup each invoke the S3 benchmark exactly once."""
    with mock.patch('os.path.isfile', return_value=True), \
            mock.patch(PKB + '.data.ResourcePath', return_value=['a', 'b']), \
            mock.patch(MOD_PATH + '.S3StorageBenchmark.Prepare') as Prepare, \
            mock.patch(MOD_PATH + '.S3StorageBenchmark.Run') as Run, \
            mock.patch(MOD_PATH + '.S3StorageBenchmark.Cleanup') as Cleanup:
        flags = mock_flags.PatchTestCaseFlags(self)
        flags.storage = providers.AWS
        flags.object_storage_scenario = 'all'
        vm_spec = mock.MagicMock(spec=benchmark_spec.BenchmarkSpec)
        vm_spec.vms = [mock.MagicMock()]
        object_storage_service_benchmark.Prepare(vm_spec)
        self.assertEqual(Prepare.call_count, 1)
        object_storage_service_benchmark.Run(vm_spec)
        self.assertEqual(Run.call_count, 1)
        object_storage_service_benchmark.Cleanup(vm_spec)
        self.assertEqual(Cleanup.call_count, 1)
def runTest(self, set_files, calls):
    """Exercise vm.SetFiles() under a given --set_files value.

    Args:
      set_files: the value of FLAGS.set_files.
      calls: expected mock.call() objects issued via vm.RemoteCommand().
    """
    self.mocked_flags = mock_flags.PatchTestCaseFlags(self)
    self.mocked_flags.set_files = set_files
    vm = LinuxVM()
    with mock.patch.object(vm, 'RemoteCommand') as remote_command:
        vm.SetFiles()
        # The order of the remote commands is undefined, so compare
        # the call lists as multisets rather than sequences.
        self.assertItemsEqual(remote_command.call_args_list, calls)
def setUp(self):
    """Set GCE flags and create a benchmark spec for VM network objects."""
    self._mocked_flags = mock_flags.PatchTestCaseFlags(self)
    self._mocked_flags.cloud = providers.GCP
    self._mocked_flags.gcloud_path = 'test_gcloud'
    self._mocked_flags.os_type = os_types.DEBIAN
    self._mocked_flags.run_uri = 'aaaaaa'
    self._mocked_flags.gcp_instance_metadata = []
    self._mocked_flags.gcp_instance_metadata_from_file = []
    # Creating a VM object adds network objects to the current thread's
    # benchmark spec, so one must exist for these tests.
    self.addCleanup(context.SetThreadBenchmarkSpec, None)
    config_spec = benchmark_config_spec.BenchmarkConfigSpec(
        _BENCHMARK_NAME, flag_values=self._mocked_flags, vm_groups={})
    self._benchmark_spec = benchmark_spec.BenchmarkSpec(
        mock.MagicMock(), config_spec, _BENCHMARK_UID)
    tmp_dir_patcher = mock.patch(vm_util.__name__ + '.GetTempDir')
    tmp_dir_patcher.start()
    self.addCleanup(tmp_dir_patcher.stop)
def testReadFromFile_UnknownOsTypeDefaultsToLinuxRequiredKeys(self):
    """An unknown os_type falls back to the Linux-required key set."""
    mocked_flags = mock_flags.PatchTestCaseFlags(self)
    mocked_flags.os_type = 'unknown_os_type'
    contents = ('[{'
                ' "ip_address": "174.12.14.1", '
                ' "user_name": "perfkitbenchmarker", '
                ' "keyfile_path": "perfkitbenchmarker.pem"'
                '}]')
    StaticVirtualMachine.ReadStaticVirtualMachineFile(BytesIO(contents))
    vm_pool = StaticVirtualMachine.vm_pool
    self.assertEqual(1, len(vm_pool))
    self._AssertStaticVMsEqual(
        StaticVirtualMachine(
            StaticVmSpec(_COMPONENT, ip_address='174.12.14.1',
                         user_name='perfkitbenchmarker',
                         ssh_private_key='perfkitbenchmarker.pem')),
        vm_pool[0])