def test_fixture_for_an_individual_down_cell_targeted_call(self):
    """Verify DownCellFixture behaviour for individually targeted cells.

    One instance is created in each of cell0/cell1; a targeted DB call
    must raise DBError for a "down" cell and still return the real
    instance for an "up" cell.
    """
    # We have cell0 and cell1 by default in the setup. We try targeting
    # both the cells. We should get a db error for the down cell and
    # the correct result for the up cell.
    ctxt = context.get_admin_context()
    cell0 = self.cell_mappings['cell0']
    cell1 = self.cell_mappings['cell1']
    with context.target_cell(ctxt, cell0) as cctxt:
        inst1 = fake_instance.fake_instance_obj(cctxt)
        # 'id' is a db-assigned field; drop it so create() can set it.
        if 'id' in inst1:
            delattr(inst1, 'id')
        inst1.create()
    with context.target_cell(ctxt, cell1) as cctxt:
        inst2 = fake_instance.fake_instance_obj(cctxt)
        if 'id' in inst2:
            delattr(inst2, 'id')
        inst2.create()

    def dummy_tester(ctxt, cell_mapping, uuid):
        # Helper performing a cell-targeted DB read.
        with context.target_cell(ctxt, cell_mapping) as cctxt:
            return objects.Instance.get_by_uuid(cctxt, uuid)

    # Scenario A: We do not pass any down cells, fixture automatically
    # assumes the targeted cell is down whether its cell0 or cell1.
    with fixtures.DownCellFixture():
        self.assertRaises(
            db_exc.DBError, dummy_tester, ctxt, cell1, inst2.uuid)

    # Scenario B: We pass cell0 as the down cell.
    with fixtures.DownCellFixture([cell0]):
        self.assertRaises(
            db_exc.DBError, dummy_tester, ctxt, cell0, inst1.uuid)

    # Scenario C: We get the correct result from the up cell
    # when targeted.
    result = dummy_tester(ctxt, cell1, inst2.uuid)
    self.assertEqual(inst2.uuid, result.uuid)
def test_get_by_filters_multiple_sort_keys(self):
    """BuildRequestList.get_by_filters honours multiple sort keys/dirs.

    Three build requests are created with distinct (root_gb, image_ref)
    pairs and listed sorted by root_gb ascending then image_ref
    descending; the expected order is first, second, third.
    """
    # image_ref descending breaks the root_gb tie between the first two.
    instance_first = fake_instance.fake_instance_obj(
        self.context, objects.Instance, uuid=uuidutils.generate_uuid(),
        host=None, root_gb=512, image_ref='ccc')
    instance_second = fake_instance.fake_instance_obj(
        self.context, objects.Instance, uuid=uuidutils.generate_uuid(),
        host=None, root_gb=512, image_ref='bbb')
    instance_third = fake_instance.fake_instance_obj(
        self.context, objects.Instance, uuid=uuidutils.generate_uuid(),
        host=None, root_gb=1024, image_ref='aaa')
    # Creation order deliberately differs from the expected sort order.
    req_first = self._create_req(instance=instance_first)
    req_third = self._create_req(instance=instance_third)
    req_second = self._create_req(instance=instance_second)
    req_list = build_request.BuildRequestList.get_by_filters(
        self.context, {}, sort_keys=['root_gb', 'image_ref'],
        sort_dirs=['asc', 'desc'])
    self.assertIsInstance(req_list, objects.BuildRequestList)
    self.assertEqual(3, len(req_list))
    self.assertEqual(req_first.instance_uuid, req_list[0].instance_uuid)
    objects.base.obj_equal_prims(req_first.instance, req_list[0].instance)
    self.assertEqual(req_second.instance_uuid, req_list[1].instance_uuid)
    objects.base.obj_equal_prims(req_second.instance, req_list[1].instance)
    self.assertEqual(req_third.instance_uuid, req_list[2].instance_uuid)
    objects.base.obj_equal_prims(req_third.instance, req_list[2].instance)
def test_fixture_when_explicitly_passing_down_cell_mappings(self):
    # The test setup creates two cell mappings (cell0 and cell1) by
    # default. We'll create one instance per cell and pass cell0 as
    # the down cell. We should thus get db_exc.DBError for cell0 and
    # correct InstanceList object from cell1.
    ctxt = context.get_admin_context()
    cell0 = self.cell_mappings['cell0']
    cell1 = self.cell_mappings['cell1']
    with context.target_cell(ctxt, cell0) as cctxt:
        inst1 = fake_instance.fake_instance_obj(cctxt)
        # 'id' is a db-assigned field; drop it so create() can set it.
        if 'id' in inst1:
            delattr(inst1, 'id')
        inst1.create()
    with context.target_cell(ctxt, cell1) as cctxt:
        inst2 = fake_instance.fake_instance_obj(cctxt)
        if 'id' in inst2:
            delattr(inst2, 'id')
        inst2.create()
    with fixtures.DownCellFixture([cell0]):
        results = context.scatter_gather_all_cells(
            ctxt, objects.InstanceList.get_all)
        self.assertEqual(2, len(results))
        for cell_uuid, result in results.items():
            if cell_uuid == cell0.uuid:
                # The down cell surfaces as a DBError result object.
                self.assertIsInstance(result, db_exc.DBError)
            else:
                # The up cell returns its single instance.
                self.assertIsInstance(result, objects.InstanceList)
                self.assertEqual(1, len(result))
                self.assertEqual(inst2.uuid, result[0].uuid)
def test_delete_instance_info(self):
    # Deleting one instance's info should shrink the host's tracked
    # instance map and flag the host entry as updated.
    fake_host = "fake_host"
    first = fake_instance.fake_instance_obj("fake_context", uuid="aaa",
                                            host=fake_host)
    second = fake_instance.fake_instance_obj("fake_context", uuid="bbb",
                                             host=fake_host)
    tracked = {inst.uuid: inst for inst in (first, second)}
    self.host_manager._instance_info = {
        fake_host: {"instances": tracked, "updated": False}}
    self.host_manager.delete_instance_info("fake_context", fake_host,
                                           first.uuid)
    host_info = self.host_manager._instance_info[fake_host]
    self.assertEqual(1, len(host_info["instances"]))
    self.assertTrue(host_info["updated"])
def test_sync_instance_info_fail(self):
    # A sync whose uuid set does not match the tracked instances must
    # trigger a full rebuild via _recreate_instance_info and leave the
    # host entry unflagged.
    self.host_manager._recreate_instance_info = mock.MagicMock()
    fake_host = "fake_host"
    first = fake_instance.fake_instance_obj("fake_context", uuid="aaa",
                                            host=fake_host)
    second = fake_instance.fake_instance_obj("fake_context", uuid="bbb",
                                             host=fake_host)
    tracked = {inst.uuid: inst for inst in (first, second)}
    self.host_manager._instance_info = {
        fake_host: {"instances": tracked, "updated": False}}
    # "new" is not tracked, so the uuid lists cannot match.
    self.host_manager.sync_instance_info("fake_context", fake_host,
                                         ["bbb", "aaa", "new"])
    host_info = self.host_manager._instance_info[fake_host]
    self.host_manager._recreate_instance_info.assert_called_once_with(
        "fake_context", fake_host)
    self.assertFalse(host_info["updated"])
def test_recreate_instance_info(self, mock_get_by_host):
    # _recreate_instance_info should replace the tracked instances with
    # the fresh list fetched from the DB and clear the "updated" flag.
    fake_host = "fake_host"
    first = fake_instance.fake_instance_obj("fake_context", uuid="aaa",
                                            host=fake_host)
    second = fake_instance.fake_instance_obj("fake_context", uuid="bbb",
                                             host=fake_host)
    stale = {inst.uuid: inst for inst in (first, second)}
    fresh = objects.InstanceList(objects=[first, second])
    mock_get_by_host.return_value = fresh
    self.host_manager._instance_info = {
        fake_host: {"instances": stale, "updated": True}}
    self.host_manager._recreate_instance_info("fake_context", fake_host)
    host_info = self.host_manager._instance_info[fake_host]
    self.assertEqual(len(fresh), len(host_info["instances"]))
    self.assertFalse(host_info["updated"])
def test_update_instance_info(self):
    # update_instance_info should merge the new instances into the
    # host's tracked set and mark the entry as updated.
    fake_host = "fake_host"
    tracked = {}
    for inst_uuid in ("aaa", "bbb"):
        inst = fake_instance.fake_instance_obj("fake_context",
                                               uuid=inst_uuid,
                                               host=fake_host)
        tracked[inst.uuid] = inst
    self.host_manager._instance_info = {
        fake_host: {"instances": tracked, "updated": False}}
    extra = [fake_instance.fake_instance_obj("fake_context",
                                             uuid=inst_uuid,
                                             host=fake_host)
             for inst_uuid in ("ccc", "ddd")]
    update = objects.InstanceList(objects=extra)
    self.host_manager.update_instance_info("fake_context", fake_host,
                                           update)
    host_info = self.host_manager._instance_info[fake_host]
    self.assertEqual(4, len(host_info["instances"]))
    self.assertTrue(host_info["updated"])
def test_delete_instance_info_unknown_host(self):
    # Deleting info for a host we are not tracking must fall back to
    # recreating that host's info and leave the known host untouched.
    self.host_manager._recreate_instance_info = mock.MagicMock()
    known_host = "fake_host"
    first = fake_instance.fake_instance_obj("fake_context", uuid="aaa",
                                            host=known_host)
    second = fake_instance.fake_instance_obj("fake_context", uuid="bbb",
                                             host=known_host)
    tracked = {inst.uuid: inst for inst in (first, second)}
    self.host_manager._instance_info = {
        known_host: {"instances": tracked, "updated": False}}
    bad_host = "bad_host"
    self.host_manager.delete_instance_info("fake_context", bad_host, "aaa")
    host_info = self.host_manager._instance_info[known_host]
    self.host_manager._recreate_instance_info.assert_called_once_with(
        "fake_context", bad_host)
    self.assertEqual(len(tracked), len(host_info["instances"]))
    self.assertFalse(host_info["updated"])
def _test_live_migration(self, mock_copy_logs, side_effect):
    """Common body for live_migration tests.

    :param mock_copy_logs: mock for the log-copy helper (patched by the
        calling test's decorator).
    :param side_effect: side effect installed on live_migrate_vm; when it
        is the HyperVException class, the failure path (recover_method)
        is verified, otherwise the success path (post_method) is.
    """
    mock_instance = fake_instance.fake_instance_obj(self.context)
    mock_post = mock.MagicMock()
    mock_recover = mock.MagicMock()
    fake_dest = mock.sentinel.DESTINATION
    self._livemigrops._livemigrutils.live_migrate_vm.side_effect = [
        side_effect]
    if side_effect is vmutils.HyperVException:
        # Failure path: the exception must propagate and the recover
        # callback must run exactly once.
        self.assertRaises(vmutils.HyperVException,
                          self._livemigrops.live_migration,
                          self.context, mock_instance, fake_dest,
                          mock_post, mock_recover, False, None)
        mock_recover.assert_called_once_with(self.context, mock_instance,
                                             fake_dest, False)
    else:
        # Success path: logs are copied, the VM is migrated, and the
        # post callback runs exactly once.
        self._livemigrops.live_migration(context=self.context,
                                         instance_ref=mock_instance,
                                         dest=fake_dest,
                                         post_method=mock_post,
                                         recover_method=mock_recover)
        mock_copy_logs.assert_called_once_with(mock_instance.name,
                                               fake_dest)
        mock_live_migr = self._livemigrops._livemigrutils.live_migrate_vm
        mock_live_migr.assert_called_once_with(mock_instance.name,
                                               fake_dest)
        mock_post.assert_called_once_with(self.context, mock_instance,
                                          fake_dest, False)
def setUp(self):
    """Create the fake image/flavor/instance/node shared by the tests,
    plus the instance_info deploy patch the driver is expected to send
    to Ironic for them.
    """
    super(IronicDriverFieldsTestCase, self).setUp()
    self.image_meta = ironic_utils.get_test_image_meta()
    self.flavor = ironic_utils.get_test_flavor()
    self.ctx = nova_context.get_admin_context()
    self.instance = fake_instance.fake_instance_obj(self.ctx)
    self.instance.flavor = self.flavor
    self.node = ironic_utils.get_test_node(driver='fake')
    # Generic expected patches
    self._expected_deploy_patch = [
        {'path': '/instance_info/image_source',
         'value': self.image_meta.id,
         'op': 'add'},
        {'path': '/instance_info/root_gb',
         'value': str(self.instance.flavor.root_gb),
         'op': 'add'},
        {'path': '/instance_info/swap_mb',
         'value': str(self.flavor['swap']),
         'op': 'add'},
        {'path': '/instance_info/display_name',
         'value': self.instance['display_name'],
         'op': 'add'},
        {'path': '/instance_info/vcpus',
         'value': str(self.instance.flavor.vcpus),
         'op': 'add'},
        {'path': '/instance_info/memory_mb',
         'value': str(self.instance.flavor.memory_mb),
         'op': 'add'},
        {'path': '/instance_info/local_gb',
         'value': str(self.node.properties.get('local_gb', 0)),
         'op': 'add'}
    ]
def setUp(self):
    """Prepare fake flavor/instance/BDM objects plus mapping-lookup
    patches so compute RPC API tests run without cell mappings in the
    database.
    """
    super(ComputeRpcAPITestCase, self).setUp()
    self.context = context.get_admin_context()
    self.fake_flavor_obj = fake_flavor.fake_flavor_obj(self.context)
    self.fake_flavor = jsonutils.to_primitive(self.fake_flavor_obj)
    instance_attr = {'host': 'fake_host',
                     'instance_type_id': self.fake_flavor_obj['id'],
                     'instance_type': self.fake_flavor_obj}
    self.fake_instance_obj = fake_instance.fake_instance_obj(
        self.context, **instance_attr)
    self.fake_instance = jsonutils.to_primitive(self.fake_instance_obj)
    self.fake_volume_bdm = objects_block_dev.BlockDeviceMapping(
        **fake_block_device.FakeDbBlockDeviceDict(
            {'source_type': 'volume', 'destination_type': 'volume',
             'instance_uuid': self.fake_instance_obj.uuid,
             'volume_id': 'fake-volume-id'}))
    # FIXME(melwitt): Temporary while things have no mappings
    self.patcher1 = mock.patch('nova.objects.InstanceMapping.'
                               'get_by_instance_uuid')
    self.patcher2 = mock.patch('nova.objects.HostMapping.get_by_host')
    mock_inst_mapping = self.patcher1.start()
    mock_host_mapping = self.patcher2.start()
    # Both lookups fail so the code under test takes the "no mapping"
    # path.
    mock_inst_mapping.side_effect = exception.InstanceMappingNotFound(
        uuid=self.fake_instance_obj.uuid)
    mock_host_mapping.side_effect = exception.HostMappingNotFound(
        name=self.fake_instance_obj.host)
def test_fixture(self):
    # The test setup creates two cell mappings (cell0 and cell1) by
    # default. Let's first list servers across all cells while they are
    # "up" to make sure that works as expected. We'll create a single
    # instance in cell1.
    ctxt = context.get_admin_context()
    cell1 = self.cell_mappings[test.CELL1_NAME]
    with context.target_cell(ctxt, cell1) as cctxt:
        inst = fake_instance.fake_instance_obj(cctxt)
        # 'id' is a db-assigned field; drop it so create() can set it.
        if 'id' in inst:
            delattr(inst, 'id')
        inst.create()
    # Now list all instances from all cells (should get one back).
    results = context.scatter_gather_all_cells(
        ctxt, objects.InstanceList.get_all)
    self.assertEqual(2, len(results))
    self.assertEqual(0, len(results[objects.CellMapping.CELL0_UUID]))
    self.assertEqual(1, len(results[cell1.uuid]))
    # Now do the same but with the DownCellFixture which should result
    # in exception results from both cells.
    with fixtures.DownCellFixture():
        results = context.scatter_gather_all_cells(
            ctxt, objects.InstanceList.get_all)
        self.assertEqual(2, len(results))
        for result in results.values():
            self.assertIsInstance(result, db_exc.DBError)
def test_post_live_migration_at_destination(self, mock_log_vm):
    # The destination-side hook should invoke the patched logging helper
    # once with the VM's name and uuid.
    instance = fake_instance.fake_instance_obj(self.context)
    self._livemigrops.post_live_migration_at_destination(
        self.context, instance,
        network_info=mock.sentinel.NET_INFO,
        block_migration=mock.sentinel.BLOCK_INFO)
    mock_log_vm.assert_called_once_with(instance.name, instance.uuid)
def test_list_resizing_instances(self):
    """A RESIZED instance contributes both its normal and _resize names
    to the running-instance listing.
    """
    instances = [{'image_ref': '1',
                  'host': CONF.host,
                  'id': '1',
                  'uuid': '123',
                  'vm_state': vm_states.RESIZED,
                  'task_state': None}]
    all_instances = [fake_instance.fake_instance_obj(None, **instance)
                     for instance in instances]
    image_cache_manager = imagecache.ImageCacheManager()
    self.mox.StubOutWithMock(objects.block_device.BlockDeviceMappingList,
                             'bdms_by_instance_uuid')
    ctxt = context.get_admin_context()
    bdms = block_device_obj.block_device_make_list_from_dicts(
        ctxt, swap_bdm_256)
    # Record the expected mox call before ReplayAll().
    objects.block_device.BlockDeviceMappingList.bdms_by_instance_uuid(
        ctxt, ['123']).AndReturn({'123': bdms})
    self.mox.ReplayAll()
    running = image_cache_manager._list_running_instances(ctxt,
                                                          all_instances)
    self.assertEqual(1, len(running['used_images']))
    self.assertEqual((1, 0, ['instance-00000001']),
                     running['used_images']['1'])
    # Both the name- and uuid-based variants appear, each with a
    # "_resize" twin because the instance is mid-resize.
    self.assertEqual(set(['instance-00000001', '123',
                          'instance-00000001_resize', '123_resize']),
                     running['instance_names'])
def fake_instance_get_all_by_host(context, host):
    # Stub DB call: build an instance object for each TEST_SERVERS entry
    # whose 'host' matches the requested host.
    return [fake_instance.fake_instance_obj(context, **server)
            for server in TEST_SERVERS if server["host"] == host]
def test_setup_instance_network_on_host(self, fake_migrate_finish):
    # Setting up networking on a host is delegated to the patched
    # migrate-finish helper with only the destination compute set.
    inst = fake_instance.fake_instance_obj(self.context)
    self.network_api.setup_instance_network_on_host(
        self.context, inst, 'fake_compute_source')
    expected_migration = {'source_compute': None,
                          'dest_compute': 'fake_compute_source'}
    fake_migrate_finish.assert_called_once_with(
        self.context, inst, expected_migration)
def fake_compute_api_get(self, context, instance_id, want_objects=False,
                         expected_attrs=None):
    # Stub compute_api.get(): hand back a bare dict unless the caller
    # asked for a real Instance object.
    if not want_objects:
        return {'uuid': instance_id}
    return fake_instance.fake_instance_obj(context, uuid=instance_id)
def _get_group_details_with_filter_not_configured(self, policy):
    """Check NoValidHost is raised when the filter matching the group's
    policy is not enabled in scheduler_default_filters.

    :param policy: 'affinity' or 'anti-affinity'; the opposite filter is
        deliberately configured so the required one is missing.
    """
    wrong_filter = {
        'affinity': 'ServerGroupAntiAffinityFilter',
        'anti-affinity': 'ServerGroupAffinityFilter',
    }
    self.flags(scheduler_default_filters=[wrong_filter[policy]])
    # NOTE(review): fake_instance_obj takes **updates, so this sets a
    # 'params' attribute rather than host='hostA'; only instance.uuid is
    # used below, so it looks harmless here -- confirm intent.
    instance = fake_instance.fake_instance_obj(self.context,
                                               params={'host': 'hostA'})
    group = objects.InstanceGroup()
    group.uuid = str(uuid.uuid4())
    group.members = [instance.uuid]
    group.policies = [policy]
    with contextlib.nested(
        mock.patch.object(objects.InstanceGroup, 'get_by_instance_uuid',
                          return_value=group),
        mock.patch.object(objects.InstanceGroup, 'get_hosts',
                          return_value=['hostA']),
    ) as (get_group, get_hosts):
        # Reset the cached filter-support detection so it is recomputed
        # from the (wrong) configured filters.
        scheduler_utils._SUPPORTS_ANTI_AFFINITY = None
        scheduler_utils._SUPPORTS_AFFINITY = None
        self.assertRaises(exception.NoValidHost,
                          scheduler_utils._get_group_details,
                          self.context, ['fake-uuid'])
def test_instance_exists(self, mock_list):
    # Report the known instance upper-cased so the lookup has to match
    # regardless of name casing.
    mock_list.return_value = [self._instance.name.upper()]
    # An instance that does not appear in list_instances' output.
    unknown = fake_instance.fake_instance_obj(self._context, id=10)
    self.assertTrue(self._driver.instance_exists(self._instance))
    self.assertFalse(self._driver.instance_exists(unknown))
def test_rebuild_instance(self, _record_action_start,
                          _checks_for_create_and_rebuild,
                          _check_auto_disk_config, _get_image,
                          bdm_get_by_instance_uuid, get_flavor,
                          instance_save, _req_spec_get_by_inst_uuid):
    """Rebuilding an instance with a cell_name set goes through the
    cells RPC API.
    """
    orig_system_metadata = {}
    instance = fake_instance.fake_instance_obj(
        self.context,
        vm_state=vm_states.ACTIVE, cell_name='fake-cell',
        launched_at=timeutils.utcnow(),
        image_ref=uuids.image_id,
        system_metadata=orig_system_metadata,
        expected_attrs=['system_metadata'])
    get_flavor.return_value = {}
    # The API request schema validates that a UUID is passed for the
    # imageRef parameter so we need to provide an image.
    image_href = uuids.image_id
    image = {"min_ram": 10, "min_disk": 1,
             "properties": {'architecture': 'x86_64'},
             "id": uuids.image_id}
    admin_pass = ''
    files_to_inject = []
    bdms = objects.BlockDeviceMappingList()
    _get_image.return_value = (None, image)
    bdm_get_by_instance_uuid.return_value = bdms
    self.compute_api.rebuild(self.context, instance, image_href,
                             admin_pass, files_to_inject)
    # The rebuild must be forwarded to the cells RPC API.
    self.assertTrue(self.cells_rpcapi.rebuild_instance.called)
def _test_get_info(self, vm_exists):
    """Common body for get_info tests.

    :param vm_exists: whether vm_exists() reports the VM as present;
        when False, get_info must raise InstanceNotFound instead of
        querying the summary info.
    """
    mock_instance = fake_instance.fake_instance_obj(self.context)
    mock_info = mock.MagicMock(spec_set=dict)
    # Raw summary values as returned by the Hyper-V utils layer.
    fake_info = {
        "EnabledState": 2,
        "MemoryUsage": mock.sentinel.FAKE_MEM_KB,
        "NumberOfProcessors": mock.sentinel.FAKE_NUM_CPU,
        "UpTime": mock.sentinel.FAKE_CPU_NS,
    }

    def getitem(key):
        return fake_info[key]
    mock_info.__getitem__.side_effect = getitem

    expected = hardware.InstanceInfo(
        state=constants.HYPERV_POWER_STATE[2],
        max_mem_kb=mock.sentinel.FAKE_MEM_KB,
        mem_kb=mock.sentinel.FAKE_MEM_KB,
        num_cpu=mock.sentinel.FAKE_NUM_CPU,
        cpu_time_ns=mock.sentinel.FAKE_CPU_NS,
    )

    self._vmops._vmutils.vm_exists.return_value = vm_exists
    self._vmops._vmutils.get_vm_summary_info.return_value = mock_info

    if not vm_exists:
        self.assertRaises(exception.InstanceNotFound,
                          self._vmops.get_info, mock_instance)
    else:
        response = self._vmops.get_info(mock_instance)
        self._vmops._vmutils.vm_exists.assert_called_once_with(
            mock_instance.name)
        self._vmops._vmutils.get_vm_summary_info.assert_called_once_with(
            mock_instance.name)
        self.assertEqual(response, expected)
def setUp(self):
    """Stub out networking, compute get() and the password helpers so
    the server-password extension can be exercised in isolation.
    """
    super(ServerPasswordTest, self).setUp()
    fakes.stub_out_nw_api(self.stubs)
    # compute.api.API.get always returns a fresh fake instance with
    # empty system metadata.
    self.stubs.Set(
        compute.api.API, 'get',
        lambda self, ctxt, *a, **kw: fake_instance.fake_instance_obj(
            ctxt,
            system_metadata={},
            expected_attrs=['system_metadata']))
    # Backing store for the fake extract/convert helpers below.
    self.password = '******'

    def fake_extract_password(instance):
        return self.password

    def fake_convert_password(context, password):
        self.password = password
        return {}

    self.stubs.Set(password, 'extract_password', fake_extract_password)
    self.stubs.Set(password, 'convert_password', fake_convert_password)
    self.flags(
        osapi_compute_extension=[
            'nova.api.openstack.compute.contrib.select_extensions'],
        osapi_compute_ext_list=['Server_password'])
def test_cleanup_instance_network_on_host(self, fake_migrate_start):
    # Cleaning up networking on a host is delegated to the patched
    # migrate-start helper with only the source compute set.
    inst = fake_instance.fake_instance_obj(self.context)
    self.network_api.cleanup_instance_network_on_host(
        self.context, inst, 'fake_compute_source')
    expected_migration = {'source_compute': 'fake_compute_source',
                          'dest_compute': None}
    fake_migrate_start.assert_called_once_with(
        self.context, inst, expected_migration)
def test_power_on_having_block_devices(self, mock_set_vm_state,
                                       mock_fix_instance_vol_paths):
    # When block device info is supplied, power_on must fix up the
    # instance's volume paths and enable the VM.
    instance = fake_instance.fake_instance_obj(self.context)
    self._vmops.power_on(instance, mock.sentinel.block_device_info)
    mock_fix_instance_vol_paths.assert_called_once_with(
        instance.name, mock.sentinel.block_device_info)
    mock_set_vm_state.assert_called_once_with(
        instance, constants.HYPERV_VM_STATE_ENABLED)
def fake_compute_api_get(self, context, instance_id, **kwargs):
    # Stub compute_api.get(): return a real Instance object only when
    # 'want_objects' was requested, otherwise a bare dict.
    if kwargs.get('want_objects'):
        return fake_instance.fake_instance_obj(context, uuid=instance_id)
    return {'uuid': instance_id}
def test_create_vm_invalid_guestid(self, mock_log_warn):
    """Ensure we warn when create_vm() fails after we passed an
    unrecognised guestId
    """
    # Flag set by the fake warning handler when the expected warning
    # (mentioning our bogus ostype) is seen; a list so the closure can
    # mutate it.
    found = [False]

    def fake_log_warn(msg, values):
        if not isinstance(values, dict):
            return
        if values.get('ostype') == 'invalid_os_type':
            found[0] = True
    mock_log_warn.side_effect = fake_log_warn

    instance_values = {
        'id': 7,
        'name': 'fake-name',
        'uuid': uuidutils.generate_uuid(),
        'vcpus': 2,
        'memory_mb': 2048}
    instance = fake_instance.fake_instance_obj(
        context.RequestContext('fake', 'fake', is_admin=False),
        **instance_values)

    session = driver.VMwareAPISession()
    # Deliberately feed an os_type vSphere won't recognise.
    config_spec = vm_util.get_vm_create_spec(
        session.vim.client.factory,
        instance, instance.name, 'fake-datastore', [],
        vm_util.ExtraSpecs(),
        os_type='invalid_os_type')

    self.assertRaises(vexc.VMwareDriverException,
                      vm_util.create_vm, session, instance, 'folder',
                      config_spec, 'res-pool')
    # The warning about the bad guestId must have been emitted before
    # the failure surfaced.
    self.assertTrue(found[0])
def test_check_target_flavor(self):
    # A target flavor whose root disk is smaller than the instance's
    # must be rejected with InstanceFaultRollback.
    instance = fake_instance.fake_instance_obj(self.context)
    instance.root_gb = 1
    smaller_flavor = mock.MagicMock(root_gb=0)
    self.assertRaises(exception.InstanceFaultRollback,
                      self._migrationops._check_target_flavor,
                      instance, smaller_flavor)
def _test_pre_live_migration(self, mock_initialize_connection,
                             mock_get_cached_image,
                             mock_ebs_root_in_block_devices,
                             mock_get_disk_path_mapping,
                             phys_disks_attached=True):
    """Common body for pre_live_migration tests.

    :param phys_disks_attached: when True a disk path mapping exists,
        so a planned VM must be created on the target host; when False
        no planned VM is expected.
    """
    mock_instance = fake_instance.fake_instance_obj(self.context)
    mock_instance.image_ref = "fake_image_ref"
    mock_ebs_root_in_block_devices.return_value = None
    mock_get_disk_path_mapping.return_value = (
        mock.sentinel.disk_path_mapping if phys_disks_attached
        else None)
    CONF.set_override('use_cow_images', True)

    self._livemigrops.pre_live_migration(
        self.context, mock_instance,
        block_device_info=mock.sentinel.BLOCK_INFO,
        network_info=mock.sentinel.NET_INFO)

    check_config = (
        self._livemigrops._livemigrutils.check_live_migration_config)
    check_config.assert_called_once_with()
    mock_ebs_root_in_block_devices.assert_called_once_with(
        mock.sentinel.BLOCK_INFO)
    mock_get_cached_image.assert_called_once_with(self.context,
                                                  mock_instance)
    mock_initialize_connection.assert_called_once_with(
        mock.sentinel.BLOCK_INFO)
    mock_get_disk_path_mapping.assert_called_once_with(
        mock.sentinel.BLOCK_INFO)
    if phys_disks_attached:
        livemigrutils = self._livemigrops._livemigrutils
        livemigrutils.create_planned_vm.assert_called_once_with(
            mock_instance.name,
            mock_instance.host,
            mock.sentinel.disk_path_mapping)
def _test_power_off(self, timeout, set_state_expected=True):
    # Helper: run power_off with the given timeout and, when expected,
    # check that the VM state was explicitly set to DISABLED.
    inst = fake_instance.fake_instance_obj(self.context)
    with mock.patch.object(self._vmops,
                           "_set_vm_state") as patched_set_state:
        self._vmops.power_off(inst, timeout)
        if set_state_expected:
            patched_set_state.assert_called_once_with(
                inst, constants.HYPERV_VM_STATE_DISABLED)
def fake_get(self, context, id, expected_attrs=None):
    # Stub compute_api.get(): fabricate an instance owned by the
    # caller's project and user, carrying the requested uuid.
    overrides = {
        'uuid': id,
        'project_id': context.project_id,
        'user_id': context.user_id,
        'expected_attrs': expected_attrs,
    }
    return fake_instance.fake_instance_obj(context, **overrides)
def _fake_object(self, updates):
    # Build a throwaway Instance object (no context) with the given
    # field overrides applied.
    fake_obj = fake_instance.fake_instance_obj(None, **updates)
    return fake_obj
def return_server(compute_api, context, instance_id, expected_attrs=None):
    # Stub lookup: the requested id is ignored; any ACTIVE instance
    # satisfies the callers.
    active_server = fake_instance.fake_instance_obj(
        context, vm_state=vm_states.ACTIVE)
    return active_server
def fake_compute_get(*args, **kwargs):
    # args[1] is the request context; build a stub server that carries
    # both launch and termination timestamps.
    ctxt = args[1]
    stub = fakes.stub_instance(1, uuid=UUID3, launched_at=DATE1,
                               terminated_at=DATE2)
    return fake_instance.fake_instance_obj(ctxt, **stub)
def setUp(self):
    """Build a ZVM driver against a mocked connector, plus the fake
    instance, flavor, block devices, image meta and network info shared
    by the driver tests.
    """
    super(TestZVMDriver, self).setUp()
    self.flags(my_ip='192.168.1.1',
               instance_name_template='abc%05d')
    self.flags(cloud_connector_url='https://1.1.1.1:1111', group='zvm')

    # The driver probes the hypervisor at construction time; answer
    # with canned hypervisor info and a fake local user.
    with mock.patch('nova.virt.zvm.utils.ConnectorClient.call') as mcall, \
            mock.patch('pwd.getpwuid',
                       return_value=mock.Mock(pw_name='test')):
        mcall.return_value = {'hypervisor_hostname': 'TESTHOST',
                              'ipl_time': 'IPL at 11/14/17 10:47:44 EST'}
        self._driver = zvmdriver.ZVMDriver(fake.FakeVirtAPI())
        self._hypervisor = self._driver._hypervisor

    self._context = context.RequestContext('fake_user', 'fake_project')
    self._image_id = uuidsentinel.imag_id

    self._instance_values = {
        'display_name': 'test',
        'uuid': uuidsentinel.inst_id,
        'vcpus': 1,
        'memory_mb': 1024,
        'image_ref': self._image_id,
        'root_gb': 0,
    }
    self._instance = fake_instance.fake_instance_obj(
        self._context, **self._instance_values)
    self._instance.flavor = objects.Flavor(name='testflavor',
                                           vcpus=1, root_gb=3,
                                           ephemeral_gb=10, swap=0,
                                           memory_mb=512, extra_specs={})

    # Two ephemeral disks with different filesystems/sizes.
    self._eph_disks = [{'guest_format': u'ext3',
                        'device_name': u'/dev/sdb',
                        'disk_bus': None,
                        'device_type': None,
                        'size': 1},
                       {'guest_format': u'ext4',
                        'device_name': u'/dev/sdc',
                        'disk_bus': None,
                        'device_type': None,
                        'size': 2}]
    self._block_device_info = {'swap': None,
                               'root_device_name': u'/dev/sda',
                               'ephemerals': self._eph_disks,
                               'block_device_mapping': []}

    fake_image_meta = {'status': 'active',
                       'properties': {'os_distro': 'rhel7.2'},
                       'name': 'rhel72eckdimage',
                       'deleted': False,
                       'container_format': 'bare',
                       'disk_format': 'raw',
                       'id': self._image_id,
                       'owner': 'cfc26f9d6af948018621ab00a1675310',
                       'checksum': 'b026cd083ef8e9610a29eaf71459cc',
                       'min_disk': 0,
                       'is_public': False,
                       'deleted_at': None,
                       'min_ram': 0,
                       'size': 465448142}
    self._image_meta = objects.ImageMeta.from_dict(fake_image_meta)

    # Single OVS VIF on a flat fake network.
    subnet_4 = network_model.Subnet(
        cidr='192.168.0.1/24',
        dns=[network_model.IP('192.168.0.1')],
        gateway=network_model.IP('192.168.0.1'),
        ips=[network_model.IP('192.168.0.100')],
        routes=None)
    network = network_model.Network(id=0,
                                    bridge='fa0',
                                    label='fake',
                                    subnets=[subnet_4],
                                    vlan=None,
                                    bridge_interface=None,
                                    injected=True)
    self._network_values = {
        'id': None,
        'address': 'DE:AD:BE:EF:00:00',
        'network': network,
        'type': network_model.VIF_TYPE_OVS,
        'devname': None,
        'ovs_interfaceid': None,
        'rxtx_cap': 3
    }
    self._network_info = network_model.NetworkInfo([
        network_model.VIF(**self._network_values)
    ])

    self.mock_update_task_state = mock.Mock()
def setUp(self, mock_register, mock_service):
    """Build a VMware VC driver with stubbed vSphere/glance backends and
    a fake instance/image pair for the config-drive tests.
    """
    super(ConfigDriveTestCase, self).setUp()
    vm_util.vm_refs_cache_reset()
    self.context = context.RequestContext('fake', 'fake', is_admin=False)
    self.flags(cluster_name='test_cluster',
               host_ip='testhostname',
               host_username='******',
               host_password='******',
               use_linked_clone=False, group='vmware')
    self.flags(enabled=False, group='vnc')
    vmwareapi_fake.reset()
    stubs.set_stubs(self)
    nova.tests.unit.image.fake.stub_out_image_service(self)
    # NOTE(review): FakeVirtAPI is passed as a class, not an instance --
    # confirm the driver accepts that here.
    self.conn = driver.VMwareVCDriver(fake.FakeVirtAPI)
    self.network_info = utils.get_test_network_info()
    self.node_name = self.conn._nodename
    image_ref = nova.tests.unit.image.fake.get_valid_image_id()
    instance_values = {
        'vm_state': 'building',
        'project_id': 'fake',
        'user_id': 'fake',
        'name': '1',
        'kernel_id': '1',
        'ramdisk_id': '1',
        'mac_addresses': [{'address': 'de:ad:be:ef:be:ef'}],
        'memory_mb': 8192,
        'flavor': objects.Flavor(vcpus=4, extra_specs={}),
        'instance_type_id': 0,
        'vcpus': 4,
        'root_gb': 80,
        'image_ref': image_ref,
        'host': 'fake_host',
        'task_state': 'scheduling',
        'reservation_id': 'r-3t8muvr0',
        'id': 1,
        'uuid': uuidsentinel.foo,
        'node': self.node_name,
        'metadata': [],
        'expected_attrs': ['system_metadata'],
    }
    self.test_instance = fake_instance.fake_instance_obj(
        self.context, **instance_values)
    self.test_instance.flavor = objects.Flavor(vcpus=4, memory_mb=8192,
                                               root_gb=80,
                                               ephemeral_gb=0, swap=0,
                                               extra_specs={})

    # NOTE(review): the 'context' passed here is the module, not
    # self.context -- presumably tolerated by the fake image service;
    # confirm.
    (image_service, image_id) = glance.get_remote_image_service(
        context, image_ref)
    metadata = image_service.show(context, image_id)
    self.image = objects.ImageMeta.from_dict({
        'id': image_ref,
        'disk_format': 'vmdk',
        'size': int(metadata['size']),
    })

    class FakeInstanceMetadata(object):
        # Minimal stand-in: no metadata is produced for the drive.
        def __init__(self, instance, content=None, extra_md=None,
                     network_info=None, request_context=None):
            pass

        def metadata_for_config_drive(self):
            return []

    self.useFixture(fixtures.MonkeyPatch(
        'nova.api.metadata.base.InstanceMetadata', FakeInstanceMetadata))

    def fake_make_drive(_self, _path):
        pass
    # We can't actually make a config drive v2 because ensure_tree has
    # been faked out
    self.stub_out('nova.virt.configdrive.ConfigDriveBuilder.make_drive',
                  fake_make_drive)

    def fake_upload_iso_to_datastore(iso_path, instance, **kwargs):
        pass
    self.stub_out('nova.virt.vmwareapi.images.upload_iso_to_datastore',
                  fake_upload_iso_to_datastore)
def test_filter_rules_instance_admin(self):
    # Run the shared filter-rule checks using a concrete instance as the
    # policy target.
    req_ctxt = nova_context.RequestContext(user_id='fake-user',
                                           project_id='fake-project')
    target_instance = fake_instance.fake_instance_obj(req_ctxt)
    self._check_filter_rules(target=target_instance)
def test_configured_checksum_path(self):
    """update() must leave a base image and its .info checksum file in
    place when running instances still reference that image.
    """
    with utils.tempdir() as tmpdir:
        self.flags(instances_path=tmpdir)
        self.flags(image_info_filename_pattern=('$instances_path/'
                                                '%(image)s.info'),
                   group='libvirt')

        # Ensure there is a base directory
        os.mkdir(os.path.join(tmpdir, '_base'))

        # Fake the database call which lists running instances
        instances = [{'image_ref': '1',
                      'host': CONF.host,
                      'name': 'instance-1',
                      'uuid': '123',
                      'vm_state': '',
                      'task_state': ''},
                     {'image_ref': '1',
                      'host': CONF.host,
                      'name': 'instance-2',
                      'uuid': '456',
                      'vm_state': '',
                      'task_state': ''}]
        all_instances = []
        for instance in instances:
            all_instances.append(fake_instance.fake_instance_obj(
                None, **instance))

        def touch(filename):
            # Create/overwrite a small marker file.
            f = open(filename, 'w')
            f.write('Touched')
            f.close()

        # Age the .info file beyond 24h so staleness handling kicks in.
        old = time.time() - (25 * 3600)
        hashed = 'e97222e91fc4241f49a7f520d1dcf446751129b3'
        base_filename = os.path.join(tmpdir, hashed)
        touch(base_filename)
        touch(base_filename + '.info')
        os.utime(base_filename + '.info', (old, old))
        # NOTE(review): the touch/utime pair below repeats the two lines
        # above and looks redundant -- confirm before removing.
        touch(base_filename + '.info')
        os.utime(base_filename + '.info', (old, old))

        self.mox.StubOutWithMock(
            objects.block_device.BlockDeviceMappingList,
            'get_by_instance_uuid')

        ctxt = context.get_admin_context()
        objects.block_device.BlockDeviceMappingList.get_by_instance_uuid(
            ctxt, '123').AndReturn(None)
        objects.block_device.BlockDeviceMappingList.get_by_instance_uuid(
            ctxt, '456').AndReturn(None)

        self.mox.ReplayAll()
        image_cache_manager = imagecache.ImageCacheManager()
        image_cache_manager.update(ctxt, all_instances)

        # The referenced base image and its checksum file survive.
        self.assertTrue(os.path.exists(base_filename))
        self.assertTrue(os.path.exists(base_filename + '.info'))
def fake_compute_get(*args, **kwargs):
    # args[1] is the request context; return an instance carrying a
    # populated network cache with all default fields loaded.
    ctxt = args[1]
    stub = fakes.stub_instance(1, uuid=UUID3, nw_cache=NW_CACHE)
    return fake_instance.fake_instance_obj(
        ctxt, expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS, **stub)
def fake_instance_get_by_uuid(context, instance_id, use_slave=False):
    # Stub DB lookup: the uuid is ignored; return a minimal instance
    # owned by the caller's project.
    return fake_instance.fake_instance_obj(
        None, name='fake', project_id=context.project_id)
def test_remove_fixed_ip_from_instance_refresh_cache(self):
    # Removing a fixed IP must go through the cache-refreshing helper.
    inst = fake_instance.fake_instance_obj(self.context)
    self._test_refresh_cache('remove_fixed_ip_from_instance',
                             self.context, inst, 'fake-address')
def test_add_fixed_ip_to_instance_refresh_cache(self):
    # Adding a fixed IP must go through the cache-refreshing helper.
    inst = fake_instance.fake_instance_obj(self.context)
    self._test_refresh_cache('add_fixed_ip_to_instance',
                             self.context, inst, 'fake-network-id')
def test_list_running_instances(self):
    """_list_running_instances aggregates per-image usage, instance
    names (local and remote), image popularity and swap disk usage.
    """
    # Third instance lives on a remote host and also references a
    # kernel ('21') and ramdisk ('22') image.
    instances = [{'image_ref': '1',
                  'host': CONF.host,
                  'id': '1',
                  'uuid': '123',
                  'vm_state': '',
                  'task_state': ''},
                 {'image_ref': '2',
                  'host': CONF.host,
                  'id': '2',
                  'uuid': '456',
                  'vm_state': '',
                  'task_state': ''},
                 {'image_ref': '2',
                  'kernel_id': '21',
                  'ramdisk_id': '22',
                  'host': 'remotehost',
                  'id': '3',
                  'uuid': '789',
                  'vm_state': '',
                  'task_state': ''}]
    all_instances = [fake_instance.fake_instance_obj(None, **instance)
                     for instance in instances]

    image_cache_manager = imagecache.ImageCacheManager()

    self.mox.StubOutWithMock(objects.block_device.BlockDeviceMappingList,
                             'get_by_instance_uuid')

    ctxt = context.get_admin_context()
    # Record the expected BDM lookups before ReplayAll().
    objects.block_device.BlockDeviceMappingList.get_by_instance_uuid(
        ctxt, '123').AndReturn(swap_bdm_256)
    objects.block_device.BlockDeviceMappingList.get_by_instance_uuid(
        ctxt, '456').AndReturn(swap_bdm_128)
    objects.block_device.BlockDeviceMappingList.get_by_instance_uuid(
        ctxt, '789').AndReturn(swap_bdm_128)

    self.mox.ReplayAll()

    # The argument here should be a context, but it's mocked out
    running = image_cache_manager._list_running_instances(ctxt,
                                                          all_instances)

    # used_images maps image id -> (local count, remote count, names).
    self.assertEqual(4, len(running['used_images']))
    self.assertEqual((1, 0, ['instance-00000001']),
                     running['used_images']['1'])
    self.assertEqual((1, 1, ['instance-00000002', 'instance-00000003']),
                     running['used_images']['2'])
    self.assertEqual((0, 1, ['instance-00000003']),
                     running['used_images']['21'])
    self.assertEqual((0, 1, ['instance-00000003']),
                     running['used_images']['22'])

    self.assertIn('instance-00000001', running['instance_names'])
    self.assertIn('123', running['instance_names'])

    self.assertEqual(4, len(running['image_popularity']))
    self.assertEqual(1, running['image_popularity']['1'])
    self.assertEqual(2, running['image_popularity']['2'])
    self.assertEqual(1, running['image_popularity']['21'])
    self.assertEqual(1, running['image_popularity']['22'])

    # Both swap sizes seen across the BDMs are recorded.
    self.assertEqual(len(running['used_swap_images']), 2)
    self.assertIn('swap_128', running['used_swap_images'])
    self.assertIn('swap_256', running['used_swap_images'])
def fake_compute_api_get(self, context, instance_id, expected_attrs=None):
    """Stand-in for compute_api.get().

    Fabricates an Instance object whose uuid is the requested id; the
    expected_attrs argument is accepted but ignored.
    """
    return fake_instance.fake_instance_obj(context, uuid=instance_id)
def test_instance_object_none_info_cache(self):
    """An instance with a None info_cache yields empty network info."""
    instance = fake_instance.fake_instance_obj(
        'fake-context', expected_attrs=['info_cache'])
    # The fake builder leaves info_cache unset (None) for this case.
    self.assertIsNone(instance.info_cache)
    network_info = compute_utils.get_nw_info_for_instance(instance)
    # With no cache available the helper must fall back to an empty list.
    self.assertEqual(jsonutils.dumps([]), network_info.json())
def fake_compute_get(*args, **kwargs):
    """Fake compute get() building an instance with the well-known UUID."""
    # args[0] is the (stubbed) self; args[1] is the request context.
    ctxt = args[1]
    return fake_instance.fake_instance_obj(ctxt, id=1, uuid=UUID, **kwargs)
def test_verify_base_images(self):
    """End-to-end check of ImageCacheManager.update().

    Fakes the whole filesystem (exists/listdir/isfile/getmtime/remove),
    the backing-file lookup and the BDM database calls, then verifies
    that the manager classifies each entry in _base as active,
    removable or corrupt as expected.
    """
    # sha1 hashes of the image refs '1', '21', '22' and '42'.
    hashed_1 = '356a192b7913b04c54574d18c28d46e6395428ab'
    hashed_21 = '472b07b9fcf2c2451e8781e944bf5f77cd8457c8'
    hashed_22 = '12c6fc06c99a462375eeb3f43dfd832b08ca9e17'
    hashed_42 = '92cfceb39d57d914ed8b14d0e37643de0797ae56'

    self.flags(instances_path='/instance_path',
               image_cache_subdirectory_name='_base')

    # Contents of the fake _base cache directory, mixing in-use base
    # files, resized variants and stale checksum ('_sm') files.
    base_file_list = ['00000001',
                      'ephemeral_0_20_None',
                      'e97222e91fc4241f49a7f520d1dcf446751129b3_sm',
                      'e09c675c2d1cfac32dae3c2d83689c8c94bc693b_sm',
                      hashed_42,
                      hashed_1,
                      hashed_21,
                      hashed_22,
                      '%s_5368709120' % hashed_1,
                      '%s_10737418240' % hashed_1,
                      '00000004']

    def fq_path(path):
        # Fully qualify a filename into the fake _base directory.
        return os.path.join('/instance_path/_base/', path)

    # Fake base directory existence
    orig_exists = os.path.exists

    def exists(path):
        # The python coverage tool got angry with my overly broad mocks
        if not path.startswith('/instance_path'):
            return orig_exists(path)

        if path in ['/instance_path',
                    '/instance_path/_base',
                    '/instance_path/instance-1/disk',
                    '/instance_path/instance-2/disk',
                    '/instance_path/instance-3/disk',
                    '/instance_path/_base/%s.info' % hashed_42]:
            return True

        for p in base_file_list:
            if path == fq_path(p):
                return True
            if path == fq_path(p) + '.info':
                return False

        if path in ['/instance_path/_base/%s_sm' % i for i in [hashed_1,
                                                               hashed_21,
                                                               hashed_22,
                                                               hashed_42]]:
            return False

        # Any other path probe means the code under test went somewhere
        # unexpected.
        self.fail('Unexpected path existence check: %s' % path)

    self.stubs.Set(os.path, 'exists', lambda x: exists(x))

    self.stubs.Set(libvirt_utils, 'chown', lambda x, y: None)

    # We need to stub utime as well
    self.stubs.Set(os, 'utime', lambda x, y: None)

    # Fake up some instances in the instances directory
    orig_listdir = os.listdir

    def listdir(path):
        # The python coverage tool got angry with my overly broad mocks
        if not path.startswith('/instance_path'):
            return orig_listdir(path)

        if path == '/instance_path':
            return ['instance-1', 'instance-2', 'instance-3', '_base']

        if path == '/instance_path/_base':
            return base_file_list

        self.fail('Unexpected directory listed: %s' % path)

    self.stubs.Set(os, 'listdir', lambda x: listdir(x))

    # Fake isfile for these faked images in _base
    orig_isfile = os.path.isfile

    def isfile(path):
        # The python coverage tool got angry with my overly broad mocks
        if not path.startswith('/instance_path'):
            return orig_isfile(path)

        for p in base_file_list:
            if path == fq_path(p):
                return True

        self.fail('Unexpected isfile call: %s' % path)

    self.stubs.Set(os.path, 'isfile', lambda x: isfile(x))

    # Fake the database call which lists running instances
    instances = [{'image_ref': '1',
                  'host': CONF.host,
                  'name': 'instance-1',
                  'uuid': '123',
                  'vm_state': '',
                  'task_state': ''},
                 {'image_ref': '1',
                  'kernel_id': '21',
                  'ramdisk_id': '22',
                  'host': CONF.host,
                  'name': 'instance-2',
                  'uuid': '456',
                  'vm_state': '',
                  'task_state': ''}]

    all_instances = [fake_instance.fake_instance_obj(None, **instance)
                     for instance in instances]

    image_cache_manager = imagecache.ImageCacheManager()

    # Fake the utils call which finds the backing image
    def get_disk_backing_file(path):
        if path in ['/instance_path/instance-1/disk',
                    '/instance_path/instance-2/disk']:
            # Both instances are backed by the resized 5G variant.
            return fq_path('%s_5368709120' % hashed_1)
        self.fail('Unexpected backing file lookup: %s' % path)

    self.stubs.Set(libvirt_utils, 'get_disk_backing_file',
                   lambda x: get_disk_backing_file(x))

    # Fake out verifying checksums, as that is tested elsewhere
    self.stubs.Set(image_cache_manager, '_verify_checksum',
                   lambda x, y: True)

    # Fake getmtime as well
    orig_getmtime = os.path.getmtime

    def getmtime(path):
        if not path.startswith('/instance_path'):
            return orig_getmtime(path)
        # A fixed, very old mtime so cached files look stale.
        return 1000000

    self.stubs.Set(os.path, 'getmtime', lambda x: getmtime(x))

    # Make sure we don't accidentally remove a real file
    orig_remove = os.remove

    def remove(path):
        if not path.startswith('/instance_path'):
            return orig_remove(path)

        # Don't try to remove fake files
        return

    self.stubs.Set(os, 'remove', lambda x: remove(x))

    # Neither instance has block device mappings attached.
    self.mox.StubOutWithMock(objects.block_device.BlockDeviceMappingList,
                             'get_by_instance_uuid')

    ctxt = context.get_admin_context()
    objects.block_device.BlockDeviceMappingList.get_by_instance_uuid(
        ctxt, '123').AndReturn(None)
    objects.block_device.BlockDeviceMappingList.get_by_instance_uuid(
        ctxt, '456').AndReturn(None)

    self.mox.ReplayAll()

    # And finally we can make the call we're actually testing...

    # The argument here should be a context, but it is mocked out
    image_cache_manager.update(ctxt, all_instances)

    # Verify
    # Active: image '1', its 5G resize, and the kernel/ramdisk images.
    active = [fq_path(hashed_1),
              fq_path('%s_5368709120' % hashed_1),
              fq_path(hashed_21),
              fq_path(hashed_22)]
    for act in active:
        self.assertIn(act, image_cache_manager.active_base_files)
    self.assertEqual(len(image_cache_manager.active_base_files),
                     len(active))

    # Removable: the stale checksum files, the unused image '42' and
    # the unused 10G resize of image '1'.
    for rem in [fq_path('e97222e91fc4241f49a7f520d1dcf446751129b3_sm'),
                fq_path('e09c675c2d1cfac32dae3c2d83689c8c94bc693b_sm'),
                fq_path(hashed_42),
                fq_path('%s_10737418240' % hashed_1)]:
        self.assertIn(rem, image_cache_manager.removable_base_files)

    # Ensure there are no "corrupt" images as well
    self.assertEqual(len(image_cache_manager.corrupt_base_files), 0)
def _fake_get(stub_self, context, instance_uuid, expected_attrs=None):
    """Fake instance lookup returning an object with the requested uuid."""
    fields = {'uuid': instance_uuid}
    return fake_instance.fake_instance_obj(None, **fields)
def fake_compute_get(*args, **kwargs):
    """Fake compute get(): stub server 1 wrapped in an Instance object."""
    stub = fakes.stub_instance(1, uuid=UUID1)
    # args[1] is the request context supplied by the caller.
    return fake_instance.fake_instance_obj(args[1], **stub)
def _fake_get(self, context, instance_uuid, expected_attrs=None,
              want_objects=True):
    """Fake instance lookup returning an object with the requested uuid."""
    fields = {'uuid': instance_uuid}
    return fake_instance.fake_instance_obj(None, **fields)
def test_deallocate_for_instance(self):
    """deallocate_for_instance is an RPC call at version 1.11."""
    admin_ctxt = context.get_admin_context()
    instance = fake_instance.fake_instance_obj(admin_ctxt)
    self._test_network_api(
        'deallocate_for_instance', rpc_method='call',
        requested_networks=self.DefaultArg(None), instance=instance,
        version='1.11')
def fake_get(self, context, id, expected_attrs=None,
             cell_down_support=False):
    """Fake get(): echo the caller's project/user onto a fake instance."""
    fields = {'uuid': id,
              'project_id': context.project_id,
              'user_id': context.user_id,
              'expected_attrs': expected_attrs}
    return fake_instance.fake_instance_obj(context, **fields)
def fake_get_instance(self):
    """Return a fake server in the SHELVED_OFFLOADED state."""
    request_context = self.req.environ['nova.context']
    return fake_instance.fake_instance_obj(
        request_context, uuid=fakes.FAKE_UUID,
        vm_state=vm_states.SHELVED_OFFLOADED)
def _return_server(*_args, **_kwargs):
    # NOTE: `args`/`kwargs` below are free variables captured from the
    # enclosing scope — this function's own parameters are deliberately
    # ignored except for _args[1], the request context.
    stub = fakes.stub_instance(*args, **kwargs)
    return fake_instance.fake_instance_obj(_args[1], **stub)
def fake_compute_get(*args, **kwargs):
    """Fake compute get(): an ACTIVE server pinned to host 'get-host'."""
    stub = fakes.stub_instance(
        1, uuid=UUID3, host="get-host", vm_state=vm_states.ACTIVE)
    return fake_instance.fake_instance_obj(args[1], **stub)
def test_list_running_instances(self, mock_bdms_by_uuid):
    """Verify _list_running_instances() aggregates image usage.

    Mirrors the mox-era test but with the batched BDM lookup mocked:
    the manager should make one bulk get-by-uuid call, then report
    per-image (local, remote, names) tuples plus the swap and
    ephemeral images in use.
    """
    instances = [{'image_ref': '1',
                  'host': CONF.host,
                  'id': '1',
                  'uuid': uuids.instance_1,
                  'vm_state': '',
                  'task_state': ''},
                 {'image_ref': '2',
                  'host': CONF.host,
                  'id': '2',
                  'uuid': uuids.instance_2,
                  'vm_state': '',
                  'task_state': ''},
                 {'image_ref': '2',
                  'kernel_id': '21',
                  'ramdisk_id': '22',
                  'host': 'remotehost',
                  'id': '3',
                  'uuid': uuids.instance_3,
                  'vm_state': '',
                  'task_state': ''}]

    all_instances = [fake_instance.fake_instance_obj(None, **instance)
                     for instance in instances]

    image_cache_manager = imagecache.ImageCacheManager()

    ctxt = context.get_admin_context()

    # Instance 1 gets a 256M swap plus an ephemeral disk; the other two
    # each get a 128M swap only.
    swap_bdm_256_list = block_device_obj.block_device_make_list_from_dicts(
        ctxt, swap_bdm_256 + ephemeral_bdm)
    swap_bdm_128_list = block_device_obj.block_device_make_list_from_dicts(
        ctxt, swap_bdm_128)
    mock_bdms_by_uuid.return_value = {uuids.instance_1: swap_bdm_256_list,
                                      uuids.instance_2: swap_bdm_128_list,
                                      uuids.instance_3: swap_bdm_128_list}

    # The argument here should be a context, but it's mocked out
    running = image_cache_manager._list_running_instances(ctxt,
                                                          all_instances)

    # The BDMs must be fetched in a single batched call.
    mock_bdms_by_uuid.assert_called_once_with(ctxt, [uuids.instance_1,
                                                     uuids.instance_2,
                                                     uuids.instance_3])

    # Four distinct images are in use: '1', '2', '21' and '22'.
    self.assertEqual(4, len(running['used_images']))
    # (local_count, remote_count, names) per image.
    self.assertEqual((1, 0, ['instance-00000001']),
                     running['used_images']['1'])
    self.assertEqual((1, 1, ['instance-00000002', 'instance-00000003']),
                     running['used_images']['2'])
    self.assertEqual((0, 1, ['instance-00000003']),
                     running['used_images']['21'])
    self.assertEqual((0, 1, ['instance-00000003']),
                     running['used_images']['22'])

    # instance_names carries both display names and uuids.
    self.assertIn('instance-00000001', running['instance_names'])
    self.assertIn(uuids.instance_1, running['instance_names'])

    # Swap and ephemeral disk images derived from the BDM lists.
    self.assertEqual(len(running['used_swap_images']), 2)
    self.assertIn('swap_128', running['used_swap_images'])
    self.assertIn('swap_256', running['used_swap_images'])

    self.assertEqual(len(running['used_ephemeral_images']), 1)
    self.assertIn('ephemeral_4_0706d66', running['used_ephemeral_images'])
def test_deallocate_for_instance_with_expected_networks(self):
    """deallocate_for_instance passes explicit requested_networks."""
    admin_ctxt = context.get_admin_context()
    instance = fake_instance.fake_instance_obj(admin_ctxt)
    self._test_network_api(
        'deallocate_for_instance', rpc_method='call', instance=instance,
        requested_networks={}, version='1.11')
def fake_compute_get_empty(*args, **kwargs):
    """Fake compute get(): an ACTIVE server with an empty host field."""
    stub = fakes.stub_instance(
        1, uuid=UUID3, host="", vm_state=vm_states.ACTIVE,
        availability_zone='fakeaz')
    return fake_instance.fake_instance_obj(args[1], **stub)
def fake_get_instance(self, context, id):
    # NOTE: req_context is a closure variable from the enclosing test;
    # the passed-in context argument is deliberately ignored.
    fields = {'project_id': req_context.project_id,
              'user_id': req_context.user_id}
    return fake_instance.fake_instance_obj(req_context, **fields)
def test_allocate_for_instance_refresh_cache(self):
    """allocate_for_instance must refresh the instance info cache."""
    instance = fake_instance.fake_instance_obj(self.context)
    fake_vpn = 'fake-vpn'
    fake_requested_networks = [('fake-networks', None)]
    self._test_refresh_cache('allocate_for_instance', self.context,
                             instance, fake_vpn, fake_requested_networks)
def test_setup_networks_on_host(self):
    """setup_networks_on_host is an RPC call at version 1.16."""
    request_ctxt = context.RequestContext('fake_user', 'fake_project')
    instance = fake_instance.fake_instance_obj(request_ctxt)
    self._test_network_api(
        'setup_networks_on_host', rpc_method='call',
        instance_id=instance.id, host='fake_host', teardown=False,
        instance=instance, version='1.16')