def test_mapping(self):
    """Add a vscsi mapping, then re-add it via the wrapper (a no-op)."""
    self.adpt.read.return_value = self.v1resp

    def check_update(*args, **kwargs):
        # The new mapping should have fused onto the existing adapter pair.
        wrapper = args[0]
        self.assertEqual(6, len(wrapper.scsi_mappings))
        base, added = wrapper.scsi_mappings[0], wrapper.scsi_mappings[4]
        self.assertEqual(base.client_adapter, added.client_adapter)
        self.assertEqual(base.server_adapter, added.server_adapter)
        return wrapper.entry

    self.adpt.update_by_path.side_effect = check_update

    # Build the storage element to map.
    pv = pvm_stor.PV.bld(self.adpt, 'pv_name', 'pv_udid')

    scsi_mapper.add_vscsi_mapping('host_uuid', 'vios_uuid', LPAR_UUID, pv)

    # Exactly one update, and exactly one VIOS lookup.
    self.assertEqual(1, self.adpt.update_by_path.call_count)
    self.assertEqual(1, self.adpt.read.call_count)

    # Repeat, this time handing in the VIOS wrapper directly.
    self.adpt.update_by_path.reset_mock()
    self.adpt.read.reset_mock()
    scsi_mapper.add_vscsi_mapping('host_uuid', self.v1wrap, LPAR_UUID, pv)

    # The mapping already existed, so no update and no lookup occurred.
    self.assertEqual(0, self.adpt.update_by_path.call_count)
    self.assertEqual(0, self.adpt.read.call_count)
def connect_instance_disk_to_mgmt(self, instance):
    """Connect an instance's boot disk to the management partition.

    :param instance: The instance whose boot disk is to be mapped.
    :return stg_elem: The storage element (LU, VDisk, etc.) that was mapped
    :return vios: The EntryWrapper of the VIOS from which the mapping was
                  made.
    :raise InstanceDiskMappingFailed: If the mapping could not be done.
    """
    for stg_elem, vios in self._get_bootdisk_iter(instance):
        msg_args = dict(disk_name=stg_elem.name, vios_name=vios.name)

        # Build a fresh mapping.  NOTE: if a mapping already exists on the
        # other VIOS but not on this one, a second mapping gets created
        # here.  Reaching that state takes an extreme sequence of events,
        # and the duplicate mapping would be harmless anyway.  The
        # alternative - always scanning every VIOS for existing mappings -
        # would slow the common case by a full GET of VIOS+VIO_SMAP.
        LOG.debug("Mapping boot disk %(disk_name)s to the management "
                  "partition from Virtual I/O Server %(vios_name)s.",
                  msg_args, instance=instance)
        try:
            tsk_map.add_vscsi_mapping(self._host_uuid, vios, self.mp_uuid,
                                      stg_elem)
        except Exception:
            LOG.exception("Failed to map boot disk %(disk_name)s to the "
                          "management partition from Virtual I/O Server "
                          "%(vios_name)s.", msg_args, instance=instance)
            # Fall through and try the next candidate, if any.
        else:
            # Success - add_vscsi_mapping already logged it.
            return stg_elem, vios

    # No boot dev was found, or every mapping attempt failed.
    raise exception.InstanceDiskMappingFailed(instance_name=instance.name)
def test_mapping_new_mapping(self):
    """A fuse limit below the map count forces a brand-new mapping."""
    self.adpt.read.return_value = tju.load_file(VIO_MULTI_MAP_FILE,
                                                self.adpt)

    def check_update(*args, **kwargs):
        # The new mapping should be present...
        wrapper = args[0]
        self.assertEqual(6, len(wrapper.scsi_mappings))
        # ...but on its own adapter pair (no fusing occurred).
        base, added = wrapper.scsi_mappings[0], wrapper.scsi_mappings[5]
        self.assertNotEqual(base.client_adapter, added.client_adapter)
        self.assertNotEqual(base.server_adapter, added.server_adapter)
        return wrapper.entry

    self.adpt.update_by_path.side_effect = check_update

    # Build the storage element to map.
    pv = pvm_stor.PV.bld(self.adpt, 'pv_name', 'pv_udid')

    scsi_mapper.add_vscsi_mapping('host_uuid', 'vios_uuid', LPAR_UUID, pv,
                                  fuse_limit=5)

    # The validation callback above must have run exactly once.
    self.assertEqual(1, self.adpt.update_by_path.call_count)
def _add_mapping(self, adapter, host_uuid, vm_uuid, vios_uuid, device_name):
    """Build the vscsi map and add the mapping to the given VIOS.

    :param adapter: The pypowervm API adapter.
    :param host_uuid: The UUID of the target host.
    :param vm_uuid: The UUID of the VM instance.
    :param vios_uuid: The UUID of the vios for the pypowervm adapter.
    :param device_name: The hdisk device name.
    """
    # Wrap the hdisk in a PV storage element, then map it to the VM.
    pv = pvm_stor.PV.bld(adapter, device_name)
    tsk_map.add_vscsi_mapping(host_uuid, vios_uuid, vm_uuid, pv)
def connect_disk(self, context, instance, disk_info, lpar_uuid):
    """Connects the disk image to the Virtual Machine.

    :param context: nova context for the transaction.
    :param instance: nova instance to connect the disk to.
    :param disk_info: The pypowervm storage element returned from
                      create_disk_from_image.  Ex. VOptMedia, VDisk, LU,
                      or PV.
    :param: lpar_uuid: The pypowervm UUID that corresponds to the VM.
    """
    # Map the storage element to the LPAR through this driver's VIOS.
    tsk_map.add_vscsi_mapping(
        self.host_uuid, self.vios_uuid, lpar_uuid, disk_info)
def connect_instance_disk_to_mgmt(self, instance):
    """Connect an instance's boot disk to the management partition.

    :param instance: The instance whose boot disk is to be mapped.
    :return stg_elem: The storage element (LU, VDisk, etc.) that was mapped
    :return vios: The EntryWrapper of the VIOS from which the mapping was
                  made.
    :raise InstanceDiskMappingFailed: If the mapping could not be done.
    """
    msg_args = {"instance_name": instance.name}
    lpar_wrap = vm.get_instance_wrapper(
        self.adapter, instance, self.host_uuid)
    disk_iter = self.instance_disk_iter(instance, lpar_wrap=lpar_wrap)
    for stg_elem, vios in disk_iter:
        msg_args["disk_name"] = stg_elem.name
        msg_args["vios_name"] = vios.name

        # Build a fresh mapping.  NOTE: if a mapping already exists on the
        # other VIOS but not on this one, a second mapping gets created
        # here.  Reaching that state takes an extreme sequence of events,
        # and the duplicate mapping would be harmless anyway.  The
        # alternative - always scanning every VIOS for existing mappings -
        # would slow the common case by a full GET of VIOS+SCSI_MAPPING.
        LOG.debug(
            "Mapping boot disk %(disk_name)s of instance "
            "%(instance_name)s to the management partition from "
            "Virtual I/O Server %(vios_name)s.",
            msg_args,
        )
        try:
            tsk_map.add_vscsi_mapping(
                self.host_uuid, vios, self.mp_uuid, stg_elem)
        except Exception as e:
            # Remember the failure, then move on to the next candidate.
            msg_args["exc"] = e
            LOG.warning(
                _LW(
                    "Failed to map boot disk %(disk_name)s of "
                    "instance %(instance_name)s to the management "
                    "partition from Virtual I/O Server "
                    "%(vios_name)s: %(exc)s"
                ),
                msg_args,
            )
            continue
        # Mapped successfully - add_vscsi_mapping already logged it.
        return stg_elem, vios

    # No boot dev was found, or every mapping attempt failed.
    raise npvmex.InstanceDiskMappingFailed(**msg_args)
def connect_instance_disk_to_mgmt(self, instance):
    """Connect an instance's boot disk to the management partition.

    :param instance: The instance whose boot disk is to be mapped.
    :return stg_elem: The storage element (LU, VDisk, etc.) that was mapped
    :return vios: The EntryWrapper of the VIOS from which the mapping was
                  made.
    :raise InstanceDiskMappingFailed: If the mapping could not be done.
    """
    msg_args = {'instance_name': instance.name}
    lpar_wrap = vm.get_instance_wrapper(self.adapter, instance,
                                        self.host_uuid)
    for stg_elem, vios in self.instance_disk_iter(instance,
                                                  lpar_wrap=lpar_wrap):
        msg_args.update(disk_name=stg_elem.name, vios_name=vios.name)

        # Build a fresh mapping.  NOTE: if a mapping already exists on the
        # other VIOS but not on this one, a second mapping gets created
        # here.  Reaching that state takes an extreme sequence of events,
        # and the duplicate mapping would be harmless anyway.  The
        # alternative - always scanning every VIOS for existing mappings -
        # would slow the common case by a full GET of VIOS+SCSI_MAPPING.
        LOG.debug("Mapping boot disk %(disk_name)s of instance "
                  "%(instance_name)s to the management partition from "
                  "Virtual I/O Server %(vios_name)s.", msg_args)
        try:
            tsk_map.add_vscsi_mapping(self.host_uuid, vios, self.mp_uuid,
                                      stg_elem)
        except Exception as e:
            # Remember the failure, then move on to the next candidate.
            msg_args['exc'] = e
            LOG.warning(
                _LW("Failed to map boot disk %(disk_name)s of "
                    "instance %(instance_name)s to the management "
                    "partition from Virtual I/O Server "
                    "%(vios_name)s: %(exc)s"), msg_args)
        else:
            # Mapped successfully - add_vscsi_mapping already logged it.
            return stg_elem, vios

    # No boot dev was found, or every mapping attempt failed.
    raise npvmex.InstanceDiskMappingFailed(**msg_args)
def connect_disk(self, context, instance, disk_info, lpar_uuid):
    """Connects the disk image to the Virtual Machine.

    :param context: nova context for the transaction.
    :param instance: nova instance to connect the disk to.
    :param disk_info: The pypowervm storage element returned from
                      create_disk_from_image.  Ex. VOptMedia, VDisk, LU,
                      or PV.
    :param: lpar_uuid: The pypowervm UUID that corresponds to the VM.
    """
    # Build a reference LU pointing at the backing storage.
    lu = pvm_stg.LU.bld_ref(self.adapter, disk_info.name, disk_info.udid)

    # Map the LU to *each* VIOS on the LPAR's host.  The LPAR's host is
    # likely the same as self.host_uuid, but resolving it from the LPAR
    # itself is safer.
    host_href = vm.get_vm_qp(self.adapter, lpar_uuid,
                             'AssociatedManagedSystem')
    host_uuid = pvm_u.get_req_path_uuid(host_href, preserve_case=True)
    for vios_uuid in self._vios_uuids(host_uuid=host_uuid):
        tsk_map.add_vscsi_mapping(host_uuid, vios_uuid, lpar_uuid, lu)
def test_mapping_new_mapping(self):
    """Fuse limit, slot number, LUA via add_vscsi_mapping."""
    self.adpt.read.return_value = self.v1resp

    def check_update(*args, **kwargs):
        vios_w = args[0]
        # A sixth mapping should now exist.
        self.assertEqual(6, len(vios_w.scsi_mappings))
        added = vios_w.scsi_mappings[5]
        # It must live on its own adapter pair (fuse limit was hit).
        self.assertNotEqual(vios_w.scsi_mappings[0].client_adapter,
                            added.client_adapter)
        self.assertNotEqual(vios_w.scsi_mappings[0].server_adapter,
                            added.server_adapter)
        # And it must carry the requested slot number and LUA.
        self.assertEqual(23, added.client_adapter.lpar_slot_num)
        self.assertEqual('the_lua', added.target_dev.lua)
        return vios_w.entry

    self.adpt.update_by_path.side_effect = check_update

    # Build the storage element to map.
    pv = pvm_stor.PV.bld(self.adpt, 'pv_name', 'pv_udid')

    # Passing lpar_slot_num and lua here also validates those kwargs in
    # build_vscsi_mapping.
    scsi_mapper.add_vscsi_mapping('host_uuid', 'vios_uuid', LPAR_UUID, pv,
                                  fuse_limit=5, lpar_slot_num=23,
                                  lua='the_lua')

    # The validation callback above must have run exactly once.
    self.assertEqual(1, self.adpt.update_by_path.call_count)
def test_mapping(self):
    """Add a mapping; re-adding via a wrapper is a no-op."""
    vio_resp = tju.load_file(VIO_MULTI_MAP_FILE, self.adpt)
    self.adpt.read.return_value = vio_resp

    def check_update(*args, **kwargs):
        # The new mapping should have fused onto the existing adapter pair.
        vios_w = args[0]
        self.assertEqual(6, len(vios_w.scsi_mappings))
        base, added = vios_w.scsi_mappings[0], vios_w.scsi_mappings[4]
        self.assertEqual(base.client_adapter, added.client_adapter)
        self.assertEqual(base.server_adapter, added.server_adapter)
        return vios_w.entry

    self.adpt.update_by_path.side_effect = check_update

    # Build the storage element to map.
    pv = pvm_stor.PV.bld(self.adpt, 'pv_name', 'pv_udid')

    scsi_mapper.add_vscsi_mapping('host_uuid', 'vios_uuid', LPAR_UUID, pv)

    # Exactly one update, and exactly one VIOS lookup.
    self.assertEqual(1, self.adpt.update_by_path.call_count)
    self.assertEqual(1, self.adpt.read.call_count)

    # Repeat, this time handing in the VIOS wrapper directly.
    vios_wrap = pvm_vios.VIOS.wrap(vio_resp)
    self.adpt.update_by_path.reset_mock()
    self.adpt.read.reset_mock()
    scsi_mapper.add_vscsi_mapping('host_uuid', vios_wrap, LPAR_UUID, pv)

    # The mapping already existed, so no update and no lookup occurred.
    self.assertEqual(0, self.adpt.update_by_path.call_count)
    self.assertEqual(0, self.adpt.read.call_count)
def test_add_vscsi_mapping_root_uri(self):
    """The client LPAR href may be a root (non-child) LPAR URI."""
    href = ('https://9.1.2.3:12443/rest/api/uom/LogicalPartition/' +
            LPAR_UUID)
    self.mock_crt_href.return_value = href
    self.adpt.read.return_value = self.v2resp

    def check_update(*args, **kwargs):
        # The newest mapping should carry the root URI.
        vios_w = args[0]
        self.assertEqual(href, vios_w.scsi_mappings[-1].client_lpar_href)
        return vios_w.entry

    self.adpt.update_by_path.side_effect = check_update

    # Build the storage element, then add the vscsi mapping.
    pv = pvm_stor.PV.bld(self.adpt, 'pv_name', 'pv_udid')
    scsi_mapper.add_vscsi_mapping('host_uuid', 'vios_uuid', LPAR_UUID, pv)

    # The validation callback above must have run exactly once.
    self.assertEqual(1, self.adpt.update_by_path.call_count)
def test_mapping_retry(self):
    """Tests that a mapping function will be retried."""
    # Mock Data.  Need to load this once per retry, or else the mappings
    # get appended with each other.
    self.adpt.read.side_effect = [
        tju.load_file(VIO_MULTI_MAP_FILE, self.adpt),
        tju.load_file(VIO_MULTI_MAP_FILE, self.adpt),
        tju.load_file(VIO_MULTI_MAP_FILE, self.adpt)
    ]

    # Count attempts in the enclosing scope via nonlocal rather than a
    # module-level global, which leaked mutable state across tests.
    attempt_count = 0

    # Validate that the mapping was added to existing.  First few times
    # through the loop, force a retry exception.
    def validate_update(*kargs, **kwargs):
        nonlocal attempt_count
        attempt_count += 1
        if attempt_count == 3:
            vios_w = kargs[0]
            self.assertEqual(6, len(vios_w.scsi_mappings))
            return vios_w.entry
        else:
            tju.raiseRetryException()

    self.adpt.update_by_path.side_effect = validate_update

    # Create the new storage dev
    pv = pvm_stor.PV.bld(self.adpt, 'pv_name', 'pv_udid')

    # Run the code
    scsi_mapper.add_vscsi_mapping('host_uuid', 'vios_uuid', LPAR_UUID, pv)

    # Make sure that our validation code above was invoked all three times
    self.assertEqual(3, self.adpt.update_by_path.call_count)
    self.assertEqual(3, attempt_count)
def test_mapping_retry(self):
    """Tests that a mapping function will be retried."""
    # Mock Data.  Need to load this once per retry, or else the mappings
    # get appended with each other.
    self.adpt.read.side_effect = [
        tju.load_file(VIO_MULTI_MAP_FILE, self.adpt),
        tju.load_file(VIO_MULTI_MAP_FILE, self.adpt),
        tju.load_file(VIO_MULTI_MAP_FILE, self.adpt)]

    # Count attempts in the enclosing scope via nonlocal rather than a
    # module-level global, which leaked mutable state across tests.
    attempt_count = 0

    # Validate that the mapping was added to existing.  First few times
    # through the loop, force a retry exception.
    def validate_update(*kargs, **kwargs):
        nonlocal attempt_count
        attempt_count += 1
        if attempt_count == 3:
            vios_w = kargs[0]
            self.assertEqual(6, len(vios_w.scsi_mappings))
            return vios_w.entry
        else:
            tju.raiseRetryException()

    self.adpt.update_by_path.side_effect = validate_update

    # Create the new storage dev
    pv = pvm_stor.PV.bld(self.adpt, 'pv_name', 'pv_udid')

    # Run the code
    scsi_mapper.add_vscsi_mapping('host_uuid', 'vios_uuid', LPAR_UUID, pv)

    # Make sure that our validation code above was invoked all three times
    self.assertEqual(3, self.adpt.update_by_path.call_count)
    self.assertEqual(3, attempt_count)
def test_remap_storage_vopt(self):
    """Modify a vopt mapping by media name, then by VIOS wrapper/udid."""
    # Mock data
    self.adpt.read.return_value = self.v1resp

    # Validate that mapping was modified
    def validate_update(*kargs, **kwargs):
        vios_w = kargs[0]
        return vios_w.entry

    self.adpt.update_by_path.side_effect = validate_update

    # Run modify code using media name
    media_name = 'bldr1_dfe05349_kyleh_config.iso'
    vopt = pvm_stor.VOptMedia.bld(self.adpt, 'new_media.iso', size=1)
    vios, mod_map = scsi_mapper.modify_vopt_mapping(
        self.adpt, 'fake_vios_uuid', 2, new_media=vopt,
        media_name=media_name)

    # Make sure that our validation code above was invoked
    self.assertEqual(1, self.adpt.update_by_path.call_count)
    self.assertIsNotNone(mod_map)
    self.assertIsInstance(mod_map.backing_storage, pvm_stor.VOptMedia)
    self.assertEqual(mod_map.backing_storage.name, vopt.name)
    # And the VIOS was "looked up"
    self.assertEqual(1, self.adpt.read.call_count)
    self.assertEqual(self.v1resp.atom, vios.entry)

    # Ensure exceptions raised correctly
    vopt2 = pvm_stor.VOptMedia.bld(self.adpt, 'new_media2.iso', size=1)
    vopt3 = pvm_stor.VOptMedia.bld(self.adpt, 'new_media3.iso', size=1)
    scsi_mapper.add_vscsi_mapping('host_uuid', 'vios_uuid', LPAR_UUID,
                                  vopt3)
    self.adpt.update_by_path.reset_mock()
    self.adpt.read.reset_mock()
    # NOTE: these assertions previously read 'update_py_path' (a typo);
    # the Mock auto-created that attribute with call_count 0, so they
    # always passed vacuously.  They mean update_by_path.
    # Zero matching maps found
    self.assertRaises(exc.SingleMappingNotFoundRemapError,
                      scsi_mapper.modify_vopt_mapping,
                      self.adpt, 'fake_vios_uuid', 2,
                      new_media=vopt, media_name="no_matches.iso")
    self.assertEqual(0, self.adpt.update_by_path.call_count)
    # More than one matching maps found
    self.assertRaises(exc.SingleMappingNotFoundRemapError,
                      scsi_mapper.modify_vopt_mapping,
                      self.adpt, 'fake_vios_uuid', 2,
                      new_media=vopt2)
    self.assertEqual(0, self.adpt.update_by_path.call_count)
    # New storage element already mapped
    self.assertRaises(exc.StorageMapExistsRemapError,
                      scsi_mapper.modify_vopt_mapping,
                      self.adpt, 'fake_vios_uuid', 2,
                      new_media=vopt3, media_name=vopt.name)
    self.assertEqual(0, self.adpt.update_by_path.call_count)

    # Run modify code using VIOS wrapper and media udid
    media_udid = '0ebldr1_dfe05349_kyleh_config.iso'
    vios_wrap = pvm_vios.VIOS.wrap(
        tju.load_file(VIO_MULTI_MAP_FILE, self.adpt))
    self.adpt.read.reset_mock()
    vios, mod_map = scsi_mapper.modify_vopt_mapping(
        self.adpt, vios_wrap, LPAR_UUID, new_media=vopt,
        udid=media_udid)
    self.assertEqual(1, self.adpt.update_by_path.call_count)
    self.assertIsNotNone(mod_map)
    self.assertIsInstance(mod_map.backing_storage, pvm_stor.VOptMedia)
    self.assertEqual(mod_map.backing_storage.name, vopt.name)
    # But the VIOS was not "looked up"
    self.assertEqual(0, self.adpt.read.call_count)
    self.assertEqual(vios_wrap.entry, vios.entry)