def plug(self, vif, new_vif=True):
    """Plug a virtual interface (network) into a VM.

    Simply creates the client network adapter (CNA) on the VM.

    :param vif: The virtual interface to plug into the instance.
    :param new_vif: (Optional, Default: True) If set, indicates that it
                    is a brand new VIF.  If False, it indicates that the
                    VIF is already on the client but should be treated
                    on the bridge.
    :return: The new vif that was created.  Only returned if new_vif is
             set to True.  Otherwise None is expected.
    """
    # Nothing to do unless this is a brand-new VIF.
    if not new_vif:
        return None

    uuid = vm.get_pvm_uuid(self.instance)

    # A CNA requires a VLAN; the networking-powervm neutron agent
    # stores it in the vif details.
    vlan_id = int(vif['details']['vlan'])
    LOG.debug("Creating SEA-based VIF with VLAN %s", str(vlan_id),
              instance=self.instance)
    return pvm_cna.crt_cna(self.adapter, None, uuid, vlan_id,
                           mac_addr=vif['address'])
def detach_disk(self, instance):
    """Detach the storage adapters from the image disk.

    :param instance: Instance to disconnect the image for.
    :return: A list of all the backing storage elements that were
             disconnected from the I/O Server and VM.
    """
    client_uuid = vm.get_pvm_uuid(instance)

    # Only VDisk-backed mappings are of interest here.
    vdisk_match = tsk_map.gen_match_func(pvm_stg.VDisk)

    # Fetch the VIOS with its storage-mapping extended attribute group.
    vios_wrap = pvm_vios.VIOS.get(self._adapter, uuid=self._vios_uuid,
                                  xag=[pvm_const.XAG.VIO_SMAP])

    # Strip the matching mappings off the wrapper...
    removed = tsk_map.remove_maps(vios_wrap, client_uuid,
                                  match_func=vdisk_match)

    # ...and push the change back to the VIOS.
    vios_wrap.update()

    return [mapping.backing_storage for mapping in removed]
def get_vnc_console(self, context, instance):
    """Get connection info for a vnc console.

    :param context: security context
    :param instance: nova.objects.instance.Instance
    :return: An instance of console.type.ConsoleVNC
    """
    self._log_operation('get_vnc_console', instance)
    lpar_uuid = vm.get_pvm_uuid(instance)

    # The VNC proxy endpoint the client will connect through.
    host = CONF.vnc.server_proxyclient_address
    # TODO(thorst, efried) Add the x509 certificate support when it lands

    try:
        # Open up a remote vterm for the LPAR.
        port = pvm_vterm.open_remotable_vnc_vterm(
            self.adapter, lpar_uuid, host, vnc_path=lpar_uuid)
        # Note that the VNC viewer will wrap the internal_access_path
        # with the HTTP content.
        return console_type.ConsoleVNC(host=host, port=port,
                                       internal_access_path=lpar_uuid)
    except pvm_exc.HttpError as err:
        with excutils.save_and_reraise_exception(logger=LOG) as sare:
            # Raise a friendlier error if the LPAR could not be found.
            if err.response.status == 404:
                sare.reraise = False
                raise exc.InstanceNotFound(instance_id=instance.uuid)
def dlt_vopt(self, instance, stg_ftsk):
    """Delete the virtual optical and scsi mappings for a VM.

    :param instance: The nova instance whose VOpt(s) are to be removed.
    :param stg_ftsk: A FeedTask. The actions to modify the storage will
                     be added as batched functions onto the FeedTask.
    """
    lpar_uuid = vm.get_pvm_uuid(instance)

    # Matcher shared by both the find and remove operations below.
    vopt_match = tsk_map.gen_match_func(pvm_stg.VOptMedia)

    # Queue the scsi mapping removal onto the FeedTask.
    stg_ftsk.wrapper_tasks[self.vios_uuid].add_functor_subtask(
        tsk_map.remove_maps, lpar_uuid, match_func=vopt_match)

    # Collect the VOpt backing devices from the current mappings.
    scsi_maps = stg_ftsk.get_wrapper(self.vios_uuid).scsi_mappings
    media_elems = [
        mp.backing_storage
        for mp in tsk_map.find_maps(scsi_maps, client_lpar_id=lpar_uuid,
                                    match_func=vopt_match)]

    def rm_vopt():
        # Runs after the mappings are removed; deletes the media itself
        # from the volume group.
        LOG.info("Removing virtual optical storage.",
                 instance=instance)
        vg_wrap = pvm_stg.VG.get(self.adapter, uuid=self.vg_uuid,
                                 parent_type=pvm_vios.VIOS,
                                 parent_uuid=self.vios_uuid)
        tsk_stg.rm_vg_storage(vg_wrap, vopts=media_elems)

    # Only schedule the delete if there is actually media to remove.
    if media_elems:
        stg_ftsk.add_post_execute(task.FunctorTask(rm_vopt))
def dlt_vopt(self, instance, stg_ftsk):
    """Deletes the virtual optical and scsi mappings for a VM.

    :param instance: The nova instance whose VOpt(s) are to be removed.
    :param stg_ftsk: A FeedTask. The actions to modify the storage will
                     be added as batched functions onto the FeedTask.
    """
    lpar_uuid = vm.get_pvm_uuid(instance)

    # The matching function for find_maps, remove_maps
    match_func = tsk_map.gen_match_func(pvm_stg.VOptMedia)

    # Add a function to remove the mappings
    stg_ftsk.wrapper_tasks[self.vios_uuid].add_functor_subtask(
        tsk_map.remove_maps, lpar_uuid, match_func=match_func)

    # Find the VOpt device based from the mappings
    media_mappings = tsk_map.find_maps(
        stg_ftsk.get_wrapper(self.vios_uuid).scsi_mappings,
        client_lpar_id=lpar_uuid, match_func=match_func)
    media_elems = [x.backing_storage for x in media_mappings]

    def rm_vopt():
        # Deferred until after the FeedTask has removed the mappings;
        # deletes the optical media from the volume group itself.
        LOG.info("Removing virtual optical storage.",
                 instance=instance)
        vg_wrap = pvm_stg.VG.get(self.adapter, uuid=self.vg_uuid,
                                 parent_type=pvm_vios.VIOS,
                                 parent_uuid=self.vios_uuid)
        tsk_stg.rm_vg_storage(vg_wrap, vopts=media_elems)

    # Add task to remove the media if it exists
    if media_elems:
        stg_ftsk.add_post_execute(task.FunctorTask(rm_vopt))
def execute(self, vm_cnas):
    """Create the secure RMC management VIF, if one is needed.

    :param vm_cnas: Cached list of the VM's CNA wrappers, or None to
                    have this method query the API for them.
    :return: The newly-created CNA, or None if the management vSwitch
             is missing or a management VIF already exists.
    """
    LOG.info('Plugging the Management Network Interface to instance.',
             instance=self.instance)

    # Locate the secure RMC vSwitch.  A management VIF is only needed
    # (and only possible) if that vSwitch exists.
    matches = pvm_net.VSwitch.search(
        self.adapter, parent_type=pvm_ms.System.schema_type,
        parent_uuid=self.adapter.sys_uuid, name=SECURE_RMC_VSWITCH)
    vswitch = matches[0] if len(matches) == 1 else None

    if vswitch is None:
        LOG.warning('No management VIF created for instance due to lack '
                    'of Management Virtual Switch',
                    instance=self.instance)
        return None

    # Verify there are no existing NICs on the vSwitch, so the VM does
    # not end up with multiple RMC VIFs.
    if vm_cnas is None:
        has_mgmt_vif = vm.get_cnas(self.adapter, self.instance,
                                   vswitch_uri=vswitch.href)
    else:
        has_mgmt_vif = vswitch.href in [cna.vswitch_uri
                                        for cna in vm_cnas]

    if has_mgmt_vif:
        LOG.debug('Management VIF already created for instance',
                  instance=self.instance)
        return None

    return pvm_cna.crt_cna(self.adapter, None,
                           vm.get_pvm_uuid(self.instance),
                           SECURE_RMC_VLAN, vswitch=SECURE_RMC_VSWITCH,
                           crt_vswitch=True)
def attach_disk(self, instance, disk_info, stg_ftsk): """Connects the disk image to the Virtual Machine. :param instance: nova instance to which to attach the disk. :param disk_info: The pypowervm storage element returned from create_disk_from_image. Ex. VOptMedia, VDisk, LU, or PV. :param stg_ftsk: FeedTask to defer storage connectivity operations. """ # Create the LU structure lu = pvm_stg.LU.bld_ref(self._adapter, disk_info.name, disk_info.udid) lpar_uuid = vm.get_pvm_uuid(instance) # This is the delay apply mapping def add_func(vios_w): LOG.info("Attaching SSP disk from VIOS %s.", vios_w.name, instance=instance) mapping = tsk_map.build_vscsi_mapping( self._host_uuid, vios_w, lpar_uuid, lu) return tsk_map.add_map(vios_w, mapping) # Add the mapping to *each* VIOS on the LPAR's host. # The LPAR's host has to be self.host_uuid, else the PowerVM API will # fail. # # Note: this may not be all the VIOSes on the system - just the ones # in the SSP cluster. for vios_uuid in self._vios_uuids: stg_ftsk.wrapper_tasks[vios_uuid].add_functor_subtask(add_func)
def attach_disk(self, instance, disk_info, stg_ftsk):
    """Connects the disk image to the Virtual Machine.

    :param instance: nova instance to which to attach the disk.
    :param disk_info: The pypowervm storage element returned from
                      create_disk_from_image.  Ex. VOptMedia, VDisk,
                      LU, or PV.
    :param stg_ftsk: FeedTask to defer storage connectivity operations.
    """
    # Build a reference to the shared LU that is to be mapped.
    lu_ref = pvm_stg.LU.bld_ref(self._adapter, disk_info.name,
                                disk_info.udid)
    client_uuid = vm.get_pvm_uuid(instance)

    def add_func(vios_w):
        # Deferred mapping builder, executed per VIOS by the FeedTask.
        LOG.info("Attaching SSP disk from VIOS %s.", vios_w.name,
                 instance=instance)
        new_map = tsk_map.build_vscsi_mapping(self._host_uuid, vios_w,
                                              client_uuid, lu_ref)
        return tsk_map.add_map(vios_w, new_map)

    # Queue the mapping onto *each* VIOS on the LPAR's host.  The
    # LPAR's host has to be self._host_uuid, else the PowerVM API will
    # fail.
    #
    # Note: this may not be all the VIOSes on the system - just the
    # ones in the SSP cluster.
    for vios_uuid in self._vios_uuids:
        stg_ftsk.wrapper_tasks[vios_uuid].add_functor_subtask(add_func)
def get_vnc_console(self, context, instance):
    """Get connection info for a vnc console.

    :param context: security context
    :param instance: nova.objects.instance.Instance
    :return: An instance of console.type.ConsoleVNC
    :raise InstanceNotFound: if the backing LPAR no longer exists on
                             the host (REST 404).
    """
    self._log_operation('get_vnc_console', instance)
    lpar_uuid = vm.get_pvm_uuid(instance)

    # Build the connection to the VNC.
    host = CONF.vnc.server_proxyclient_address
    # TODO(thorst, efried) Add the x509 certificate support when it lands

    try:
        # Open up a remote vterm
        port = pvm_vterm.open_remotable_vnc_vterm(
            self.adapter, lpar_uuid, host, vnc_path=lpar_uuid)
        # Note that the VNC viewer will wrap the internal_access_path
        # with the HTTP content.
        return console_type.ConsoleVNC(host=host, port=port,
                                       internal_access_path=lpar_uuid)
    except pvm_exc.HttpError as e:
        with excutils.save_and_reraise_exception(logger=LOG) as sare:
            # If the LPAR was not found, raise a more descriptive error
            if e.response.status == 404:
                sare.reraise = False
                raise exc.InstanceNotFound(instance_id=instance.uuid)
def add_func(vios_w):
    # Deferred subtask: build and add the config-drive vSCSI mapping on
    # the given VIOS wrapper.  ``instance`` and ``vopt`` come from the
    # enclosing scope.
    LOG.info("Adding cfg drive mapping to Virtual I/O Server %s.",
             vios_w.name, instance=instance)
    vscsi_map = tsk_map.build_vscsi_mapping(
        None, vios_w, vm.get_pvm_uuid(instance), vopt)
    return tsk_map.add_map(vios_w, vscsi_map)
def detach_disk(self, instance):
    """Detaches the storage adapters from the disk.

    :param instance: instance from which to detach the image.
    :return: A list of all the backing storage elements that were
             detached from the I/O Server and VM.
    """
    stg_ftsk = tsk_par.build_active_vio_feed_task(
        self._adapter, name='ssp', xag=[pvm_const.XAG.VIO_SMAP])

    lpar_uuid = vm.get_pvm_uuid(instance)
    match_func = tsk_map.gen_match_func(pvm_stg.LU)

    def rm_func(vwrap):
        # Deferred removal of this LPAR's LU mappings on one VIOS.
        LOG.info("Removing SSP disk connection to VIOS %s.",
                 vwrap.name, instance=instance)
        return tsk_map.remove_maps(vwrap, lpar_uuid,
                                   match_func=match_func)

    # Queue the removal onto *each* VIOS on the LPAR's host.  The
    # LPAR's host has to be self._host_uuid, else the PowerVM API will
    # fail.  Note this may not be all the VIOSes on the system - just
    # the ones in the SSP cluster.
    #
    # The mappings will normally be the same on all VIOSes, unless a
    # VIOS was down when a disk was added.  So for the return value we
    # collect the union of all relevant mappings from all VIOSes.
    lu_set = set()
    for vios_uuid in self._vios_uuids:
        # Queue the removal for this VIOS.
        stg_ftsk.wrapper_tasks[vios_uuid].add_functor_subtask(rm_func)

        # Record the active LUs so a delete op knows what to remove.
        vios_w = stg_ftsk.wrapper_tasks[vios_uuid].wrapper
        found = tsk_map.find_maps(vios_w.scsi_mappings,
                                  client_lpar_id=lpar_uuid,
                                  match_func=match_func)
        if found:
            lu_set.update(mp.backing_storage for mp in found)

    stg_ftsk.execute()

    return list(lu_set)
def detach_disk(self, instance):
    """Detaches the storage adapters from the disk.

    :param instance: instance from which to detach the image.
    :return: A list of all the backing storage elements that were
             detached from the I/O Server and VM.
    """
    stg_ftsk = tsk_par.build_active_vio_feed_task(
        self._adapter, name='ssp', xag=[pvm_const.XAG.VIO_SMAP])

    lpar_uuid = vm.get_pvm_uuid(instance)
    match_func = tsk_map.gen_match_func(pvm_stg.LU)

    def rm_func(vwrap):
        # Deferred subtask: remove this LPAR's LU mappings on one VIOS.
        LOG.info("Removing SSP disk connection to VIOS %s.",
                 vwrap.name, instance=instance)
        return tsk_map.remove_maps(vwrap, lpar_uuid,
                                   match_func=match_func)

    # Remove the mapping from *each* VIOS on the LPAR's host.
    # The LPAR's host has to be self._host_uuid, else the PowerVM API
    # will fail.
    #
    # Note - this may not be all the VIOSes on the system...just the
    # ones in the SSP cluster.
    #
    # The mappings will normally be the same on all VIOSes, unless a
    # VIOS was down when a disk was added.  So for the return value, we
    # need to collect the union of all relevant mappings from all
    # VIOSes.
    lu_set = set()
    for vios_uuid in self._vios_uuids:
        # Add the remove for the VIO
        stg_ftsk.wrapper_tasks[vios_uuid].add_functor_subtask(rm_func)

        # Find the active LUs so that a delete op knows what to remove.
        vios_w = stg_ftsk.wrapper_tasks[vios_uuid].wrapper
        mappings = tsk_map.find_maps(vios_w.scsi_mappings,
                                     client_lpar_id=lpar_uuid,
                                     match_func=match_func)
        if mappings:
            lu_set.update([x.backing_storage for x in mappings])

    stg_ftsk.execute()

    return list(lu_set)
def get_bootdisk_path(self, instance, vios_uuid):
    """Find the local path for the instance's boot disk.

    :param instance: nova.objects.instance.Instance object owning the
                     requested disk.
    :param vios_uuid: PowerVM UUID of the VIOS to search for mappings.
    :return: Local path for instance's boot disk, or None if no boot
             disk mapping was found.
    """
    client_id = vm.get_pvm_uuid(instance)
    boot_match = self._disk_match_func(DiskType.BOOT, instance)
    vwrap = pvm_vios.VIOS.get(self._adapter, uuid=vios_uuid,
                              xag=[pvm_const.XAG.VIO_SMAP])
    found = tsk_map.find_maps(vwrap.scsi_mappings,
                              client_lpar_id=client_id,
                              match_func=boot_match)
    # Only the first match's backing device is of interest.
    return found[0].server_adapter.backing_dev_name if found else None
def execute(self, vm_cnas): LOG.info('Plugging the Management Network Interface to instance.', instance=self.instance) # Determine if we need to create the secure RMC VIF. This should only # be needed if there is not a VIF on the secure RMC vSwitch vswitch = None vswitches = pvm_net.VSwitch.search( self.adapter, parent_type=pvm_ms.System.schema_type, parent_uuid=self.adapter.sys_uuid, name=SECURE_RMC_VSWITCH) if len(vswitches) == 1: vswitch = vswitches[0] if vswitch is None: LOG.warning( 'No management VIF created for instance due to lack ' 'of Management Virtual Switch', instance=self.instance) return None # This next check verifies that there are no existing NICs on the # vSwitch, so that the VM does not end up with multiple RMC VIFs. if vm_cnas is None: has_mgmt_vif = vm.get_cnas(self.adapter, self.instance, vswitch_uri=vswitch.href) else: has_mgmt_vif = vswitch.href in [cna.vswitch_uri for cna in vm_cnas] if has_mgmt_vif: LOG.debug('Management VIF already created for instance', instance=self.instance) return None lpar_uuid = vm.get_pvm_uuid(self.instance) return pvm_cna.crt_cna(self.adapter, None, lpar_uuid, SECURE_RMC_VLAN, vswitch=SECURE_RMC_VSWITCH, crt_vswitch=True)
def __init__(self, adapter, instance, connection_info, stg_ftsk=None):
    """Initialize the PowerVMVolumeAdapter

    :param adapter: The pypowervm adapter.
    :param instance: The nova instance that the volume should attach to.
    :param connection_info: The volume connection info generated from
                            the BDM. Used to determine how to attach the
                            volume to the VM.
    :param stg_ftsk: (Optional) The pypowervm transaction FeedTask for
                     the I/O Operations. If provided, the Virtual I/O
                     Server mapping updates will be added to the
                     FeedTask. This defers the updates to some later
                     point in time. If the FeedTask is not provided, the
                     updates will be run immediately when the respective
                     method is executed.
    """
    self.adapter = adapter
    self.instance = instance
    self.connection_info = connection_info
    # PowerVM UUID of the LPAR backing this instance.
    self.vm_uuid = vm.get_pvm_uuid(instance)
    # Initializes the FeedTask state — presumably sets self.stg_ftsk
    # from the given (or a fresh) FeedTask; see reset_stg_ftsk.
    self.reset_stg_ftsk(stg_ftsk=stg_ftsk)
    # Cached physical FC WWPNs; None until populated elsewhere.
    self._pfc_wwpns = None
def get_bootdisk_path(self, instance, vios_uuid):
    """Find the local path for the instance's boot disk.

    Searches the given VIOS's scsi mappings for the instance's boot
    disk and returns the backing device name of the first match.

    :param instance: nova.objects.instance.Instance object owning the
                     requested disk.
    :param vios_uuid: PowerVM UUID of the VIOS to search for mappings.
    :return: Local path (backing device name) for the instance's boot
             disk, or None if no matching mapping was found.
    """
    vm_uuid = vm.get_pvm_uuid(instance)
    match_func = self._disk_match_func(DiskType.BOOT, instance)
    vios_wrap = pvm_vios.VIOS.get(self._adapter, uuid=vios_uuid,
                                  xag=[pvm_const.XAG.VIO_SMAP])
    maps = tsk_map.find_maps(vios_wrap.scsi_mappings,
                             client_lpar_id=vm_uuid,
                             match_func=match_func)
    if maps:
        return maps[0].server_adapter.backing_dev_name
    return None
def attach_disk(self, instance, disk_info, stg_ftsk):
    """Attaches the disk image to the Virtual Machine.

    :param instance: nova instance to connect the disk to.
    :param disk_info: The pypowervm storage element returned from
                      create_disk_from_image.  Ex. VOptMedia, VDisk,
                      LU, or PV.
    :param stg_ftsk: The pypowervm transaction FeedTask for the I/O
                     Operations.  The Virtual I/O Server mapping
                     updates will be added to the FeedTask, deferring
                     them to some later point in time.
    """
    client_uuid = vm.get_pvm_uuid(instance)

    def add_func(vios_w):
        # Executed later by the FeedTask; builds and adds the vSCSI
        # mapping for the disk on the given VIOS.
        LOG.info("Adding logical volume disk connection to VIOS %(vios)s.",
                 {'vios': vios_w.name}, instance=instance)
        new_map = tsk_map.build_vscsi_mapping(
            self._host_uuid, vios_w, client_uuid, disk_info)
        return tsk_map.add_map(vios_w, new_map)

    stg_ftsk.wrapper_tasks[self._vios_uuid].add_functor_subtask(add_func)
def detach_disk(self, instance):
    """Detaches the storage adapters from the image disk.

    :param instance: Instance to disconnect the image for.
    :return: A list of all the backing storage elements that were
             disconnected from the I/O Server and VM.
    """
    lpar_uuid = vm.get_pvm_uuid(instance)

    # Build the match function (only VDisk mappings are removed).
    match_func = tsk_map.gen_match_func(pvm_stg.VDisk)

    # Fetch the VIOS with its storage-mapping extended attribute group.
    vios_w = pvm_vios.VIOS.get(
        self._adapter, uuid=self._vios_uuid,
        xag=[pvm_const.XAG.VIO_SMAP])

    # Remove the mappings (changes the local wrapper only).
    mappings = tsk_map.remove_maps(
        vios_w, lpar_uuid, match_func=match_func)

    # Update the VIOS with the removed mappings.
    vios_w.update()

    return [x.backing_storage for x in mappings]
from pypowervm import const as pvm_const from pypowervm import exceptions as pvm_exc from pypowervm.tasks import storage as tsk_stg from pypowervm.utils import transaction as pvm_tx from pypowervm.wrappers import cluster as pvm_clust from pypowervm.wrappers import storage as pvm_stg from pypowervm.wrappers import virtual_io_server as pvm_vios from nova import exception from nova import test from nova.tests.unit.virt import powervm from nova.virt.powervm.disk import ssp as ssp_dvr from nova.virt.powervm import vm FAKE_INST_UUID = uuidutils.generate_uuid(dashed=True) FAKE_INST_UUID_PVM = vm.get_pvm_uuid(mock.Mock(uuid=FAKE_INST_UUID)) class TestSSPDiskAdapter(test.NoDBTestCase): """Unit Tests for the LocalDisk storage driver.""" def setUp(self): super(TestSSPDiskAdapter, self).setUp() self.inst = powervm.TEST_INSTANCE self.apt = mock.Mock() self.host_uuid = 'host_uuid' self.ssp_wrap = mock.create_autospec(pvm_stg.SSP, instance=True) # SSP.refresh() returns itself
def plug(self, vif, new_vif=True):
    """Plugs a virtual interface (network) into a VM.

    Creates a 'peer to peer' connection between the Management partition
    hosting the Linux I/O and the client VM.  There will be one trunk
    adapter for a given client adapter.

    The device will be 'up' on the mgmt partition.

    Will make sure that the trunk device has the appropriate metadata
    (e.g. port id) set on it so that the Open vSwitch agent picks it up
    properly.

    :param vif: The virtual interface to plug into the instance.
    :param new_vif: (Optional, Default: True) If set, indicates that it
                    is a brand new VIF.  If False, it indicates that the
                    VIF is already on the client but should be treated
                    on the bridge.
    :return: The new vif that was created.  Only returned if new_vif is
             set to True.  Otherwise None is expected.
    """
    # Create the trunk and client adapter.
    lpar_uuid = vm.get_pvm_uuid(self.instance)
    mgmt_uuid = pvm_par.get_this_partition(self.adapter).uuid

    mtu = vif['network'].get_meta('mtu')
    if 'devname' in vif:
        dev_name = vif['devname']
    else:
        # Derive a tap-style device name from the vif id.
        dev_name = ("nic" + vif['id'])[:network_model.NIC_NAME_LEN]

    # Metadata the Open vSwitch agent reads off the trunk device.
    meta_attrs = ','.join([
        'iface-id=%s' % (vif.get('ovs_interfaceid') or vif['id']),
        'iface-status=active',
        'attached-mac=%s' % vif['address'],
        'vm-uuid=%s' % self.instance.uuid])

    if new_vif:
        return pvm_cna.crt_p2p_cna(
            self.adapter, None, lpar_uuid, [mgmt_uuid], NOVALINK_VSWITCH,
            crt_vswitch=True, mac_addr=vif['address'], dev_name=dev_name,
            ovs_bridge=vif['network']['bridge'],
            ovs_ext_ids=meta_attrs, configured_mtu=mtu)[0]
    else:
        # Bug : https://bugs.launchpad.net/nova-powervm/+bug/1731548
        # When a host is rebooted, something is discarding tap devices
        # for VMs deployed with OVS vif. To prevent VMs losing network
        # connectivity, this is fixed by recreating the tap devices
        # during init of the nova compute service, which will call vif
        # plug with new_vif==False.

        # Find the CNA for this vif.
        # TODO(esberglu) improve performance by caching VIOS wrapper(s)
        # and CNA lists (in case >1 vif per VM).
        cna_w_list = vm.get_cnas(self.adapter, self.instance)
        cna_w = self._find_cna_for_vif(cna_w_list, vif)
        if not cna_w:
            LOG.warning(
                'Unable to plug VIF with mac %s for instance. The '
                'VIF was not found on the instance.',
                vif['address'], instance=self.instance)
            return None

        # Find the corresponding trunk adapter
        trunks = pvm_cna.find_trunks(self.adapter, cna_w)
        for trunk in trunks:
            # Set MTU, OVS external ids, and OVS bridge metadata
            trunk.configured_mtu = mtu
            trunk.ovs_ext_ids = meta_attrs
            trunk.ovs_bridge = vif['network']['bridge']

            # Updating the trunk adapter will cause NovaLink to
            # reassociate the tap device.
            trunk.update()
def plug(self, vif, new_vif=True):
    """Plug a virtual interface (network) into a VM.

    Builds a 'peer to peer' connection between the Management partition
    hosting the Linux I/O and the client VM: one trunk adapter per
    client adapter, with the device 'up' on the mgmt partition.

    Also ensures the trunk device carries the metadata (e.g. port id)
    the Open vSwitch agent needs to pick it up properly.

    :param vif: The virtual interface to plug into the instance.
    :param new_vif: (Optional, Default: True) If set, indicates that it
                    is a brand new VIF.  If False, it indicates that the
                    VIF is already on the client but should be treated
                    on the bridge.
    :return: The new vif that was created.  Only returned if new_vif is
             set to True.  Otherwise None is expected.
    """
    lpar_uuid = vm.get_pvm_uuid(self.instance)
    mgmt_uuid = pvm_par.get_this_partition(self.adapter).uuid

    mtu = vif['network'].get_meta('mtu')
    dev_name = (vif['devname'] if 'devname' in vif else
                ("nic" + vif['id'])[:network_model.NIC_NAME_LEN])

    # Metadata the OVS agent reads off the trunk device.
    meta_attrs = ','.join([
        'iface-id=%s' % (vif.get('ovs_interfaceid') or vif['id']),
        'iface-status=active',
        'attached-mac=%s' % vif['address'],
        'vm-uuid=%s' % self.instance.uuid])

    if not new_vif:
        # Bug : https://bugs.launchpad.net/nova-powervm/+bug/1731548
        # When a host is rebooted, something is discarding tap devices
        # for VMs deployed with OVS vif. To prevent VMs losing network
        # connectivity, this is fixed by recreating the tap devices
        # during init of the nova compute service, which will call vif
        # plug with new_vif==False.

        # Locate the CNA backing this vif.
        # TODO(esberglu) improve performance by caching VIOS wrapper(s)
        # and CNA lists (in case >1 vif per VM).
        cna_w_list = vm.get_cnas(self.adapter, self.instance)
        cna_w = self._find_cna_for_vif(cna_w_list, vif)
        if not cna_w:
            LOG.warning('Unable to plug VIF with mac %s for instance. The '
                        'VIF was not found on the instance.',
                        vif['address'], instance=self.instance)
            return None

        # Refresh MTU, OVS external ids and OVS bridge metadata on each
        # corresponding trunk adapter.  Updating the trunk causes
        # NovaLink to reassociate the tap device.
        for trunk in pvm_cna.find_trunks(self.adapter, cna_w):
            trunk.configured_mtu = mtu
            trunk.ovs_ext_ids = meta_attrs
            trunk.ovs_bridge = vif['network']['bridge']
            trunk.update()
        return None

    # Brand-new VIF: create the trunk and client adapter pair.
    return pvm_cna.crt_p2p_cna(
        self.adapter, None, lpar_uuid, [mgmt_uuid], NOVALINK_VSWITCH,
        crt_vswitch=True, mac_addr=vif['address'], dev_name=dev_name,
        ovs_bridge=vif['network']['bridge'], ovs_ext_ids=meta_attrs,
        configured_mtu=mtu)[0]
def add_func(vios_w):
    # Deferred subtask run by the FeedTask: builds the vSCSI mapping
    # for the config-drive vopt and adds it to the given VIOS wrapper.
    # ``instance`` and ``vopt`` are captured from the enclosing scope.
    LOG.info("Adding cfg drive mapping to Virtual I/O Server %s.",
             vios_w.name, instance=instance)
    # NOTE(review): host UUID argument is None here — presumably
    # resolved by pypowervm; confirm against build_vscsi_mapping docs.
    mapping = tsk_map.build_vscsi_mapping(
        None, vios_w, vm.get_pvm_uuid(instance), vopt)
    return tsk_map.add_map(vios_w, mapping)
from pypowervm import const as pvm_const from pypowervm import exceptions as pvm_exc from pypowervm.tasks import storage as tsk_stg from pypowervm.utils import transaction as pvm_tx from pypowervm.wrappers import cluster as pvm_clust from pypowervm.wrappers import storage as pvm_stg from pypowervm.wrappers import virtual_io_server as pvm_vios from nova import exception from nova import test from nova.tests.unit.virt import powervm from nova.virt.powervm.disk import ssp as ssp_dvr from nova.virt.powervm import vm FAKE_INST_UUID = uuidutils.generate_uuid(dashed=True) FAKE_INST_UUID_PVM = vm.get_pvm_uuid(mock.Mock(uuid=FAKE_INST_UUID)) class TestSSPDiskAdapter(test.TestCase): """Unit Tests for the LocalDisk storage driver.""" def setUp(self): super(TestSSPDiskAdapter, self).setUp() self.inst = powervm.TEST_INSTANCE self.apt = mock.Mock() self.host_uuid = 'host_uuid' self.ssp_wrap = mock.create_autospec(pvm_stg.SSP, instance=True)