def connect_volume_to_vio(vios_w):
    """Attempts to connect a volume to a given VIO.

    :param vios_w: The Virtual I/O Server wrapper to connect to.
    :return: True if the volume was connected. False if the volume was
             not (could be the Virtual I/O Server does not have
             connectivity to the hdisk).
    """
    status, device_name, udid = self._discover_volume_on_vios(
        vios_w, self.volume_id)
    if hdisk.good_discovery(status, device_name):
        # Found a hdisk on this Virtual I/O Server. Add the action to
        # map it to the VM when the stg_ftsk is executed.
        with lockutils.lock(hash(self)):
            self._add_append_mapping(vios_w.uuid, device_name)

        # Save the UDID for the disk in the connection info. It is
        # used for the detach.
        self._set_udid(udid)
        LOG.debug('Device attached: %s', device_name)

        # Valid attachment
        return True

    return False
def connect_volume_to_vio(vios_w):
    """Attempts to connect a volume to a given VIO.

    :param vios_w: The Virtual I/O Server wrapper to connect to.
    :return: True if the volume was connected. False if the volume was
             not (could be the Virtual I/O Server does not have
             connectivity to the hdisk).
    """
    status, device_name, udid = self._discover_volume_on_vios(
        vios_w, self.volume_id)
    if hdisk.good_discovery(status, device_name):
        # Found a hdisk on this Virtual I/O Server. Add the action to
        # map it to the VM when the stg_ftsk is executed.
        with lockutils.lock(hash(self)):
            self._add_append_mapping(vios_w.uuid, device_name)

        # Save the UDID for the disk in the connection info. It is
        # used for the detach.
        self._set_udid(udid)
        LOG.debug("Device attached: %s", device_name)

        # Valid attachment
        return True

    return False
def _attach_volume_to_vio(self, vios_w):
    """Attempts to attach a volume to a given VIO.

    :param vios_w: The Virtual I/O Server wrapper to attach to.
    :return: True if the volume was attached. False if the volume was
             not (could be the Virtual I/O Server does not have
             connectivity to the hdisk).
    """
    status, device_name, udid = self._discover_volume_on_vios(vios_w)

    if hdisk.good_discovery(status, device_name):
        # Found a hdisk on this Virtual I/O Server. Add the action to
        # map it to the VM when the stg_ftsk is executed.
        with lockutils.lock(self.volume_id):
            self._add_append_mapping(vios_w.uuid, device_name,
                                     tag=self.volume_id)

        # Save the UDID for the disk in the connection info. It is
        # used for the detach.
        self._set_udid(udid)

        LOG.debug('Added deferred task to attach device %(device_name)s '
                  'to vios %(vios_name)s.',
                  {'device_name': device_name, 'vios_name': vios_w.name},
                  instance=self.instance)

        # Valid attachment
        return True

    return False
def _discover_volume_on_vios(self, vios_w, volume_id):
    """Discovers an hdisk on a single vios for the volume.

    :param vios_w: VIOS wrapper to process
    :param volume_id: Volume to discover
    :returns: Status of the volume or None
    :returns: Device name or None
    :returns: UDID or None
    """
    # Get the initiator WWPNs, targets and LUN for the given VIOS.
    vio_wwpns, t_wwpns, lun = self._get_hdisk_itls(vios_w)

    # Build the ITL map and discover the hdisks on the Virtual I/O
    # Server (if any).
    itls = hdisk.build_itls(vio_wwpns, t_wwpns, lun)
    if len(itls) == 0:
        LOG.debug('No ITLs for VIOS %(vios)s for volume %(volume_id)s.',
                  {'vios': vios_w.name, 'volume_id': volume_id})
        return None, None, None

    status, device_name, udid = hdisk.discover_hdisk(self.adapter,
                                                     vios_w.uuid, itls)

    if hdisk.good_discovery(status, device_name):
        LOG.info(_LI('Discovered %(hdisk)s on vios %(vios)s for '
                     'volume %(volume_id)s. Status code: %(status)s.'),
                 {'hdisk': device_name, 'vios': vios_w.name,
                  'volume_id': volume_id, 'status': str(status)})
    elif status == hdisk.LUAStatus.DEVICE_IN_USE:
        LOG.warning(_LW('Discovered device %(dev)s for volume %(volume)s '
                        'on %(vios)s is in use. Error code: %(status)s.'),
                    {'dev': device_name, 'volume': volume_id,
                     'vios': vios_w.name, 'status': str(status)})

    return status, device_name, udid
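# Illustration only, not pypowervm's implementation: an "ITL" nexus pairs an
# Initiator WWPN, a Target WWPN and a LUN, which is the shape of data the
# hdisk.build_itls / hdisk.discover_hdisk calls above operate on. The WWPN
# and LUN values below are made up for the example.
import itertools

_example_initiator_wwpns = ['c05076079cff0e56', 'c05076079cff0e57']
_example_target_wwpns = ['500507680b215660']
_example_lun = 2

_example_itl_like = [
    (i_wwpn, t_wwpn, _example_lun)
    for i_wwpn, t_wwpn in itertools.product(_example_initiator_wwpns,
                                            _example_target_wwpns)]
# -> one (initiator, target, lun) entry per initiator/target combination.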
def _attach_volume_to_vio(self, vios_w):
    """Attempts to attach a volume to a given VIO.

    :param vios_w: The Virtual I/O Server wrapper to attach to.
    :return: True if the volume was attached. False if the volume was
             not (could be the Virtual I/O Server does not have
             connectivity to the hdisk).
    """
    status, device_name, udid = self._discover_volume_on_vios(vios_w)

    if hdisk.good_discovery(status, device_name):
        # Found a hdisk on this Virtual I/O Server. Add the action to
        # map it to the VM when the stg_ftsk is executed.
        with lockutils.lock(self.volume_id):
            self._add_append_mapping(vios_w.uuid, device_name,
                                     tag=self.volume_id)

        # Save the UDID for the disk in the connection info. It is
        # used for the detach.
        self._set_udid(udid)

        LOG.debug('Added deferred task to attach device %(device_name)s '
                  'to vios %(vios_name)s.',
                  {'device_name': device_name, 'vios_name': vios_w.name},
                  instance=self.instance)

        # Valid attachment
        return True

    return False
def _discover_volume_on_vios(self, vios_w, volume_id):
    """Discovers an hdisk on a single vios for the volume.

    :param vios_w: VIOS wrapper to process
    :param volume_id: Volume to discover
    :returns: Status of the volume or None
    :returns: Device name or None
    :returns: UDID or None
    """
    # Get the initiator WWPNs, targets and LUN for the given VIOS.
    vio_wwpns, t_wwpns, lun = self._get_hdisk_itls(vios_w)

    # Build the ITL map and discover the hdisks on the Virtual I/O
    # Server (if any).
    itls = hdisk.build_itls(vio_wwpns, t_wwpns, lun)
    if len(itls) == 0:
        LOG.debug('No ITLs for VIOS %(vios)s for volume %(volume_id)s.',
                  {'vios': vios_w.name, 'volume_id': volume_id},
                  instance=self.instance)
        return None, None, None

    device_id = self.connection_info.get('data', {}).get('pg83NAA')
    if device_id:
        device_id = base64.b64encode(device_id.encode())

    status, device_name, udid = hdisk.discover_hdisk(
        self.adapter, vios_w.uuid, itls, device_id=device_id)

    if hdisk.good_discovery(status, device_name):
        LOG.info('Discovered %(hdisk)s on vios %(vios)s for volume '
                 '%(volume_id)s. Status code: %(status)s.',
                 {'hdisk': device_name, 'vios': vios_w.name,
                  'volume_id': volume_id, 'status': status},
                 instance=self.instance)
    elif status == hdisk.LUAStatus.DEVICE_IN_USE:
        LOG.warning('Discovered device %(dev)s for volume %(volume)s '
                    'on %(vios)s is in use. Error code: %(status)s.',
                    {'dev': device_name, 'volume': volume_id,
                     'vios': vios_w.name, 'status': status},
                    instance=self.instance)

    return status, device_name, udid
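# Minimal, self-contained sketch of the pg83NAA handling above: the NAA
# descriptor arrives in connection_info['data'] as a plain string and, per
# the code above, is base64-encoded to bytes before being handed to
# discover_hdisk as device_id. The NAA value here is made up for
# illustration.
import base64

_example_pg83_naa = '600507680282861D880000000000CAFE'  # hypothetical NAA
_example_device_id = base64.b64encode(_example_pg83_naa.encode())
# _example_device_id is the bytes value passed as device_id above.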
def _connect_volume_to_vio(self, vios_w, slot_mgr):
    """Attempts to connect a volume to a given VIO.

    :param vios_w: The Virtual I/O Server wrapper to connect to.
    :param slot_mgr: A NovaSlotManager. Used to delete the client slots
                     used when a volume is detached from the VM
    :return: True if the volume was connected. False if the volume was
             not (could be the Virtual I/O Server does not have
             connectivity to the hdisk).
    """
    status, device_name, udid = self._discover_volume_on_vios(
        vios_w, self.volume_id)

    # Get the slot and LUA to assign.
    slot, lua = slot_mgr.build_map.get_vscsi_slot(vios_w, udid)

    if slot_mgr.is_rebuild and not slot:
        LOG.debug('Detected a device with UDID %(udid)s on VIOS '
                  '%(vios)s on the rebuild that did not exist on the '
                  'source. Ignoring.',
                  {'udid': udid, 'vios': vios_w.uuid},
                  instance=self.instance)
        return False

    if hdisk.good_discovery(status, device_name):
        volume_id = self.connection_info["data"]["volume_id"]
        # Found a hdisk on this Virtual I/O Server. Add the action to
        # map it to the VM when the stg_ftsk is executed.
        with lockutils.lock(hash(self)):
            self._add_append_mapping(
                vios_w.uuid, device_name, lpar_slot_num=slot, lua=lua,
                tag=volume_id)

        # Save the UDID for the disk in the connection info. It is
        # used for the detach.
        self._set_udid(udid)

        LOG.debug('Added deferred task to attach device %(device_name)s '
                  'to vios %(vios_name)s.',
                  {'device_name': device_name, 'vios_name': vios_w.name},
                  instance=self.instance)

        # Valid attachment
        return True

    return False
def is_volume_on_vios(self, vios_w):
    """Returns whether or not the volume is on a VIOS.

    :param vios_w: The Virtual I/O Server wrapper.
    :return: True if the volume driver's volume is on the VIOS. False
             otherwise.
    :return: The udid of the device.
    """
    status, device_name, udid = self._discover_volume_on_vios(
        vios_w, self.volume_id)
    return hdisk.good_discovery(status, device_name), udid
def _discover_volume_on_vios(self, vios_w, volume_id):
    """Discovers an hdisk on a single vios for the volume.

    :param vios_w: VIOS wrapper to process
    :param volume_id: Volume to discover
    :returns: Status of the volume or None
    :returns: Device name or None
    :returns: UDID or None
    """
    # Get the initiator WWPNs, targets and LUN for the given VIOS.
    vio_wwpns, t_wwpns, lun = self._get_hdisk_itls(vios_w)

    # Build the ITL map and discover the hdisks on the Virtual I/O
    # Server (if any).
    itls = hdisk.build_itls(vio_wwpns, t_wwpns, lun)
    if len(itls) == 0:
        LOG.debug('No ITLs for VIOS %(vios)s for volume %(volume_id)s.',
                  {'vios': vios_w.name, 'volume_id': volume_id})
        return None, None, None

    status, device_name, udid = hdisk.discover_hdisk(
        self.adapter, vios_w.uuid, itls)

    if hdisk.good_discovery(status, device_name):
        LOG.info(_LI('Discovered %(hdisk)s on vios %(vios)s for '
                     'volume %(volume_id)s. Status code: %(status)s.'),
                 {'hdisk': device_name, 'vios': vios_w.name,
                  'volume_id': volume_id, 'status': str(status)})
    elif status == hdisk.LUAStatus.DEVICE_IN_USE:
        LOG.warning(_LW('Discovered device %(dev)s for volume %(volume)s '
                        'on %(vios)s is in use. Error code: %(status)s.'),
                    {'dev': device_name, 'volume': volume_id,
                     'vios': vios_w.name, 'status': str(status)})

    return status, device_name, udid
def pre_live_migration_on_destination(self, mig_data):
    """Perform pre live migration steps for the volume on the target host.

    This method performs any pre live migration that is needed.

    Certain volume connectors may need to pass data from the source host
    to the target. This may be required to determine how volumes connect
    through the Virtual I/O Servers.

    This method will be called after the pre_live_migration_on_source
    method. The data from the pre_live call will be passed in via the
    mig_data. This method should put its output into the dest_mig_data.

    :param mig_data: Dict of migration data for the destination server.
                     If the volume connector needs to provide
                     information to the live_migration command, it
                     should be added to this dictionary.
    """
    volume_id = self.volume_id
    found = False
    # See the connect_volume for why this is a direct call instead of
    # using the tx_mgr.feed
    vios_feed = self.adapter.read(pvm_vios.VIOS.schema_type,
                                  xag=[pvm_const.XAG.VIO_STOR])
    vios_wraps = pvm_vios.VIOS.wrap(vios_feed)

    # Iterate through host vios list to find valid hdisks.
    for vios_w in vios_wraps:
        status, device_name, udid = self._discover_volume_on_vios(
            vios_w, volume_id)
        # If we found one, no need to check the others.
        found = found or hdisk.good_discovery(status, device_name)
        # if valid udid is returned save in mig_data
        volume_key = 'vscsi-' + volume_id
        if udid is not None:
            mig_data[volume_key] = udid

    if not found or volume_key not in mig_data:
        ex_args = dict(volume_id=volume_id,
                       instance_name=self.instance.name)
        raise p_exc.VolumePreMigrationFailed(**ex_args)
def pre_live_migration_on_destination(self, mig_data):
    """Perform pre live migration steps for the volume on the target host.

    This method performs any pre live migration that is needed.

    Certain volume connectors may need to pass data from the source host
    to the target. This may be required to determine how volumes connect
    through the Virtual I/O Servers.

    This method will be called after the pre_live_migration_on_source
    method. The data from the pre_live call will be passed in via the
    mig_data. This method should put its output into the dest_mig_data.

    :param mig_data: Dict of migration data for the destination server.
                     If the volume connector needs to provide
                     information to the live_migration command, it
                     should be added to this dictionary.
    """
    volume_id = self.volume_id
    found = False
    # See the connect_volume for why this is a direct call instead of
    # using the tx_mgr.feed
    vios_wraps = pvm_vios.VIOS.get(self.adapter,
                                   xag=[pvm_const.XAG.VIO_STOR])

    # Iterate through host vios list to find valid hdisks.
    for vios_w in vios_wraps:
        status, device_name, udid = self._discover_volume_on_vios(
            vios_w, volume_id)
        # If we found one, no need to check the others.
        found = found or hdisk.good_discovery(status, device_name)
        # if valid udid is returned save in mig_data
        volume_key = 'vscsi-' + volume_id
        if udid is not None:
            mig_data[volume_key] = udid

    if not found or volume_key not in mig_data:
        ex_args = dict(volume_id=volume_id,
                       instance_name=self.instance.name)
        raise p_exc.VolumePreMigrationFailed(**ex_args)
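# Illustration only: the shape of the mig_data entry produced by the
# pre-live-migration methods above. The volume ID and UDID values below are
# made up for the example; the real values come from Cinder and from hdisk
# discovery on the destination VIOS.
_example_volume_id = 'b7c5d6f0-0000-0000-0000-000000000001'  # hypothetical
_example_udid = '01M0lCTTIxNDUxMjQ2MDA1MDc2'                 # hypothetical
_example_mig_data = {'vscsi-' + _example_volume_id: _example_udid}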
def connect_volume_to_vio(vios_w):
    """Attempts to connect a volume to a given VIO.

    :param vios_w: The Virtual I/O Server wrapper to connect to.
    :return: True if the volume was connected. False if the volume was
             not (could be the Virtual I/O Server does not have
             connectivity to the hdisk).
    """
    status, device_name, udid = self._discover_volume_on_vios(
        vios_w, self.volume_id)

    # Get the slot and LUA to assign.
    slot, lua = slot_mgr.build_map.get_vscsi_slot(vios_w, udid)

    if slot_mgr.is_rebuild and not slot:
        LOG.debug('Detected a device with UDID %s on VIOS %s on the '
                  'rebuild that did not exist on the source. '
                  'Ignoring.', udid, vios_w.uuid)
        return False

    if hdisk.good_discovery(status, device_name):
        # Found a hdisk on this Virtual I/O Server. Add the action to
        # map it to the VM when the stg_ftsk is executed.
        with lockutils.lock(hash(self)):
            self._add_append_mapping(vios_w.uuid, device_name,
                                     lpar_slot_num=slot, lua=lua)

        # Save the UDID for the disk in the connection info. It is
        # used for the detach.
        self._set_udid(udid)
        LOG.debug('Device attached: %s', device_name)

        # Valid attachment
        return True

    return False
def discon_vol_for_vio(vios_w):
    """Removes the volume from a specific Virtual I/O Server.

    :param vios_w: The VIOS wrapper.
    :return: True if a remove action was done against this VIOS. False
             otherwise.
    """
    LOG.debug("Disconnect volume %(vol)s from vios uuid %(uuid)s",
              dict(vol=self.volume_id, uuid=vios_w.uuid))
    udid, device_name = None, None
    try:
        udid = self._get_udid()
        if udid:
            # This will only work if vios_w has the Storage XAG.
            device_name = vios_w.hdisk_from_uuid(udid)

        if not udid or not device_name:
            # We lost our bdm data. We'll need to discover it.
            status, device_name, udid = self._discover_volume_on_vios(
                vios_w, self.volume_id)

            # If we have a device name, but not a udid, at this point
            # we should not continue. The hdisk is in a bad state
            # in the I/O Server. Subsequent scrub code on future
            # deploys will clean this up.
            if not hdisk.good_discovery(status, device_name):
                LOG.warning(_LW(
                    "Disconnect Volume: The backing hdisk for volume "
                    "%(volume_id)s on Virtual I/O Server %(vios)s is "
                    "not in a valid state. No disconnect "
                    "actions to be taken as volume is not healthy."),
                    {'volume_id': self.volume_id, 'vios': vios_w.name})
                return False

    except Exception as e:
        LOG.warning(_LW(
            "Disconnect Volume: Failed to find disk on Virtual I/O "
            "Server %(vios_name)s for volume %(volume_id)s. Volume "
            "UDID: %(volume_uid)s. Error: %(error)s"),
            {'error': e, 'volume_uid': udid, 'vios_name': vios_w.name,
             'volume_id': self.volume_id})
        return False

    # We have found the device name
    LOG.info(_LI("Disconnect Volume: Discovered the device %(hdisk)s "
                 "on Virtual I/O Server %(vios_name)s for volume "
                 "%(volume_id)s. Volume UDID: %(volume_uid)s."),
             {'volume_uid': udid, 'volume_id': self.volume_id,
              'vios_name': vios_w.name, 'hdisk': device_name})

    # Add the action to remove the mapping when the stg_ftsk is run.
    partition_id = vm.get_vm_id(self.adapter, self.vm_uuid)
    with lockutils.lock(hash(self)):
        self._add_remove_mapping(partition_id, vios_w.uuid, device_name,
                                 slot_mgr)

        # Add a step to also remove the hdisk
        self._add_remove_hdisk(vios_w, device_name)

    # Found a valid element to remove
    return True
def discon_vol_for_vio(vios_w):
    """Removes the volume from a specific Virtual I/O Server.

    :param vios_w: The VIOS wrapper.
    :return: True if a remove action was done against this VIOS. False
             otherwise.
    """
    LOG.debug("Disconnect volume %(vol)s from vios uuid %(uuid)s",
              dict(vol=self.volume_id, uuid=vios_w.uuid))
    udid, device_name = None, None
    try:
        udid = self._get_udid()
        if not udid:
            # We lost our bdm data. We'll need to discover it.
            status, device_name, udid = self._discover_volume_on_vios(
                vios_w, self.volume_id)

            # If we have a device name, but not a udid, at this point
            # we should not continue. The hdisk is in a bad state
            # in the I/O Server. Subsequent scrub code on future
            # deploys will clean this up.
            if not hdisk.good_discovery(status, device_name):
                LOG.warning(_LW(
                    "Disconnect Volume: The backing hdisk for volume "
                    "%(volume_id)s on Virtual I/O Server %(vios)s is "
                    "not in a valid state. No disconnect "
                    "actions to be taken as volume is not healthy."),
                    {'volume_id': self.volume_id, 'vios': vios_w.name})
                return False

        if udid and not device_name:
            device_name = vios_w.hdisk_from_uuid(udid)

        if not device_name:
            LOG.warning(_LW(
                "Disconnect Volume: No mapped device found on Virtual "
                "I/O Server %(vios)s for volume %(volume_id)s. "
                "Volume UDID: %(volume_uid)s"),
                {'volume_uid': udid, 'volume_id': self.volume_id,
                 'vios': vios_w.name})
            return False

    except Exception as e:
        LOG.warning(_LW(
            "Disconnect Volume: Failed to find disk on Virtual I/O "
            "Server %(vios_name)s for volume %(volume_id)s. Volume "
            "UDID: %(volume_uid)s. Error: %(error)s"),
            {'error': e, 'volume_uid': udid, 'vios_name': vios_w.name,
             'volume_id': self.volume_id})
        return False

    # We have found the device name
    LOG.info(_LI("Disconnect Volume: Discovered the device %(hdisk)s "
                 "on Virtual I/O Server %(vios_name)s for volume "
                 "%(volume_id)s. Volume UDID: %(volume_uid)s."),
             {'volume_uid': udid, 'volume_id': self.volume_id,
              'vios_name': vios_w.name, 'hdisk': device_name})

    # Add the action to remove the mapping when the stg_ftsk is run.
    partition_id = vm.get_vm_id(self.adapter, self.vm_uuid)
    with lockutils.lock(hash(self)):
        self._add_remove_mapping(partition_id, vios_w.uuid, device_name)

        # Add a step to also remove the hdisk
        self._add_remove_hdisk(vios_w, device_name)

    # Found a valid element to remove
    return True
def discon_vol_for_vio(vios_w):
    """Removes the volume from a specific Virtual I/O Server.

    :param vios_w: The VIOS wrapper.
    :return: True if a remove action was done against this VIOS. False
             otherwise.
    """
    LOG.debug("Disconnect volume %(vol)s from vios uuid %(uuid)s",
              dict(vol=self.volume_id, uuid=vios_w.uuid),
              instance=self.instance)
    device_name = None
    udid = self._get_udid()
    try:
        if udid:
            # This will only work if vios_w has the Storage XAG.
            device_name = vios_w.hdisk_from_uuid(udid)

        if not udid or not device_name:
            # We lost our bdm data. We'll need to discover it.
            status, device_name, udid = self._discover_volume_on_vios(
                vios_w, self.volume_id)

            # Check if the hdisk is in a bad state in the I/O Server.
            # Subsequent scrub code on future deploys will clean it up.
            if not hdisk.good_discovery(status, device_name):
                LOG.warning(
                    "Disconnect Volume: The backing hdisk for volume "
                    "%(volume_id)s on Virtual I/O Server %(vios)s is "
                    "not in a valid state. This may be the result of "
                    "an evacuate.",
                    {'volume_id': self.volume_id, 'vios': vios_w.name},
                    instance=self.instance)
                return False

    except Exception:
        LOG.exception(
            "Disconnect Volume: Failed to find disk on Virtual I/O "
            "Server %(vios_name)s for volume %(volume_id)s. Volume "
            "UDID: %(volume_uid)s.",
            {'vios_name': vios_w.name, 'volume_id': self.volume_id,
             'volume_uid': udid},
            instance=self.instance)
        return False

    # We have found the device name
    LOG.info("Disconnect Volume: Discovered the device %(hdisk)s "
             "on Virtual I/O Server %(vios_name)s for volume "
             "%(volume_id)s. Volume UDID: %(volume_uid)s.",
             {'volume_uid': udid, 'volume_id': self.volume_id,
              'vios_name': vios_w.name, 'hdisk': device_name},
             instance=self.instance)

    # Add the action to remove the mapping when the stg_ftsk is run.
    partition_id = vm.get_vm_id(self.adapter, self.vm_uuid)
    with lockutils.lock(hash(self)):
        self._add_remove_mapping(partition_id, vios_w.uuid, device_name,
                                 slot_mgr)

        # Add a step to also remove the hdisk
        self._add_remove_hdisk(vios_w, device_name)

    # Found a valid element to remove
    return True
def _detach_vol_for_vio(self, vios_w):
    """Removes the volume from a specific Virtual I/O Server.

    :param vios_w: The VIOS wrapper.
    :return: True if a remove action was done against this VIOS. False
             otherwise.
    """
    LOG.debug("Detach volume %(vol)s from vios %(vios)s",
              dict(vol=self.volume_id, vios=vios_w.name),
              instance=self.instance)
    device_name = None
    udid = self._get_udid()
    try:
        if udid:
            # This will only work if vios_w has the Storage XAG.
            device_name = vios_w.hdisk_from_uuid(udid)

        if not udid or not device_name:
            # We lost our bdm data. We'll need to discover it.
            status, device_name, udid = self._discover_volume_on_vios(
                vios_w)

            # Check if the hdisk is in a bad state in the I/O Server.
            # Subsequent scrub code on future deploys will clean this up.
            if not hdisk.good_discovery(status, device_name):
                LOG.warning(
                    "Detach Volume: The backing hdisk for volume "
                    "%(volume_id)s on Virtual I/O Server %(vios)s is "
                    "not in a valid state. This may be the result of "
                    "an evacuate.",
                    {'volume_id': self.volume_id, 'vios': vios_w.name},
                    instance=self.instance)
                return False

    except Exception:
        LOG.exception(
            "Detach Volume: Failed to find disk on Virtual I/O "
            "Server %(vios_name)s for volume %(volume_id)s. Volume "
            "UDID: %(volume_uid)s.",
            {'vios_name': vios_w.name, 'volume_id': self.volume_id,
             'volume_uid': udid},
            instance=self.instance)
        return False

    # We have found the device name
    LOG.info("Detach Volume: Discovered the device %(hdisk)s "
             "on Virtual I/O Server %(vios_name)s for volume "
             "%(volume_id)s. Volume UDID: %(volume_uid)s.",
             {'hdisk': device_name, 'vios_name': vios_w.name,
              'volume_id': self.volume_id, 'volume_uid': udid},
             instance=self.instance)

    # Add the action to remove the mapping when the stg_ftsk is run.
    partition_id = vm.get_vm_qp(self.adapter, self.vm_uuid,
                                qprop='PartitionID')
    with lockutils.lock(self.volume_id):
        self._add_remove_mapping(partition_id, vios_w.uuid,
                                 device_name)

        # Add a step to also remove the hdisk
        self._add_remove_hdisk(vios_w, device_name)

    # Found a valid element to remove
    return True
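# Minimal sketch of the oslo.concurrency locking pattern used in the attach
# and detach callbacks above, assuming oslo.concurrency is available. A
# shared lock name (self.volume_id here, hash(self) in older variants)
# serializes the mapping bookkeeping across concurrent per-VIOS callbacks.
# The lock name below is hypothetical.
from oslo_concurrency import lockutils

with lockutils.lock('example-volume-id'):
    # Critical section: mutate shared mapping/slot state here.
    pass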