def _detach_volume_iscsi(self, connection_info, instance, mountpoint):
    """Detach an iSCSI-backed volume from the given VM instance."""
    vm_name = instance['name']
    vm_ref = vm_util.get_vm_ref(self._session, instance)
    # Detach Volume from VM
    LOG.debug("Detach_volume: %(instance_name)s, %(mountpoint)s",
              {'mountpoint': mountpoint, 'instance_name': vm_name},
              instance=instance)
    iscsi_data = connection_info['data']
    # Locate the iSCSI target backing this volume.
    target_device, target_uuid = self._iscsi_get_target(iscsi_data)
    if target_device is None:
        raise exception.StorageError(
            reason=_("Unable to find iSCSI Target"))
    # Find the RDM disk on the VM that points at this target.
    devices = self._session._call_method(vim_util,
                                         "get_dynamic_property", vm_ref,
                                         "VirtualMachine",
                                         "config.hardware.device")
    rdm_disk = vm_util.get_rdm_disk(devices, target_uuid)
    if rdm_disk is None:
        raise exception.StorageError(reason=_("Unable to find volume"))
    self.detach_disk_from_vm(vm_ref, instance, rdm_disk, destroy_disk=True)
    LOG.info(_("Mountpoint %(mountpoint)s detached from "
               "instance %(instance_name)s"),
             {'mountpoint': mountpoint, 'instance_name': vm_name},
             instance=instance)
def introduce_vdi(session, sr_ref, vdi_uuid=None, target_lun=None):
    """Introduce VDI in the host.

    Looks up the VDI on the given SR (rescanning once after a short wait
    if it is not immediately visible) and, unless the VDI is already
    managed, introduces it to the host via VDI.introduce.

    :param session: XenAPI session
    :param sr_ref: reference of the SR to search
    :param vdi_uuid: UUID of the VDI to find (optional)
    :param target_lun: LUN number to match instead of a UUID (optional)
    :returns: reference of the (possibly newly introduced) VDI
    :raises exception.StorageError: if the VDI cannot be found, read or
        introduced
    """
    try:
        vdi_ref = _get_vdi_ref(session, sr_ref, vdi_uuid, target_lun)
        if vdi_ref is None:
            # The VDI may not be visible yet; wait and rescan once.
            greenthread.sleep(CONF.xenserver.introduce_vdi_retry_wait)
            session.call_xenapi("SR.scan", sr_ref)
            vdi_ref = _get_vdi_ref(session, sr_ref, vdi_uuid, target_lun)
    except session.XenAPI.Failure as exc:
        LOG.exception(exc)
        raise exception.StorageError(
            reason=_('Unable to introduce VDI on SR %s') % sr_ref)

    if not vdi_ref:
        raise exception.StorageError(
            reason=_('VDI not found on SR %(sr)s (vdi_uuid '
                     '%(vdi_uuid)s, target_lun %(target_lun)s)') %
            {'sr': sr_ref, 'vdi_uuid': vdi_uuid, 'target_lun': target_lun})

    try:
        vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref)
        LOG.debug(vdi_rec)
    except session.XenAPI.Failure as exc:
        LOG.exception(exc)
        # Fixed truncated message (previously ended with a dangling 'on').
        raise exception.StorageError(
            reason=_('Unable to get record of VDI %s') % vdi_ref)

    if vdi_rec['managed']:
        # We do not need to introduce the vdi
        return vdi_ref

    try:
        return session.call_xenapi("VDI.introduce",
                                   vdi_rec['uuid'],
                                   vdi_rec['name_label'],
                                   vdi_rec['name_description'],
                                   vdi_rec['SR'],
                                   vdi_rec['type'],
                                   vdi_rec['sharable'],
                                   vdi_rec['read_only'],
                                   vdi_rec['other_config'],
                                   vdi_rec['location'],
                                   vdi_rec['xenstore_data'],
                                   vdi_rec['sm_config'])
    except session.XenAPI.Failure as exc:
        LOG.exception(exc)
        raise exception.StorageError(
            reason=_('Unable to introduce VDI for SR %s') % sr_ref)
def get_device_number(mountpoint):
    """Translate a mountpoint (e.g. /dev/sdc) into its device number."""
    number = _mountpoint_to_number(mountpoint)
    if number < 0:
        # A negative result means the mountpoint could not be parsed.
        raise exception.StorageError(
            reason=_('Unable to obtain target information %s') % mountpoint)
    return number
def _attach_volume_iscsi(self, connection_info, instance, mountpoint):
    """Attach iscsi volume storage to VM instance.

    :param connection_info: iSCSI connection details from the volume driver
    :param instance: the instance to attach the volume to
    :param mountpoint: mountpoint requested for the volume (logged only)
    :raises exception.StorageError: if the iSCSI target cannot be found
    """
    instance_name = instance['name']
    vm_ref = vm_util.get_vm_ref(self._session, instance)
    # Attach Volume to VM
    LOG.debug("Attach_volume: %(connection_info)s, %(instance_name)s, "
              "%(mountpoint)s",
              {'connection_info': connection_info,
               'instance_name': instance_name,
               'mountpoint': mountpoint},
              instance=instance)
    data = connection_info['data']
    # Discover iSCSI Target
    device_name = self._iscsi_discover_target(data)[0]
    if device_name is None:
        raise exception.StorageError(
            reason=_("Unable to find iSCSI Target"))
    # Only the adapter type of the VM's existing vmdk is needed here;
    # the vmdk path and disk type from the helper are unused, so index
    # the tuple instead of unpacking into dead locals.
    hardware_devices = self._session._call_method(vim_util,
                                                  "get_dynamic_property",
                                                  vm_ref, "VirtualMachine",
                                                  "config.hardware.device")
    adapter_type = vm_util.get_vmdk_path_and_adapter_type(
        hardware_devices)[1]
    self.attach_disk_to_vm(vm_ref, instance,
                           adapter_type, 'rdmp',
                           device_name=device_name)
    LOG.info(_("Mountpoint %(mountpoint)s attached to "
               "instance %(instance_name)s"),
             {'mountpoint': mountpoint, 'instance_name': instance_name},
             instance=instance)
def _attach_volume_iscsi(self, connection_info, instance, adapter_type=None):
    """Attach iscsi volume storage to VM instance."""
    vm_ref = vm_util.get_vm_ref(self._session, instance)
    # Attach Volume to VM
    LOG.debug("_attach_volume_iscsi: %s", connection_info,
              instance=instance)
    iscsi_data = connection_info['data']
    # Locate the iSCSI target device for this volume.
    target_device = self._iscsi_discover_target(iscsi_data)[0]
    if target_device is None:
        raise exception.StorageError(
            reason=_("Unable to find iSCSI Target"))
    if adapter_type is None:
        # Derive the adapter type from the VM's existing disks.
        devices = self._session._call_method(
            vim_util, "get_dynamic_property", vm_ref,
            "VirtualMachine", "config.hardware.device")
        adapter_type = vm_util.get_scsi_adapter_type(devices)
    self.attach_disk_to_vm(vm_ref, instance,
                           adapter_type, 'rdmp',
                           device_name=target_device)
    LOG.debug("Attached ISCSI: %s", connection_info, instance=instance)
def _attach_volume_iscsi(self, connection_info, instance, adapter_type=None):
    """Attach iscsi volume storage to VM instance."""
    vm_ref = vm_util.get_vm_ref(self._session, instance)
    # Attach Volume to VM
    LOG.debug("_attach_volume_iscsi: %s", connection_info,
              instance=instance)
    iscsi_data = connection_info['data']
    # Locate the iSCSI target device for this volume.
    target_device = self._iscsi_discover_target(iscsi_data)[0]
    if target_device is None:
        raise exception.StorageError(
            reason=_("Unable to find iSCSI Target"))
    # Fall back to the adapter type of the VM's existing vmdk.
    vmdk_info = vm_util.get_vmdk_info(self._session, vm_ref)
    if not adapter_type:
        adapter_type = vmdk_info.adapter_type
    self.attach_disk_to_vm(vm_ref, instance,
                           adapter_type, 'rdmp',
                           device_name=target_device)
    LOG.debug("Attached ISCSI: %s", connection_info, instance=instance)
def test_detach_volume_skips_error_skip_attach(self, mock_vm, mock_vbd,
                                               mock_detach):
    """Detach is skipped when the VBD lookup raises StorageError."""
    mock_vm.return_value = "vm_ref"
    mock_vbd.side_effect = exception.StorageError(reason="")

    self.ops.detach_volume({}, "name", "/dev/xvdd")

    self.assertFalse(mock_detach.called)
def find_sr_from_vdi(session, vdi_ref):
    """Find the SR reference from the VDI reference."""
    try:
        # Ask XenAPI directly which SR the VDI lives on.
        return session.call_xenapi("VDI.get_SR", vdi_ref)
    except session.XenAPI.Failure:
        LOG.exception(_LE('Unable to find SR from VDI'))
        raise exception.StorageError(
            reason=_('Unable to find SR from VDI %s') % vdi_ref)
def test_safe_cleanup_from_vdis_handles_find_sr_exception(self, purge_sr,
                                                          find_sr_from_vdi):
    """Cleanup continues past a failing SR lookup."""
    vdi_refs = ['vdi_ref1', 'vdi_ref2']
    sr_refs = ['sr_ref2']
    # First lookup fails, second succeeds; cleanup must carry on.
    find_sr_from_vdi.side_effect = [exception.StorageError(reason=''),
                                    sr_refs[0]]

    self.ops.safe_cleanup_from_vdis(vdi_refs)

    self._check_find_purge_calls(find_sr_from_vdi, purge_sr, vdi_refs,
                                 sr_refs)
def find_sr_from_vbd(session, vbd_ref):
    """Find the SR reference from the VBD reference."""
    try:
        # Resolve VBD -> VDI -> SR through XenAPI.
        vdi_ref = session.call_xenapi("VBD.get_VDI", vbd_ref)
        return session.call_xenapi("VDI.get_SR", vdi_ref)
    except session.XenAPI.Failure:
        LOG.exception(_('Unable to find SR from VBD'))
        raise exception.StorageError(
            reason=_('Unable to find SR from VBD %s') % vbd_ref)
class CleanupFromVDIsTestCase(VolumeOpsTestBase):
    """Tests for volumeops.safe_cleanup_from_vdis."""

    def _check_find_purge_calls(self, find_sr_from_vdi, purge_sr, vdi_refs,
                                sr_refs):
        # Every VDI must have been looked up; every found SR purged.
        find_sr_calls = [mock.call(self.ops._session, vdi_ref)
                         for vdi_ref in vdi_refs]
        find_sr_from_vdi.assert_has_calls(find_sr_calls)
        purge_sr_calls = [mock.call(self.ops._session, sr_ref)
                          for sr_ref in sr_refs]
        purge_sr.assert_has_calls(purge_sr_calls)

    @mock.patch.object(volume_utils, 'find_sr_from_vdi')
    @mock.patch.object(volume_utils, 'purge_sr')
    def test_safe_cleanup_from_vdis(self, purge_sr, find_sr_from_vdi):
        vdi_refs = ['vdi_ref1', 'vdi_ref2']
        sr_refs = ['sr_ref1', 'sr_ref2']
        find_sr_from_vdi.side_effect = sr_refs
        self.ops.safe_cleanup_from_vdis(vdi_refs)
        self._check_find_purge_calls(find_sr_from_vdi, purge_sr, vdi_refs,
                                     sr_refs)

    # NOTE: side_effect used to be set both in the decorator and in the
    # test body; keep the single in-body assignment for clarity.
    @mock.patch.object(volume_utils, 'find_sr_from_vdi')
    @mock.patch.object(volume_utils, 'purge_sr')
    def test_safe_cleanup_from_vdis_handles_find_sr_exception(
            self, purge_sr, find_sr_from_vdi):
        vdi_refs = ['vdi_ref1', 'vdi_ref2']
        sr_refs = ['sr_ref2']
        # First lookup fails; cleanup must continue with the second VDI.
        find_sr_from_vdi.side_effect = [exception.StorageError(reason=''),
                                        sr_refs[0]]
        self.ops.safe_cleanup_from_vdis(vdi_refs)
        self._check_find_purge_calls(find_sr_from_vdi, purge_sr, vdi_refs,
                                     sr_refs)

    @mock.patch.object(volume_utils, 'find_sr_from_vdi')
    @mock.patch.object(volume_utils, 'purge_sr')
    def test_safe_cleanup_from_vdis_handles_purge_sr_exception(
            self, purge_sr, find_sr_from_vdi):
        vdi_refs = ['vdi_ref1', 'vdi_ref2']
        sr_refs = ['sr_ref1', 'sr_ref2']
        find_sr_from_vdi.side_effect = sr_refs
        # Fixed typo: the original assigned 'purge_sr.side_effects'
        # (plural), which mock silently ignores, so the exception path
        # was never actually exercised.
        purge_sr.side_effect = [test.TestingException, None]
        self.ops.safe_cleanup_from_vdis(vdi_refs)
        self._check_find_purge_calls(find_sr_from_vdi, purge_sr, vdi_refs,
                                     sr_refs)
def _get_vmdk_backed_disk_device(self, vm_ref, connection_info_data):
    """Return the VM's vmdk-backed disk device for the given volume."""
    # Enumerate all virtual devices attached to the VM.
    devices = self._session._call_method(vutil,
                                         "get_object_property",
                                         vm_ref,
                                         "config.hardware.device")
    # Match the device by the disk uuid associated with the volume.
    disk_uuid = self._get_volume_uuid(vm_ref,
                                      connection_info_data['volume_id'])
    disk_device = vm_util.get_vmdk_backed_disk_device(devices, disk_uuid)
    if not disk_device:
        raise exception.StorageError(reason=_("Unable to find volume"))
    return disk_device
def _detach_volume_iscsi(self, connection_info, instance):
    """Detach volume storage to VM instance."""
    vm_ref = vm_util.get_vm_ref(self._session, instance)
    # Detach Volume from VM
    LOG.debug("_detach_volume_iscsi: %s", connection_info,
              instance=instance)
    iscsi_data = connection_info['data']
    # Locate the iSCSI target backing this volume.
    target_device, target_uuid = self._iscsi_get_target(iscsi_data)
    if target_device is None:
        raise exception.StorageError(
            reason=_("Unable to find iSCSI Target"))
    # Find the RDM disk on the VM that points at this target.
    devices = self._session._call_method(vutil,
                                         "get_object_property",
                                         vm_ref,
                                         "config.hardware.device")
    rdm_disk = vm_util.get_rdm_disk(devices, target_uuid)
    if rdm_disk is None:
        raise exception.StorageError(reason=_("Unable to find volume"))
    self.detach_disk_from_vm(vm_ref, instance, rdm_disk, destroy_disk=True)
    LOG.debug("Detached ISCSI: %s", connection_info, instance=instance)
def _parse_volume_info(connection_data):
    """Parse device_path and mountpoint as they can be used by XenAPI.

    In particular, the mountpoint (e.g. /dev/sdc) must be translated
    into a numeric literal.
    """
    volume_id = connection_data['volume_id']
    target_portal = connection_data['target_portal']
    target_host = _get_target_host(target_portal)
    target_port = _get_target_port(target_portal)
    target_iqn = connection_data['target_iqn']

    LOG.debug('(vol_id,host,port,iqn): '
              '(%(vol_id)s,%(host)s,%(port)s,%(iqn)s)',
              {"vol_id": volume_id,
               "host": target_host,
               "port": target_port,
               "iqn": target_iqn})

    if volume_id is None or target_host is None or target_iqn is None:
        # Mask credentials before they end up in the error message.
        raise exception.StorageError(
            reason=_('Unable to obtain target information %s') %
            strutils.mask_password(connection_data))

    volume_info = {
        'id': volume_id,
        'target': target_host,
        'port': target_port,
        'targetIQN': target_iqn,
    }
    if connection_data.get('auth_method') == 'CHAP':
        volume_info['chapuser'] = connection_data['auth_username']
        volume_info['chappassword'] = connection_data['auth_password']
    return volume_info
def get_pool_info(self):
    """Return byte counts for the configured Ceph pool.

    :returns: dict with 'total', 'free' and 'used' keys (bytes)
    :raises exception.StorageError: if 'ceph df' cannot be executed
    :raises exception.NotFound: if the configured pool is not reported
    """
    # NOTE(melwitt): We're executing 'ceph df' here instead of calling
    # the RADOSClient.get_cluster_stats python API because we need
    # access to the MAX_AVAIL stat, which reports the available bytes
    # taking replication into consideration. The global available stat
    # from the RADOSClient.get_cluster_stats python API does not take
    # replication size into consideration and will simply return the
    # available storage per OSD, added together across all OSDs. The
    # MAX_AVAIL stat will divide by the replication size when doing the
    # calculation.
    args = ['ceph', 'df', '--format=json'] + self.ceph_args()
    try:
        # Bind stderr to '_err' (not '_') so the module-level gettext
        # alias '_' is not shadowed inside this function.
        out, _err = processutils.execute(*args)
    except processutils.ProcessExecutionError:
        LOG.exception('Could not determine disk usage')
        raise exception.StorageError(
            reason='Could not determine disk usage')

    stats = jsonutils.loads(out)

    # Find the pool for which we are configured.
    pool_stats = None
    for pool in stats['pools']:
        if pool['name'] == self.pool:
            pool_stats = pool['stats']
            break

    if pool_stats is None:
        raise exception.NotFound('Pool %s could not be found.' % self.pool)

    return {'total': stats['stats']['total_bytes'],
            'free': pool_stats['max_avail'],
            'used': pool_stats['bytes_used']}