def terminate_connection(self, volume, connector, **kwargs):
    """Detach volume from the initiator.

    When ``connector`` is None the volume is detached from every server.
    Otherwise, for multiattach volumes, the VPSA-side detach is performed
    only when this is the last ATTACHED attachment from the connector's
    host; earlier detaches from the same host are no-ops.

    :param volume: volume object being detached
    :param connector: connector dict (or None to detach from all servers)
    :raises: ZadaraServerNotFound if no VPSA server exists for the
        initiator IQN
    :raises: VolumeNotFound if the volume does not exist on the VPSA
    """
    vpsa_volume = self._get_vpsa_volume(volume)
    if connector is None:
        # Detach volume from all servers
        # Get volume name
        self.vpsa._detach_vpsa_volume(vpsa_vol=vpsa_volume)
        return
    # Check if there are multiple attachments to the volume from the
    # same host. Terminate connection only for the last attachment from
    # the corresponding host.
    count = 0
    host = connector.get('host') if connector else None
    if host and volume.get('multiattach'):
        attach_list = volume.volume_attachment
        for attachment in attach_list:
            # Only attachments that completed successfully count toward
            # the per-host total.
            if (attachment['attach_status'] !=
                    fields.VolumeAttachStatus.ATTACHED):
                continue
            if attachment.attached_host == host:
                count += 1
    # More than one live attachment from this host remains: keep the
    # VPSA-side mapping until the last one is terminated.
    if count > 1:
        return
    # Get server name for IQN
    initiator_name = connector['initiator']
    vpsa_srv = self.vpsa._get_server_name(initiator_name, False)
    if not vpsa_srv:
        raise zadara_exception.ZadaraServerNotFound(name=initiator_name)
    if not vpsa_volume:
        raise cinder_exception.VolumeNotFound(volume_id=volume.id)
    # Detach volume from server
    self.vpsa._detach_vpsa_volume(vpsa_vol=vpsa_volume,
                                  vpsa_srv=vpsa_srv)
def delete_volume(self, volume): """ Delete volume. Return ok if doesn't exist. Auto detach from all servers. """ # Get volume name name = self.configuration.zadara_vol_name_template % volume['name'] vpsa_vol = self._get_vpsa_volume_name(name) if not vpsa_vol: msg = _('Volume %(name)s could not be found. ' 'It might be already deleted') % { 'name': name } LOG.warning(msg) if self.configuration.zadara_vpsa_allow_nonexistent_delete: return else: raise exception.VolumeNotFound(volume_id=name) # Check attachment info and detach from all xml_tree = self.vpsa.send_cmd('list_vol_attachments', vpsa_vol=vpsa_vol) servers = self._xml_parse_helper(xml_tree, 'servers', ('iqn', None), first=False) if servers: if not self.configuration.zadara_vpsa_auto_detach_on_delete: raise exception.VolumeAttached(volume_id=name) for server in servers: vpsa_srv = server.findtext('name') if vpsa_srv: self.vpsa.send_cmd('detach_volume', vpsa_srv=vpsa_srv, vpsa_vol=vpsa_vol) # Delete volume self.vpsa.send_cmd('delete_volume', vpsa_vol=vpsa_vol)
def detach_volume(self, context, volume):
    """Clear attach metadata on a SolidFire volume.

    Resets the 'attach_time' and 'attached_to' attributes on the
    cluster-side volume so it no longer appears attached.

    :param context: request context (unused by the cluster call)
    :param volume: volume reference ('project_id' and 'id' keys used)
    :raises: VolumeNotFound if the volume is not on the cluster
    :raises: SolidFireAPIDataException if ModifyVolume returns no result
    """
    # BUG FIX: the debug message previously said "attach_volume"; it is
    # also left untranslated (no _() wrapper) to match attach_volume.
    LOG.debug("Entering SolidFire detach_volume...")
    sfaccount = self._get_sfaccount(volume['project_id'])
    params = {'accountID': sfaccount['accountID']}
    sf_vol = self._get_sf_volume(volume['id'], params)
    if sf_vol is None:
        LOG.error(
            _("Volume ID %s was not found on "
              "the SolidFire Cluster!"), volume['id'])
        raise exception.VolumeNotFound(volume_id=volume['id'])
    # Clearing both attributes marks the volume as detached.
    attributes = sf_vol['attributes']
    attributes['attach_time'] = None
    attributes['attached_to'] = None
    params = {'volumeID': sf_vol['volumeID'],
              'attributes': attributes}
    data = self._issue_api_request('ModifyVolume', params)
    if 'result' not in data:
        raise exception.SolidFireAPIDataException(data=data)
def _do_create_snapshot(self, snapshot, snapshot_name):
    """Creates a snapshot."""
    LOG.debug(_("Enter SolidFire create_snapshot..."))
    # Accounts are keyed by "<hostname>-<project_id>" on the cluster.
    sf_account_name = socket.gethostname() + '-' + snapshot['project_id']
    sfaccount = self._get_sfaccount_by_name(sf_account_name)
    if sfaccount is None:
        raise exception.SfAccountNotFound(account_name=sf_account_name)

    data = self._issue_api_request('ListVolumesForAccount',
                                   {'accountID': sfaccount['accountID']})
    if 'result' not in data:
        raise exception.SolidFireAPIDataException(data=data)

    # Locate the (single) source volume by its OS-side name.
    target_name = 'OS-VOLID-%s' % snapshot['volume_id']
    matches = [vol['volumeID'] for vol in data['result']['volumes']
               if vol['name'] == target_name]
    if not matches:
        raise exception.VolumeNotFound(volume_id=snapshot['volume_id'])
    if len(matches) > 1:
        raise exception.DuplicateSfVolumeNames(vol_name=target_name)
    source_id = matches[-1]

    # Snapshots are implemented as clones on the SF cluster.
    clone_params = {'volumeID': int(source_id),
                    'name': snapshot_name,
                    'attributes': {'OriginatingVolume': source_id}}
    data = self._issue_api_request('CloneVolume', clone_params)
    if 'result' not in data:
        raise exception.SolidFireAPIDataException(data=data)
    return (data, sfaccount)
def retype(self, ctxt, volume, new_type, diff, host):
    """Convert the volume to be of the new type.

    Returns a boolean indicating whether the retype occurred.

    :param ctxt: Context
    :param volume: A dictionary describing the volume to migrate
    :param new_type: A dictionary describing the volume type to convert to
    :param diff: A dictionary with the difference between the two types
    :param host: A dictionary describing the host to migrate to, where
                 host['host'] is its name, and host['capabilities'] is a
                 dictionary of its reported capabilities (Not Used).
    """
    qos = {}
    attributes = {}
    sfaccount = self._get_sfaccount(volume['project_id'])
    params = {'accountID': sfaccount['accountID']}
    sf_vol = self._get_sf_volume(volume['id'], params)
    if sf_vol is None:
        raise exception.VolumeNotFound(volume_id=volume['id'])
    # Stamp the retype time on the cluster-side volume attributes.
    attributes = sf_vol['attributes']
    attributes['retyped_at'] = timeutils.strtime()
    params = {'volumeID': sf_vol['volumeID']}
    # Apply the new type's QoS settings, mirroring them into the
    # attributes for later inspection.
    qos = self._set_qos_by_volume_type(ctxt, new_type['id'])
    if qos:
        params['qos'] = qos
        for k, v in qos.items():
            attributes[k] = str(v)
    params['attributes'] = attributes
    self._issue_api_request('ModifyVolume', params)
    return True
def _attach_volume_to_vgw(self, volume):
    """Attach a vCloud provider disk to the volume-gateway (vgw) vApp.

    Looks up the provider disk by its generated vCloud name, detaches it
    from whatever VM currently holds it, then attaches it to the vgw
    vApp configured for this driver.

    :param volume: volume reference ('display_name' and 'id' keys used)
    :returns: tuple ``(disk_ref, the_vapp)`` of the attached disk
        reference and the vgw vApp
    :raises: VolumeNotFound if the provider disk cannot be located
    """
    volume_name = volume['display_name']
    vcloud_volume_name = self._get_vcloud_volume_name(volume['id'],
                                                      volume_name)
    # get the provider_volume at provider cloud
    # find volume reference by it's name
    result, disk_ref = self._get_disk_ref(vcloud_volume_name)
    if result:
        LOG.debug(
            "Find volume successful, disk name is: %(disk_name)s "
            "disk ref's href is: %(disk_href)s.",
            {'disk_name': vcloud_volume_name,
             'disk_href': disk_ref.href})
    else:
        LOG.error(_('Unable to find volume %s'), vcloud_volume_name)
        raise exception.VolumeNotFound(volume_id=vcloud_volume_name)
    # Check whether the volume is attached to vm or not,
    # Make sure the volume is available
    vms = self._get_disk_attached_vm(vcloud_volume_name)
    if len(vms) > 0:
        vm_name = vms[0].get_name()
        the_vapp = self._get_vcloud_vapp(vm_name)
        if the_vapp:
            self._detach_disk_from_vm(the_vapp, disk_ref)
    # get the vgw host
    vapp_name = self._vgw_name
    the_vapp = self._get_vcloud_vapp(vapp_name)
    # attach volume to vgw when the vgw is in stopped status
    if self._attach_disk_to_vm(the_vapp, disk_ref):
        LOG.info(
            "Volume %(volume_name)s attached to "
            "vgw host: %(instance_name)s",
            {'volume_name': vcloud_volume_name,
             'instance_name': vapp_name})
    return disk_ref, the_vapp
def _get_model_info(self, sfaccount, sf_volume_id): """Gets the connection info for specified account and volume.""" cluster_info = self._get_cluster_info() iscsi_portal = cluster_info['clusterInfo']['svip'] + ':3260' chap_secret = sfaccount['targetSecret'] found_volume = False iteration_count = 0 while not found_volume and iteration_count < 600: volume_list = self._get_volumes_by_sfaccount( sfaccount['accountID']) iqn = None for v in volume_list: if v['volumeID'] == sf_volume_id: iqn = v['iqn'] found_volume = True break if not found_volume: time.sleep(2) iteration_count += 1 if not found_volume: LOG.error(_('Failed to retrieve volume SolidFire-' 'ID: %s in get_by_account!') % sf_volume_id) raise exception.VolumeNotFound(volume_id=sf_volume_id) model_update = {} # NOTE(john-griffith): SF volumes are always at lun 0 model_update['provider_location'] = ('%s %s %s' % (iscsi_portal, iqn, 0)) model_update['provider_auth'] = ('CHAP %s %s' % (sfaccount['username'], chap_secret)) if not self.configuration.sf_emulate_512: model_update['provider_geometry'] = ('%s %s' % (4096, 4096)) return model_update
def backup(self, backup, volume_file, backup_metadata=True):
    """Backup azure volume to azure.

    Only supports backup from and to azure.  The source is either the
    snapshot named in the backup record (when 'snapshot_id' is set) or
    the volume itself; the backup is produced by copying the source
    object to a new managed disk.

    :param backup: backup record ('volume_id', 'snapshot_id', 'id')
    :param volume_file: unused for azure-to-azure backups
    :param backup_metadata: unused for azure-to-azure backups
    :raises: VolumeNotFound if the source object is missing in Azure
    """
    volume = self.db.volume_get(self.context, backup['volume_id'])
    account_type = StorageAccountTypes.standard_lrs
    # backup with --snapshot-id
    if backup['snapshot_id'] is not None:
        src_vref_name = self._get_name_from_id(
            SNAPSHOT_PREFIX, backup['snapshot_id'])
        resource_driver = self.snapshots
    # backup volume
    else:
        src_vref_name = self._get_name_from_id(
            VOLUME_PREFIX, volume['id'])
        resource_driver = self.disks
    disk_name = self._get_name_from_id(
        BACKUP_PREFIX, backup['id'])
    try:
        src_vref_obj = resource_driver.get(
            CONF.azure.resource_group,
            src_vref_name
        )
    except Exception as e:
        # BUG FIX: error message typo "Create Back of" -> "Create
        # Backup of".
        message = (_("Create Backup of %(volume)s in Azure"
                     " failed. reason: %(reason)s") %
                   dict(volume=src_vref_name, reason=six.text_type(e)))
        LOG.exception(message)
        raise exception.VolumeNotFound(volume_id=volume['id'])
    else:
        self._copy_snapshot(disk_name, src_vref_obj.id, account_type)
def attach_volume(self, context, volume, instance_uuid, host_name,
                  mountpoint):
    """Record an attachment in the SolidFire volume's attributes."""
    LOG.debug("Entering SolidFire attach_volume...")
    account = self._get_sfaccount(volume['project_id'])
    sf_vol = self._get_sf_volume(volume['id'],
                                 {'accountID': account['accountID']})
    if sf_vol is None:
        LOG.error(
            _LE("Volume ID %s was not found on "
                "the SolidFire Cluster while attempting "
                "attach_volume operation!"), volume['id'])
        raise exception.VolumeNotFound(volume_id=volume['id'])

    # Persist the attach metadata on the cluster-side volume.
    attrs = sf_vol['attributes']
    attrs['attach_time'] = volume.get('attach_time', None)
    attrs['attached_to'] = instance_uuid
    response = self._issue_api_request(
        'ModifyVolume',
        {'volumeID': sf_vol['volumeID'], 'attributes': attrs})
    if 'result' not in response:
        raise exception.SolidFireAPIDataException(data=response)
def _send_request(self, object_type, key, request):
    """Send a prepared request to the XMS and translate its errors.

    :param object_type: XMS object type name, used only for logging
    :param key: object identifier, used for logging and error reporting
    :param request: urllib2 request object, ready to be opened
    :returns: the urllib2 response when the call succeeds (HTTP < 300)
    :raises: NotFound, VolumeBackendAPIException, VolumeNotFound,
        XtremIOAlreadyMappedError or VolumeDriverException depending on
        the error message returned by the XMS
    """
    try:
        response = urllib2.urlopen(request)
    except (urllib2.HTTPError, ) as exc:
        # Only HTTP 400 bodies carry a structured XMS error message.
        if exc.code == 400 and hasattr(exc, 'read'):
            error = json.load(exc)
            err_msg = error['message']
            if err_msg.endswith(OBJ_NOT_FOUND_ERR):
                LOG.warning(_LW("object %(key)s of "
                                "type %(typ)s not found"),
                            {'key': key, 'typ': object_type})
                raise exception.NotFound()
            elif err_msg == VOL_NOT_UNIQUE_ERR:
                LOG.error(_LE("can't create 2 volumes with the same name"))
                msg = (_('Volume by this name already exists'))
                raise exception.VolumeBackendAPIException(data=msg)
            elif err_msg == VOL_OBJ_NOT_FOUND_ERR:
                LOG.error(_LE("Can't find volume to map %s"), key)
                raise exception.VolumeNotFound(volume_id=key)
            elif ALREADY_MAPPED_ERR in err_msg:
                raise exception.XtremIOAlreadyMappedError()
        # Fallback for any unrecognized error.
        # NOTE(review): when the 400 branch above ran, json.load already
        # consumed the body, so exc.read() here is likely empty — confirm
        # against the XMS client behavior.
        LOG.error(_LE('Bad response from XMS, %s'), exc.read())
        msg = (_('Exception: %s') % six.text_type(exc))
        raise exception.VolumeDriverException(message=msg)
    if response.code >= 300:
        LOG.error(_LE('bad API response, %s'), response.msg)
        msg = (_('bad response from XMS got http code %(code)d, %(msg)s') %
               {'code': response.code, 'msg': response.msg})
        raise exception.VolumeBackendAPIException(data=msg)
    return response
def _get_volume(v_id): d = {'id': '1', 'name': 'volume1', 'worldWideName': '0'} if v_id in d: return d[v_id] else: raise exception.VolumeNotFound(message=v_id)
def delete_lun(volume_name):
    # Test stub: simulate the backend failing to locate the LUN by
    # always raising VolumeNotFound for the module-level fake_volume.
    raise exception.VolumeNotFound(volume_id=fake_volume['name'])
def _do_clone_volume(self, src_uuid, src_project_id, v_ref):
    """Create a clone of an existing volume.

    Currently snapshots are the same as clones on the SF cluster.
    Due to the way the SF cluster works there's no loss in efficiency
    or space usage between the two.  The only thing different right now
    is the restore snapshot functionality which has not been implemented
    in the pre-release version of the SolidFire Cluster.

    :param src_uuid: UUID of the source volume
    :param src_project_id: project owning the source volume
    :param v_ref: reference for the new (clone) volume
    :returns: tuple ``(data, sfaccount, model_update)``
    :raises: VolumeNotFound if the source is not on the cluster
    :raises: SolidFireAPIException on bad API responses
    """
    attributes = {}
    qos = {}
    sfaccount = self._get_sfaccount(src_project_id)
    params = {'accountID': sfaccount['accountID']}
    sf_vol = self._get_sf_volume(src_uuid, params)
    if sf_vol is None:
        raise exception.VolumeNotFound(volume_id=src_uuid)
    # Cross-project clone: the clone lands in the destination
    # project's account (created on demand).
    if src_project_id != v_ref['project_id']:
        sfaccount = self._create_sfaccount(v_ref['project_id'])
    params = {'volumeID': int(sf_vol['volumeID']),
              'name': 'UUID-%s' % v_ref['id'],
              'newSize': int(v_ref['size'] * self.GB),
              'newAccountID': sfaccount['accountID']}
    data = self._issue_api_request('CloneVolume', params)
    if (('result' not in data) or ('volumeID' not in data['result'])):
        msg = _("API response: %s") % data
        raise exception.SolidFireAPIException(msg)
    sf_volume_id = data['result']['volumeID']
    # Tenant-supplied QoS presets, possibly overridden by the volume
    # type's QoS below.
    if (self.configuration.sf_allow_tenant_qos and
            v_ref.get('volume_metadata')is not None):
        qos = self._set_qos_presets(v_ref)
    ctxt = context.get_admin_context()
    type_id = v_ref.get('volume_type_id', None)
    if type_id is not None:
        qos = self._set_qos_by_volume_type(ctxt, type_id)
    # NOTE(jdg): all attributes are copied via clone, need to do an update
    # to set any that were provided
    params = {'volumeID': sf_volume_id}
    create_time = timeutils.strtime(v_ref['created_at'])
    attributes = {'uuid': v_ref['id'],
                  'is_clone': 'True',
                  'src_uuid': src_uuid,
                  'created_at': create_time}
    if qos:
        params['qos'] = qos
        for k, v in qos.items():
            attributes[k] = str(v)
    params['attributes'] = attributes
    data = self._issue_api_request('ModifyVolume', params)
    model_update = self._get_model_info(sfaccount, sf_volume_id)
    if model_update is None:
        mesg = _('Failed to get model update from clone')
        raise exception.SolidFireAPIException(mesg)
    return (data, sfaccount, model_update)
def test_usage_from_deleted_snapshot(self, volume_get_by_id):
    # Usage notification for a snapshot whose source volume is deleted:
    # the volume lookup raises VolumeNotFound, so the usage payload must
    # fall back to an empty availability zone instead of the volume's.
    raw_volume = {
        'id': fake.VOLUME_ID,
        'availability_zone': 'nova',
        'deleted': 1
    }
    ctxt = context.get_admin_context()
    volume_obj = fake_volume.fake_volume_obj(ctxt, **raw_volume)
    # Simulate the deleted volume: any by-id lookup raises.
    volume_get_by_id.side_effect = exception.VolumeNotFound(
        volume_id=fake.VOLUME_ID)
    raw_snapshot = {
        'project_id': fake.PROJECT_ID,
        'user_id': fake.USER_ID,
        'volume': volume_obj,
        'volume_id': fake.VOLUME_ID,
        'volume_size': 1,
        'id': fake.SNAPSHOT_ID,
        'display_name': '11',
        'created_at': '2014-12-11T10:10:00',
        'status': fields.SnapshotStatus.ERROR,
        'deleted': '',
        'snapshot_metadata': [{
            'key': 'fake_snap_meta_key',
            'value': 'fake_snap_meta_value'
        }],
        'expected_attrs': ['metadata'],
    }
    snapshot_obj = fake_snapshot.fake_snapshot_obj(ctxt, **raw_snapshot)
    usage_info = volume_utils._usage_from_snapshot(snapshot_obj)
    # 'availability_zone' is '' because the volume could not be loaded.
    expected_snapshot = {
        'tenant_id': fake.PROJECT_ID,
        'user_id': fake.USER_ID,
        'availability_zone': '',
        'volume_id': fake.VOLUME_ID,
        'volume_size': 1,
        'snapshot_id': fake.SNAPSHOT_ID,
        'display_name': '11',
        'created_at': 'DONTCARE',
        'status': fields.SnapshotStatus.ERROR,
        'deleted': '',
        'metadata': six.text_type({'fake_snap_meta_key':
                                   u'fake_snap_meta_value'}),
    }
    self.assertDictMatch(expected_snapshot, usage_info)
def return_volume_nonexistent(*args, **kwargs):
    # Test stub: unconditionally report the volume as missing.  The
    # positional string becomes the exception's message verbatim.
    raise exception.VolumeNotFound('bogus test message')
def initialize_connection(self, volume, connector):
    """Attach volume to initiator/host.

    During this call VPSA exposes volume to particular Initiator. It
    also creates a 'server' entity for Initiator (if it was not created
    before)

    All necessary connection information is returned, including auth
    data.  Connection data (target, LUN) is not stored in the DB.

    :param volume: volume reference ('name' and 'id' keys used)
    :param connector: connector dict; 'initiator' holds the IQN
    :returns: iSCSI/iSER connection-info dict for the attached volume
    :raises: ZadaraServerCreateFailure, VolumeNotFound,
        ZadaraVPSANoActiveController, ZadaraAttachmentsNotFound,
        ZadaraInvalidAttachmentInfo
    """
    # Get/Create server name for IQN
    initiator_name = connector['initiator']
    vpsa_srv = self._create_vpsa_server(initiator_name)
    if not vpsa_srv:
        raise exception.ZadaraServerCreateFailure(name=initiator_name)
    # Get volume name
    name = self.configuration.zadara_vol_name_template % volume['name']
    vpsa_vol = self._get_vpsa_volume_name(name)
    if not vpsa_vol:
        raise exception.VolumeNotFound(volume_id=volume['id'])
    # Get Active controller details
    ctrl = self._get_active_controller_details()
    if not ctrl:
        raise exception.ZadaraVPSANoActiveController()
    xml_tree = self.vpsa.send_cmd('list_vol_attachments',
                                  vpsa_vol=vpsa_vol)
    attach = self._xml_parse_helper(xml_tree, 'servers',
                                    ('name', vpsa_srv))
    # Attach volume to server
    if attach is None:
        self.vpsa.send_cmd('attach_volume',
                           vpsa_srv=vpsa_srv,
                           vpsa_vol=vpsa_vol)
    # Get connection info
    xml_tree = self.vpsa.send_cmd('list_vol_attachments',
                                  vpsa_vol=vpsa_vol)
    server = self._xml_parse_helper(xml_tree, 'servers',
                                    ('iqn', initiator_name))
    if server is None:
        raise exception.ZadaraAttachmentsNotFound(name=name)
    target = server.findtext('target')
    lun_text = server.findtext('lun')
    # BUG FIX: the original ran int(server.findtext('lun')) before the
    # None check, so a missing <lun> element raised TypeError and the
    # ZadaraInvalidAttachmentInfo branch was unreachable.  Validate
    # first, convert after.
    if target is None or lun_text is None:
        raise exception.ZadaraInvalidAttachmentInfo(
            name=name,
            reason=_('target=%(target)s, lun=%(lun)s') %
                   {'target': target, 'lun': lun_text})
    lun = int(lun_text)
    properties = {}
    properties['target_discovered'] = False
    properties['target_portal'] = '%s:%s' % (ctrl['ip'], '3260')
    properties['target_iqn'] = target
    properties['target_lun'] = lun
    properties['volume_id'] = volume['id']
    properties['auth_method'] = 'CHAP'
    properties['auth_username'] = ctrl['chap_user']
    properties['auth_password'] = ctrl['chap_passwd']
    LOG.debug('Attach properties: %(properties)s',
              {'properties': properties})
    return {'driver_volume_type':
            ('iser' if (self.configuration.safe_get('zadara_use_iser'))
             else 'iscsi'),
            'data': properties}
def stub_volume_get_raise_exc(self, context, volume_id):
    # Test stub standing in for volume_get: always raises VolumeNotFound
    # for the requested id.
    raise exception.VolumeNotFound(volume_id=volume_id)
def delete_volume_metadata(context, volume_id, key, meta_type):
    """Test stub for the metadata-delete DB API.

    Raises VolumeNotFound for the sentinel 'will not be found' id and
    otherwise succeeds silently.
    """
    if volume_id == fake.will_not_be_found_id:
        # BUG FIX: pass volume_id as a keyword so VolumeNotFound formats
        # its message template instead of treating the id as the raw
        # message; also dropped the redundant trailing 'pass'.
        raise exc.VolumeNotFound(volume_id=volume_id)
def return_empty_container_metadata(context, volume_id, metadata, delete,
                                    meta_type):
    """Test stub: return empty container metadata.

    Raises VolumeNotFound for the sentinel 'will not be found' id.
    """
    if volume_id == fake.will_not_be_found_id:
        # BUG FIX: pass volume_id as a keyword so VolumeNotFound formats
        # its message template instead of treating the id as the raw
        # message.
        raise exc.VolumeNotFound(volume_id=volume_id)
    return {}
def return_empty_volume_metadata(context, volume_id):
    """Test stub: return empty volume metadata.

    Raises VolumeNotFound for the sentinel 'will not be found' id.
    """
    if volume_id == fake.will_not_be_found_id:
        # BUG FIX: pass volume_id as a keyword so VolumeNotFound formats
        # its message template instead of treating the id as the raw
        # message.
        raise exc.VolumeNotFound(volume_id=volume_id)
    return {}
def volume_get(self, context, volume_id, viewable_admin_meta=False):
    """Fake volume_get: only the well-known fake volume id resolves."""
    if volume_id != fake.VOLUME_ID:
        raise exception.VolumeNotFound(volume_id=volume_id)
    return objects.Volume(context,
                          id=fake.VOLUME_ID,
                          _name_id=fake.VOLUME2_ID,
                          host='fake_host',
                          cluster_name=None)
class LVMVolumeDriverTestCase(test_driver.BaseDriverTestCase): """Test case for VolumeDriver""" driver_name = "cinder.volume.drivers.lvm.LVMVolumeDriver" FAKE_VOLUME = {'name': 'test1', 'id': 'test1'} @mock.patch.object(os.path, 'exists', return_value=True) @mock.patch.object(fake_driver.FakeLoggingVolumeDriver, 'create_export') def test_delete_volume_invalid_parameter(self, _mock_create_export, mock_exists): self.configuration.volume_clear = 'zero' self.configuration.volume_clear_size = 0 lvm_driver = lvm.LVMVolumeDriver(configuration=self.configuration, db=db) # Test volume without 'size' field and 'volume_size' field self.assertRaises(exception.InvalidParameterValue, lvm_driver._delete_volume, self.FAKE_VOLUME) @mock.patch.object(os.path, 'exists', return_value=False) @mock.patch.object(fake_driver.FakeLoggingVolumeDriver, 'create_export') def test_delete_volume_bad_path(self, _mock_create_export, mock_exists): self.configuration.volume_clear = 'zero' self.configuration.volume_clear_size = 0 self.configuration.volume_type = 'default' volume = dict(self.FAKE_VOLUME, size=1) lvm_driver = lvm.LVMVolumeDriver(configuration=self.configuration, db=db) self.assertRaises(exception.VolumeBackendAPIException, lvm_driver._delete_volume, volume) @mock.patch.object(volutils, 'clear_volume') @mock.patch.object(volutils, 'copy_volume') @mock.patch.object(fake_driver.FakeLoggingVolumeDriver, 'create_export') def test_delete_volume_thinlvm_snap(self, _mock_create_export, mock_copy, mock_clear): vg_obj = fake_lvm.FakeBrickLVM('cinder-volumes', False, None, 'default') self.configuration.volume_clear = 'zero' self.configuration.volume_clear_size = 0 self.configuration.lvm_type = 'thin' self.configuration.target_helper = 'tgtadm' lvm_driver = lvm.LVMVolumeDriver(configuration=self.configuration, vg_obj=vg_obj, db=db) uuid = '00000000-0000-0000-0000-c3aa7ee01536' fake_snapshot = {'name': 'volume-' + uuid, 'id': uuid, 'size': 123} lvm_driver._delete_volume(fake_snapshot, 
is_snapshot=True) @mock.patch.object(volutils, 'get_all_volume_groups', return_value=[{ 'name': 'cinder-volumes' }]) @mock.patch('cinder.brick.local_dev.lvm.LVM.get_lvm_version', return_value=(2, 2, 100)) def test_check_for_setup_error(self, _mock_get_version, vgs): vg_obj = fake_lvm.FakeBrickLVM('cinder-volumes', False, None, 'auto') configuration = conf.Configuration(fake_opt, 'fake_group') lvm_driver = lvm.LVMVolumeDriver(configuration=configuration, vg_obj=vg_obj, db=db) lvm_driver.delete_snapshot = mock.Mock() volume = tests_utils.create_volume(self.context, host=socket.gethostname()) volume_id = volume['id'] backup = {} backup['volume_id'] = volume_id backup['user_id'] = fake.USER_ID backup['project_id'] = fake.PROJECT_ID backup['host'] = socket.gethostname() backup['availability_zone'] = '1' backup['display_name'] = 'test_check_for_setup_error' backup['display_description'] = 'test_check_for_setup_error' backup['container'] = 'fake' backup['status'] = fields.BackupStatus.CREATING backup['fail_reason'] = '' backup['service'] = 'fake' backup['parent_id'] = None backup['size'] = 5 * 1024 * 1024 backup['object_count'] = 22 db.backup_create(self.context, backup) lvm_driver.check_for_setup_error() def test_retype_volume(self): vol = tests_utils.create_volume(self.context) new_type = fake.VOLUME_TYPE_ID diff = {} host = 'fake_host' retyped = self.volume.driver.retype(self.context, vol, new_type, diff, host) self.assertTrue(retyped) def test_update_migrated_volume(self): fake_volume_id = fake.VOLUME_ID fake_new_volume_id = fake.VOLUME2_ID fake_provider = 'fake_provider' original_volume_name = CONF.volume_name_template % fake_volume_id current_name = CONF.volume_name_template % fake_new_volume_id fake_volume = tests_utils.create_volume(self.context) fake_volume['id'] = fake_volume_id fake_new_volume = tests_utils.create_volume(self.context) fake_new_volume['id'] = fake_new_volume_id fake_new_volume['provider_location'] = fake_provider fake_vg = 
fake_lvm.FakeBrickLVM('cinder-volumes', False, None, 'default') with mock.patch.object(self.volume.driver, 'vg') as vg: vg.return_value = fake_vg vg.rename_volume.return_value = None update = self.volume.driver.update_migrated_volume( self.context, fake_volume, fake_new_volume, 'available') vg.rename_volume.assert_called_once_with(current_name, original_volume_name) self.assertEqual({ '_name_id': None, 'provider_location': None }, update) vg.rename_volume.reset_mock() vg.rename_volume.side_effect = processutils.ProcessExecutionError update = self.volume.driver.update_migrated_volume( self.context, fake_volume, fake_new_volume, 'available') vg.rename_volume.assert_called_once_with(current_name, original_volume_name) self.assertEqual( { '_name_id': fake_new_volume_id, 'provider_location': fake_provider }, update) def test_create_volume_from_snapshot_none_sparse(self): with mock.patch.object(self.volume.driver, 'vg'), \ mock.patch.object(self.volume.driver, '_create_volume'), \ mock.patch.object(volutils, 'copy_volume') as mock_copy: # Test case for thick LVM src_volume = tests_utils.create_volume(self.context) snapshot_ref = tests_utils.create_snapshot(self.context, src_volume['id']) dst_volume = tests_utils.create_volume(self.context) self.volume.driver.create_volume_from_snapshot( dst_volume, snapshot_ref) volume_path = self.volume.driver.local_path(dst_volume) snapshot_path = self.volume.driver.local_path(snapshot_ref) volume_size = 1024 block_size = '1M' mock_copy.assert_called_with(snapshot_path, volume_path, volume_size, block_size, execute=self.volume.driver._execute, sparse=False) def test_create_volume_from_snapshot_sparse(self): self.configuration.lvm_type = 'thin' lvm_driver = lvm.LVMVolumeDriver(configuration=self.configuration, db=db) with mock.patch.object(lvm_driver, 'vg'): # Test case for thin LVM lvm_driver._sparse_copy_volume = True src_volume = tests_utils.create_volume(self.context) snapshot_ref = tests_utils.create_snapshot(self.context, 
src_volume['id']) dst_volume = tests_utils.create_volume(self.context) lvm_driver.create_volume_from_snapshot(dst_volume, snapshot_ref) def test_create_volume_from_snapshot_sparse_extend(self): self.configuration.lvm_type = 'thin' lvm_driver = lvm.LVMVolumeDriver(configuration=self.configuration, db=db) with mock.patch.object(lvm_driver, 'vg'), \ mock.patch.object(lvm_driver, 'extend_volume') as mock_extend: # Test case for thin LVM lvm_driver._sparse_copy_volume = True src_volume = tests_utils.create_volume(self.context) snapshot_ref = tests_utils.create_snapshot(self.context, src_volume['id']) dst_volume = tests_utils.create_volume(self.context) dst_volume['size'] = snapshot_ref['volume_size'] + 1 lvm_driver.create_volume_from_snapshot(dst_volume, snapshot_ref) mock_extend.assert_called_with(dst_volume, dst_volume['size']) @mock.patch.object(cinder.volume.utils, 'get_all_volume_groups', return_value=[{ 'name': 'cinder-volumes' }]) @mock.patch('cinder.brick.local_dev.lvm.LVM.update_volume_group_info') @mock.patch('cinder.brick.local_dev.lvm.LVM.get_all_physical_volumes') @mock.patch('cinder.brick.local_dev.lvm.LVM.supports_thin_provisioning', return_value=True) def test_lvm_type_auto_thin_pool_exists(self, *_unused_mocks): configuration = conf.Configuration(fake_opt, 'fake_group') configuration.lvm_type = 'auto' vg_obj = fake_lvm.FakeBrickLVM('cinder-volumes', False, None, 'default') lvm_driver = lvm.LVMVolumeDriver(configuration=configuration, vg_obj=vg_obj) lvm_driver.check_for_setup_error() self.assertEqual('thin', lvm_driver.configuration.lvm_type) @mock.patch.object(cinder.volume.utils, 'get_all_volume_groups', return_value=[{ 'name': 'cinder-volumes' }]) @mock.patch.object(cinder.brick.local_dev.lvm.LVM, 'get_volumes', return_value=[]) @mock.patch('cinder.brick.local_dev.lvm.LVM.update_volume_group_info') @mock.patch('cinder.brick.local_dev.lvm.LVM.get_all_physical_volumes') @mock.patch('cinder.brick.local_dev.lvm.LVM.supports_thin_provisioning', 
return_value=True) def test_lvm_type_auto_no_lvs(self, *_unused_mocks): configuration = conf.Configuration(fake_opt, 'fake_group') configuration.lvm_type = 'auto' vg_obj = fake_lvm.FakeBrickLVM('cinder-volumes', False, None, 'default') lvm_driver = lvm.LVMVolumeDriver(configuration=configuration, vg_obj=vg_obj) lvm_driver.check_for_setup_error() self.assertEqual('thin', lvm_driver.configuration.lvm_type) @mock.patch.object(cinder.volume.utils, 'get_all_volume_groups', return_value=[{ 'name': 'cinder-volumes' }]) @mock.patch('cinder.brick.local_dev.lvm.LVM.get_lv_info') @mock.patch('cinder.brick.local_dev.lvm.LVM.activate_lv') @mock.patch('cinder.brick.local_dev.lvm.LVM.' 'supports_lvchange_ignoreskipactivation') @mock.patch('cinder.brick.local_dev.lvm.LVM.update_volume_group_info') @mock.patch('cinder.brick.local_dev.lvm.LVM.get_all_physical_volumes') @mock.patch('cinder.brick.local_dev.lvm.LVM.supports_thin_provisioning', return_value=False) def test_lvm_type_auto_no_thin_support(self, *_unused_mocks): configuration = conf.Configuration(fake_opt, 'fake_group') configuration.lvm_type = 'auto' lvm_driver = lvm.LVMVolumeDriver(configuration=configuration) lvm_driver.check_for_setup_error() self.assertEqual('default', lvm_driver.configuration.lvm_type) @mock.patch.object(cinder.volume.utils, 'get_all_volume_groups', return_value=[{ 'name': 'cinder-volumes' }]) @mock.patch('cinder.brick.local_dev.lvm.LVM.get_lv_info') @mock.patch('cinder.brick.local_dev.lvm.LVM.activate_lv') @mock.patch('cinder.brick.local_dev.lvm.LVM.' 
                'supports_lvchange_ignoreskipactivation')
    @mock.patch('cinder.brick.local_dev.lvm.LVM.update_volume_group_info')
    @mock.patch('cinder.brick.local_dev.lvm.LVM.get_all_physical_volumes')
    @mock.patch('cinder.brick.local_dev.lvm.LVM.get_volume')
    @mock.patch('cinder.brick.local_dev.lvm.LVM.supports_thin_provisioning',
                return_value=False)
    def test_lvm_type_auto_no_thin_pool(self, *_unused_mocks):
        # lvm_type 'auto' must fall back to 'default' (thick) when the
        # backing VG reports no thin-provisioning support.
        configuration = conf.Configuration(fake_opt, 'fake_group')
        configuration.lvm_type = 'auto'

        lvm_driver = lvm.LVMVolumeDriver(configuration=configuration)

        lvm_driver.check_for_setup_error()

        self.assertEqual('default', lvm_driver.configuration.lvm_type)

    @mock.patch.object(lvm.LVMVolumeDriver, 'extend_volume')
    def test_create_cloned_volume_by_thin_snapshot(self, mock_extend):
        # Thin-LVM clone path: snapshot the source LV, extend to the new
        # size, then activate the snapshot permanently.
        self.configuration.lvm_type = 'thin'
        fake_vg = mock.Mock(
            fake_lvm.FakeBrickLVM('cinder-volumes', False, None, 'default'))
        lvm_driver = lvm.LVMVolumeDriver(configuration=self.configuration,
                                         vg_obj=fake_vg,
                                         db=db)
        fake_volume = tests_utils.create_volume(self.context, size=1)
        fake_new_volume = tests_utils.create_volume(self.context, size=2)

        lvm_driver.create_cloned_volume(fake_new_volume, fake_volume)
        fake_vg.create_lv_snapshot.assert_called_once_with(
            fake_new_volume['name'], fake_volume['name'], 'thin')
        mock_extend.assert_called_once_with(fake_new_volume, 2)
        fake_vg.activate_lv.assert_called_once_with(
            fake_new_volume['name'], is_snapshot=True, permanent=True)

    def test_lvm_migrate_volume_no_loc_info(self):
        # Destination advertises no 'location_info' -> driver declines.
        host = {'capabilities': {}}
        vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'}
        moved, model_update = self.volume.driver.migrate_volume(
            self.context, vol, host)
        self.assertFalse(moved)
        self.assertIsNone(model_update)

    def test_lvm_migrate_volume_bad_loc_info(self):
        # Malformed 'location_info' string -> driver declines.
        capabilities = {'location_info': 'foo'}
        host = {'capabilities': capabilities}
        vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'}
        moved, model_update = self.volume.driver.migrate_volume(
            self.context, vol, host)
        self.assertFalse(moved)
        self.assertIsNone(model_update)

    def test_lvm_migrate_volume_diff_driver(self):
        # Destination backed by a different driver -> driver declines.
        capabilities = {'location_info': 'FooDriver:foo:bar:default:0'}
        host = {'capabilities': capabilities}
        vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'}
        moved, model_update = self.volume.driver.migrate_volume(
            self.context, vol, host)
        self.assertFalse(moved)
        self.assertIsNone(model_update)

    def test_lvm_migrate_volume_diff_host(self):
        # Destination on a different host -> optimized path declines.
        capabilities = {'location_info': 'LVMVolumeDriver:foo:bar:default:0'}
        host = {'capabilities': capabilities}
        vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'}
        moved, model_update = self.volume.driver.migrate_volume(
            self.context, vol, host)
        self.assertFalse(moved)
        self.assertIsNone(model_update)

    def test_lvm_migrate_volume_in_use(self):
        # Attached ('in-use') volumes are not migrated by the driver.
        hostname = socket.gethostname()
        capabilities = {'location_info': 'LVMVolumeDriver:%s:bar' % hostname}
        host = {'capabilities': capabilities}
        vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'in-use'}
        moved, model_update = self.volume.driver.migrate_volume(
            self.context, vol, host)
        self.assertFalse(moved)
        self.assertIsNone(model_update)

    @mock.patch.object(volutils, 'get_all_volume_groups',
                       return_value=[{'name': 'cinder-volumes'}])
    def test_lvm_migrate_volume_same_volume_group(self, vgs):
        # Source and destination VG are identical -> backend API error.
        hostname = socket.gethostname()
        capabilities = {'location_info': 'LVMVolumeDriver:%s:'
                                         'cinder-volumes:default:0'
                        % hostname}
        host = {'capabilities': capabilities}
        vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'}
        self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes',
                                                      False,
                                                      None,
                                                      'default')
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.volume.driver.migrate_volume, self.context,
                          vol, host)

    @mock.patch.object(lvm.LVMVolumeDriver, '_create_volume')
    @mock.patch.object(brick_lvm.LVM, 'get_all_physical_volumes')
    @mock.patch.object(brick_lvm.LVM, 'delete')
    @mock.patch.object(volutils, 'copy_volume',
                       side_effect=processutils.ProcessExecutionError)
    @mock.patch.object(volutils, 'get_all_volume_groups',
                       return_value=[{'name': 'cinder-volumes'}])
    def test_lvm_migrate_volume_volume_copy_error(self, vgs, copy_volume,
                                                  mock_delete, mock_pvs,
                                                  mock_create):
        # A failed data copy must propagate AND clean up the
        # partially-created destination volume.
        hostname = socket.gethostname()
        capabilities = {'location_info': 'LVMVolumeDriver:%s:'
                                         'cinder-volumes:default:0'
                        % hostname}
        host = {'capabilities': capabilities}
        vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'}
        self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes-old',
                                                      False, None, 'default')
        self.assertRaises(processutils.ProcessExecutionError,
                          self.volume.driver.migrate_volume, self.context,
                          vol, host)
        mock_delete.assert_called_once_with(vol)

    @mock.patch.object(volutils, 'get_all_volume_groups',
                       return_value=[{'name': 'cinder-volumes-2'}])
    def test_lvm_volume_group_missing(self, vgs):
        # Destination VG does not exist on this host -> driver declines.
        hostname = socket.gethostname()
        capabilities = {'location_info': 'LVMVolumeDriver:%s:'
                                         'cinder-volumes-3:default:0'
                        % hostname}
        host = {'capabilities': capabilities}
        vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'}
        self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes',
                                                      False,
                                                      None,
                                                      'default')
        moved, model_update = self.volume.driver.migrate_volume(
            self.context, vol, host)
        self.assertFalse(moved)
        self.assertIsNone(model_update)

    def test_lvm_migrate_volume_proceed(self):
        # Happy-path migration (thick LVM): dd-style copy with
        # sparse=False and the source volume deleted afterwards.
        hostname = socket.gethostname()
        capabilities = {'location_info': 'LVMVolumeDriver:%s:'
                                         'cinder-volumes-2:default:0'
                        % hostname}
        host = {'capabilities': capabilities}
        vol = {'name': 'testvol', 'id': 1, 'size': 2, 'status': 'available'}

        def fake_execute(*args, **kwargs):
            pass

        def get_all_volume_groups():
            # NOTE(flaper87) Return just the destination
            # host to test the check of dest VG existence.
            return [{'name': 'cinder-volumes-2'}]

        def _fake_get_all_physical_volumes(obj, root_helper, vg_name):
            return [{}]

        with mock.patch.object(brick_lvm.LVM, 'get_all_physical_volumes',
                               return_value=[{}]), \
                mock.patch.object(self.volume.driver, '_execute') \
                as mock_execute, \
                mock.patch.object(volutils, 'copy_volume') as mock_copy, \
                mock.patch.object(volutils, 'get_all_volume_groups',
                                  side_effect=get_all_volume_groups), \
                mock.patch.object(self.volume.driver, '_delete_volume'):
            self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes',
                                                          False,
                                                          None,
                                                          'default')
            mock_execute.return_value = ("mock_outs", "mock_errs")
            moved, model_update = \
                self.volume.driver.migrate_volume(self.context, vol, host)
            self.assertTrue(moved)
            self.assertIsNone(model_update)
            # 2 GiB volume -> 2048 MiB copied in 1M blocks between the
            # dm-mapped source and destination device paths.
            mock_copy.assert_called_once_with(
                '/dev/mapper/cinder--volumes-testvol',
                '/dev/mapper/cinder--volumes--2-testvol',
                2048,
                '1M',
                execute=mock_execute,
                sparse=False)

    def test_lvm_migrate_volume_proceed_with_thin(self):
        # Same happy path as above but with lvm_type 'thin', which enables
        # sparse copying (sparse=True).
        hostname = socket.gethostname()
        capabilities = {'location_info': 'LVMVolumeDriver:%s:'
                                         'cinder-volumes-2:default:0'
                        % hostname}
        host = {'capabilities': capabilities}
        vol = {'name': 'testvol', 'id': 1, 'size': 2, 'status': 'available'}

        def fake_execute(*args, **kwargs):
            pass

        def get_all_volume_groups():
            # NOTE(flaper87) Return just the destination
            # host to test the check of dest VG existence.
            return [{'name': 'cinder-volumes-2'}]

        def _fake_get_all_physical_volumes(obj, root_helper, vg_name):
            return [{}]

        self.configuration.lvm_type = 'thin'
        lvm_driver = lvm.LVMVolumeDriver(configuration=self.configuration,
                                         db=db)

        with mock.patch.object(brick_lvm.LVM, 'get_all_physical_volumes',
                               return_value=[{}]), \
                mock.patch.object(lvm_driver, '_execute') \
                as mock_execute, \
                mock.patch.object(volutils, 'copy_volume') as mock_copy, \
                mock.patch.object(volutils, 'get_all_volume_groups',
                                  side_effect=get_all_volume_groups), \
                mock.patch.object(lvm_driver, '_delete_volume'):
            lvm_driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes',
                                                  False,
                                                  None,
                                                  'default')
            lvm_driver._sparse_copy_volume = True
            mock_execute.return_value = ("mock_outs", "mock_errs")
            moved, model_update = \
                lvm_driver.migrate_volume(self.context, vol, host)
            self.assertTrue(moved)
            self.assertIsNone(model_update)
            mock_copy.assert_called_once_with(
                '/dev/mapper/cinder--volumes-testvol',
                '/dev/mapper/cinder--volumes--2-testvol',
                2048,
                '1M',
                execute=mock_execute,
                sparse=True)

    @staticmethod
    def _get_manage_existing_lvs(name):
        """Helper method used by the manage_existing tests below."""
        # Stand-in for brick LVM's get_volume(): 'fake_lv' has a valid
        # size, 'fake_lv_bad_size' deliberately does not parse as a float.
        lvs = [{'name': 'fake_lv', 'size': '1.75'},
               {'name': 'fake_lv_bad_size', 'size': 'Not a float'}]
        for lv in lvs:
            if lv['name'] == name:
                return lv

    def _setup_stubs_for_manage_existing(self):
        """Helper to set up common stubs for the manage_existing tests."""
        self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes',
                                                      False,
                                                      None,
                                                      'default')

    @mock.patch.object(db.sqlalchemy.api, 'volume_get',
                       side_effect=exception.VolumeNotFound(
                           volume_id='d8cd1feb-2dcc-404d-9b15-b86fe3bec0a1'))
    def test_lvm_manage_existing_not_found(self, mock_vol_get):
        # Managing an LV whose name matches no existing cinder volume
        # succeeds and returns no model update.
        self._setup_stubs_for_manage_existing()

        vol_name = 'volume-d8cd1feb-2dcc-404d-9b15-b86fe3bec0a1'
        ref = {'source-name': 'fake_lv'}
        vol = {'name': vol_name, 'id': fake.VOLUME_ID, 'size': 0}

        with mock.patch.object(self.volume.driver.vg, 'rename_volume'):
            model_update = self.volume.driver.manage_existing(vol, ref)
            self.assertIsNone(model_update)

    @mock.patch('cinder.db.sqlalchemy.api.resource_exists',
                return_value=True)
    def test_lvm_manage_existing_already_managed(self, exists_mock):
        # Managing an LV already owned by cinder must be rejected.
        self._setup_stubs_for_manage_existing()

        vol_name = 'volume-d8cd1feb-2dcc-404d-9b15-b86fe3bec0a1'
        ref = {'source-name': vol_name}
        vol = {'name': 'test', 'id': 1, 'size': 0}

        with mock.patch.object(self.volume.driver.vg, 'rename_volume'):
            self.assertRaises(exception.ManageExistingAlreadyManaged,
                              self.volume.driver.manage_existing,
                              vol, ref)

    def test_lvm_manage_existing(self):
        """Good pass on managing an LVM volume.

        This test case ensures that, when a logical volume with the
        specified name exists, and the size is as expected, no error is
        returned from driver.manage_existing, and that the rename_volume
        function is called in the Brick LVM code with the correct arguments.
        """
        self._setup_stubs_for_manage_existing()

        ref = {'source-name': 'fake_lv'}
        vol = {'name': 'test', 'id': fake.VOLUME_ID, 'size': 0}

        def _rename_volume(old_name, new_name):
            self.assertEqual(ref['source-name'], old_name)
            self.assertEqual(vol['name'], new_name)

        with mock.patch.object(self.volume.driver.vg, 'rename_volume') \
                as mock_rename_volume, \
                mock.patch.object(self.volume.driver.vg, 'get_volume',
                                  self._get_manage_existing_lvs):
            mock_rename_volume.return_value = _rename_volume
            # '1.75' GB from the fake LV rounds up to 2 GB.
            size = self.volume.driver.manage_existing_get_size(vol, ref)
            self.assertEqual(2, size)
            model_update = self.volume.driver.manage_existing(vol, ref)
            self.assertIsNone(model_update)

    def test_lvm_manage_existing_bad_size(self):
        """Make sure correct exception on bad size returned from LVM.

        This test case ensures that the correct exception is raised when
        the information returned for the existing LVs is not in the format
        that the manage_existing code expects.
        """
        self._setup_stubs_for_manage_existing()

        ref = {'source-name': 'fake_lv_bad_size'}
        vol = {'name': 'test', 'id': fake.VOLUME_ID, 'size': 2}

        with mock.patch.object(self.volume.driver.vg, 'get_volume',
                               self._get_manage_existing_lvs):
            self.assertRaises(exception.VolumeBackendAPIException,
                              self.volume.driver.manage_existing_get_size,
                              vol, ref)

    def test_lvm_manage_existing_bad_ref(self):
        """Error case where specified LV doesn't exist.

        This test case ensures that the correct exception is raised when
        the caller attempts to manage a volume that does not exist.
        """
        self._setup_stubs_for_manage_existing()

        ref = {'source-name': 'fake_nonexistent_lv'}
        vol = {'name': 'test', 'id': 1, 'size': 0, 'status': 'available'}

        with mock.patch.object(self.volume.driver.vg, 'get_volume',
                               self._get_manage_existing_lvs):
            self.assertRaises(exception.ManageExistingInvalidReference,
                              self.volume.driver.manage_existing_get_size,
                              vol, ref)

    def test_lvm_manage_existing_snapshot(self):
        """Good pass on managing an LVM snapshot.

        This test case ensures that, when a logical volume's snapshot with
        the specified name exists, and the size is as expected, no error is
        returned from driver.manage_existing_snapshot, and that the
        rename_volume function is called in the Brick LVM code with the
        correct arguments.
        """
        self._setup_stubs_for_manage_existing()

        ref = {'source-name': 'fake_lv'}
        snp = {'name': 'test', 'id': fake.SNAPSHOT_ID, 'size': 0}

        def _rename_volume(old_name, new_name):
            self.assertEqual(ref['source-name'], old_name)
            self.assertEqual(snp['name'], new_name)

        with mock.patch.object(self.volume.driver.vg, 'rename_volume') \
                as mock_rename_volume, \
                mock.patch.object(self.volume.driver.vg, 'get_volume',
                                  self._get_manage_existing_lvs):
            mock_rename_volume.return_value = _rename_volume
            size = self.volume.driver.manage_existing_snapshot_get_size(
                snp, ref)
            self.assertEqual(2, size)
            model_update = self.volume.driver.manage_existing_snapshot(
                snp, ref)
            self.assertIsNone(model_update)

    def test_lvm_manage_existing_snapshot_bad_ref(self):
        """Error case where specified LV snapshot doesn't exist.

        This test case ensures that the correct exception is raised when
        the caller attempts to manage a snapshot that does not exist.
        """
        self._setup_stubs_for_manage_existing()

        ref = {'source-name': 'fake_nonexistent_lv'}
        snp = {
            'name': 'test',
            'id': fake.SNAPSHOT_ID,
            'size': 0,
            'status': 'available',
        }

        with mock.patch.object(self.volume.driver.vg, 'get_volume',
                               self._get_manage_existing_lvs):
            self.assertRaises(
                exception.ManageExistingInvalidReference,
                self.volume.driver.manage_existing_snapshot_get_size,
                snp, ref)

    def test_revert_snapshot(self):
        # Revert flow (thick LVM): deactivate the volume LV, lvconvert
        # --merge the snapshot, re-activate, then re-create the snapshot.
        self._setup_stubs_for_manage_existing()
        self.configuration.lvm_type = 'auto'
        fake_volume = tests_utils.create_volume(self.context,
                                                display_name='fake_volume')
        fake_snapshot = tests_utils.create_snapshot(self.context,
                                                    fake_volume.id)

        with mock.patch.object(self.volume.driver.vg,
                               'revert') as mock_revert,\
                mock.patch.object(self.volume.driver.vg,
                                  'create_lv_snapshot') as mock_create,\
                mock.patch.object(self.volume.driver.vg,
                                  'deactivate_lv') as mock_deactive,\
                mock.patch.object(self.volume.driver.vg,
                                  'activate_lv') as mock_active:
            self.volume.driver.revert_to_snapshot(self.context,
                                                  fake_volume,
                                                  fake_snapshot)
            mock_revert.assert_called_once_with(
                self.volume.driver._escape_snapshot(fake_snapshot.name))
            mock_deactive.assert_called_once_with(fake_volume.name)
            mock_active.assert_called_once_with(fake_volume.name)
            mock_create.assert_called_once_with(
                self.volume.driver._escape_snapshot(fake_snapshot.name),
                fake_volume.name, self.configuration.lvm_type)

    def test_revert_thin_snapshot(self):
        # Revert-to-snapshot is not implemented for thin-provisioned LVM.
        configuration = conf.Configuration(fake_opt, 'fake_group')
        configuration.lvm_type = 'thin'
        lvm_driver = lvm.LVMVolumeDriver(configuration=configuration,
                                         db=db)
        fake_volume = tests_utils.create_volume(self.context,
                                                display_name='fake_volume')
        fake_snapshot = tests_utils.create_snapshot(self.context,
                                                    fake_volume.id)

        self.assertRaises(NotImplementedError,
                          lvm_driver.revert_to_snapshot,
                          self.context, fake_volume,
                          fake_snapshot)

    def test_lvm_manage_existing_snapshot_bad_size(self):
        """Make sure correct exception on bad size returned from LVM.

        This test case ensures that the correct exception is raised when
        the information returned for the existing LVs is not in the format
        that the manage_existing_snapshot code expects.
        """
        self._setup_stubs_for_manage_existing()

        ref = {'source-name': 'fake_lv_bad_size'}
        snp = {'name': 'test', 'id': fake.SNAPSHOT_ID, 'size': 2}

        with mock.patch.object(self.volume.driver.vg, 'get_volume',
                               self._get_manage_existing_lvs):
            self.assertRaises(
                exception.VolumeBackendAPIException,
                self.volume.driver.manage_existing_snapshot_get_size,
                snp, ref)

    def test_lvm_unmanage(self):
        # Unmanage is a no-op for the LVM driver; it must return None.
        volume = tests_utils.create_volume(self.context, status='available',
                                           size=1, host=CONF.host)
        ret = self.volume.driver.unmanage(volume)
        self.assertIsNone(ret)

    def test_lvm_get_manageable_volumes(self):
        # Four LVs: already-managed volume, in-use volume, a snapshot
        # (filtered out), and a free LV that is safe to manage.
        # side_effect lists are consumed in get_volumes() order.
        cinder_vols = [{'id': '00000000-0000-0000-0000-000000000000'}]
        lvs = [{'name': 'volume-00000000-0000-0000-0000-000000000000',
                'size': '1.75'},
               {'name': 'volume-00000000-0000-0000-0000-000000000001',
                'size': '3.0'},
               {'name': 'snapshot-00000000-0000-0000-0000-000000000002',
                'size': '2.2'},
               {'name': 'myvol', 'size': '4.0'}]
        self.volume.driver.vg = mock.Mock()
        self.volume.driver.vg.get_volumes.return_value = lvs
        self.volume.driver.vg.lv_is_snapshot.side_effect = [False, False,
                                                            True, False]
        self.volume.driver.vg.lv_is_open.side_effect = [True, False]
        res = self.volume.driver.get_manageable_volumes(
            cinder_vols, None, 1000, 0, ['size'], ['asc'])
        exp = [{'size': 2, 'reason_not_safe': 'already managed',
                'extra_info': None,
                'reference':
                    {'source-name':
                     'volume-00000000-0000-0000-0000-000000000000'},
                'cinder_id': '00000000-0000-0000-0000-000000000000',
                'safe_to_manage': False},
               {'size': 3, 'reason_not_safe': 'volume in use',
                'reference':
                    {'source-name':
                     'volume-00000000-0000-0000-0000-000000000001'},
                'safe_to_manage': False, 'cinder_id': None,
                'extra_info': None},
               {'size': 4, 'reason_not_safe': None, 'safe_to_manage': True,
                'reference': {'source-name': 'myvol'}, 'cinder_id': None,
                'extra_info': None}]
        self.assertEqual(exp, res)

    def test_lvm_get_manageable_snapshots(self):
        # Mirror of the volumes test for snapshots: already-managed,
        # non-snapshot LV (filtered out), in-use snapshot, and a free
        # snapshot that is safe to manage.
        cinder_snaps = [{'id': '00000000-0000-0000-0000-000000000000'}]
        lvs = [{'name': 'snapshot-00000000-0000-0000-0000-000000000000',
                'size': '1.75'},
               {'name': 'volume-00000000-0000-0000-0000-000000000001',
                'size': '3.0'},
               {'name': 'snapshot-00000000-0000-0000-0000-000000000002',
                'size': '2.2'},
               {'name': 'mysnap', 'size': '4.0'}]
        self.volume.driver.vg = mock.Mock()
        self.volume.driver.vg.get_volumes.return_value = lvs
        self.volume.driver.vg.lv_is_snapshot.side_effect = [True, False,
                                                            True, True]
        self.volume.driver.vg.lv_is_open.side_effect = [True, False]
        self.volume.driver.vg.lv_get_origin.side_effect = [
            'volume-00000000-0000-0000-0000-000000000000',
            'volume-00000000-0000-0000-0000-000000000002', 'myvol']
        res = self.volume.driver.get_manageable_snapshots(
            cinder_snaps, None, 1000, 0, ['size'], ['asc'])
        exp = [{'size': 2, 'reason_not_safe': 'already managed',
                'reference':
                    {'source-name':
                     'snapshot-00000000-0000-0000-0000-000000000000'},
                'safe_to_manage': False, 'extra_info': None,
                'cinder_id': '00000000-0000-0000-0000-000000000000',
                'source_reference':
                    {'source-name':
                     'volume-00000000-0000-0000-0000-000000000000'}},
               {'size': 3, 'reason_not_safe': 'snapshot in use',
                'reference':
                    {'source-name':
                     'snapshot-00000000-0000-0000-0000-000000000002'},
                'safe_to_manage': False, 'extra_info': None,
                'cinder_id': None,
                'source_reference':
                    {'source-name':
                     'volume-00000000-0000-0000-0000-000000000002'}},
               {'size': 4, 'reason_not_safe': None,
                'reference': {'source-name': 'mysnap'},
                'safe_to_manage': True, 'cinder_id': None,
                'source_reference': {'source-name': 'myvol'},
                'extra_info': None}]
        self.assertEqual(exp, res)
class LVMVolumeDriverTestCase(DriverTestCase):
    """Test case for VolumeDriver"""
    driver_name = "cinder.volume.drivers.lvm.LVMVolumeDriver"
    # Minimal volume dict; deliberately lacks 'size'/'volume_size' so the
    # invalid-parameter test below can use it as-is.
    FAKE_VOLUME = {'name': 'test1', 'id': 'test1'}

    @mock.patch.object(fake_driver.FakeISCSIDriver, 'create_export')
    def test_delete_volume_invalid_parameter(self, _mock_create_export):
        # _delete_volume with volume_clear='zero' needs a size to clear.
        self.configuration.volume_clear = 'zero'
        self.configuration.volume_clear_size = 0
        lvm_driver = lvm.LVMVolumeDriver(configuration=self.configuration,
                                         db=db)
        self.mox.StubOutWithMock(os.path, 'exists')

        os.path.exists(mox.IgnoreArg()).AndReturn(True)

        self.mox.ReplayAll()
        # Test volume without 'size' field and 'volume_size' field
        self.assertRaises(exception.InvalidParameterValue,
                          lvm_driver._delete_volume,
                          self.FAKE_VOLUME)

    @mock.patch.object(fake_driver.FakeISCSIDriver, 'create_export')
    def test_delete_volume_bad_path(self, _mock_create_export):
        # Device path does not exist -> backend API error on delete.
        self.configuration.volume_clear = 'zero'
        self.configuration.volume_clear_size = 0
        self.configuration.volume_type = 'default'

        volume = dict(self.FAKE_VOLUME, size=1)
        lvm_driver = lvm.LVMVolumeDriver(configuration=self.configuration,
                                         db=db)

        self.mox.StubOutWithMock(os.path, 'exists')
        os.path.exists(mox.IgnoreArg()).AndReturn(False)
        self.mox.ReplayAll()

        self.assertRaises(exception.VolumeBackendAPIException,
                          lvm_driver._delete_volume, volume)

    @mock.patch.object(fake_driver.FakeISCSIDriver, 'create_export')
    def test_delete_volume_thinlvm_snap(self, _mock_create_export):
        self.configuration.volume_clear = 'zero'
        self.configuration.volume_clear_size = 0
        self.configuration.lvm_type = 'thin'
        self.configuration.iscsi_helper = 'tgtadm'
        lvm_driver = lvm.LVMVolumeDriver(configuration=self.configuration,
                                         vg_obj=mox.MockAnything(),
                                         db=db)

        # Ensures that copy_volume is not called for ThinLVM
        self.mox.StubOutWithMock(volutils, 'copy_volume')
        self.mox.StubOutWithMock(volutils, 'clear_volume')
        self.mox.StubOutWithMock(lvm_driver, '_execute')
        self.mox.ReplayAll()

        uuid = '00000000-0000-0000-0000-c3aa7ee01536'

        fake_snapshot = {'name': 'volume-' + uuid,
                         'id': uuid,
                         'size': 123}

        lvm_driver._delete_volume(fake_snapshot, is_snapshot=True)

    def test_check_for_setup_error(self):

        def get_all_volume_groups(vg):
            return [{'name': 'cinder-volumes'}]

        self.stubs.Set(volutils, 'get_all_volume_groups',
                       get_all_volume_groups)

        vg_obj = fake_lvm.FakeBrickLVM('cinder-volumes',
                                       False,
                                       None,
                                       'default')

        configuration = conf.Configuration(fake_opt, 'fake_group')
        lvm_driver = lvm.LVMVolumeDriver(configuration=configuration,
                                         vg_obj=vg_obj, db=db)
        lvm_driver.delete_snapshot = mock.Mock()
        self.stubs.Set(volutils, 'get_all_volume_groups',
                       get_all_volume_groups)

        volume = tests_utils.create_volume(self.context,
                                           host=socket.gethostname())
        volume_id = volume['id']

        # Seed a backup record so setup-error checking runs against a
        # non-empty database.
        backup = {}
        backup['volume_id'] = volume_id
        backup['user_id'] = fake.USER_ID
        backup['project_id'] = fake.PROJECT_ID
        backup['host'] = socket.gethostname()
        backup['availability_zone'] = '1'
        backup['display_name'] = 'test_check_for_setup_error'
        backup['display_description'] = 'test_check_for_setup_error'
        backup['container'] = 'fake'
        backup['status'] = fields.BackupStatus.CREATING
        backup['fail_reason'] = ''
        backup['service'] = 'fake'
        backup['parent_id'] = None
        backup['size'] = 5 * 1024 * 1024
        backup['object_count'] = 22
        db.backup_create(self.context, backup)

        lvm_driver.check_for_setup_error()

    @mock.patch.object(utils, 'temporary_chown')
    @mock.patch('six.moves.builtins.open')
    @mock.patch.object(os_brick.initiator.connector,
                       'get_connector_properties')
    @mock.patch.object(db.sqlalchemy.api, 'volume_get')
    def test_backup_volume(self, mock_volume_get,
                           mock_get_connector_properties,
                           mock_file_open,
                           mock_temporary_chown):
        vol = tests_utils.create_volume(self.context)
        self.context.user_id = fake.USER_ID
        self.context.project_id = fake.PROJECT_ID
        backup = tests_utils.create_backup(self.context,
                                           vol['id'])
        backup_obj = objects.Backup.get_by_id(self.context, backup.id)

        properties = {}
        attach_info = {'device': {'path': '/dev/null'}}
        backup_service = mock.Mock()

        self.volume.driver._detach_volume = mock.MagicMock()
        self.volume.driver._attach_volume = mock.MagicMock()
        self.volume.driver.terminate_connection = mock.MagicMock()

        mock_volume_get.return_value = vol
        mock_get_connector_properties.return_value = properties
        f = mock_file_open.return_value = open('/dev/null', 'rb')

        backup_service.backup(backup_obj, f, None)
        self.volume.driver._attach_volume.return_value = attach_info

        self.volume.driver.backup_volume(self.context, backup_obj,
                                         backup_service)

        mock_volume_get.assert_called_with(self.context, vol['id'])

    def test_retype_volume(self):
        # The LVM driver accepts any retype within itself.
        vol = tests_utils.create_volume(self.context)
        new_type = fake.VOLUME_TYPE_ID
        diff = {}
        host = 'fake_host'
        retyped = self.volume.driver.retype(self.context, vol, new_type,
                                            diff, host)
        self.assertTrue(retyped)

    def test_update_migrated_volume(self):
        fake_volume_id = fake.VOLUME_ID
        fake_new_volume_id = fake.VOLUME2_ID
        fake_provider = 'fake_provider'
        original_volume_name = CONF.volume_name_template % fake_volume_id
        current_name = CONF.volume_name_template % fake_new_volume_id
        fake_volume = tests_utils.create_volume(self.context)
        fake_volume['id'] = fake_volume_id
        fake_new_volume = tests_utils.create_volume(self.context)
        fake_new_volume['id'] = fake_new_volume_id
        fake_new_volume['provider_location'] = fake_provider
        fake_vg = fake_lvm.FakeBrickLVM('cinder-volumes', False,
                                        None, 'default')
        with mock.patch.object(self.volume.driver, 'vg') as vg:
            vg.return_value = fake_vg
            vg.rename_volume.return_value = None
            # Successful rename -> name/provider fields are cleared.
            update = self.volume.driver.update_migrated_volume(
                self.context, fake_volume, fake_new_volume, 'available')
            vg.rename_volume.assert_called_once_with(current_name,
                                                     original_volume_name)
            self.assertEqual({'_name_id': None,
                              'provider_location': None}, update)

            vg.rename_volume.reset_mock()
            vg.rename_volume.side_effect = \
                processutils.ProcessExecutionError
            # Failed rename -> keep pointing at the new volume's identity.
            update = self.volume.driver.update_migrated_volume(
                self.context, fake_volume, fake_new_volume, 'available')
            vg.rename_volume.assert_called_once_with(current_name,
                                                     original_volume_name)
            self.assertEqual({'_name_id': fake_new_volume_id,
                              'provider_location': fake_provider},
                             update)

    @mock.patch.object(utils, 'temporary_chown')
    @mock.patch('six.moves.builtins.open')
    @mock.patch.object(os_brick.initiator.connector,
                       'get_connector_properties')
    @mock.patch.object(db.sqlalchemy.api, 'volume_get')
    def test_backup_volume_inuse(self, mock_volume_get,
                                 mock_get_connector_properties,
                                 mock_file_open,
                                 mock_temporary_chown):
        # Backing up an in-use volume goes through a temporary snapshot,
        # which must be created and deleted exactly once.
        vol = tests_utils.create_volume(self.context,
                                        status='backing-up',
                                        previous_status='in-use')
        self.context.user_id = fake.USER_ID
        self.context.project_id = fake.PROJECT_ID

        mock_volume_get.return_value = vol
        temp_snapshot = tests_utils.create_snapshot(self.context, vol['id'])
        backup = tests_utils.create_backup(self.context,
                                           vol['id'])
        backup_obj = objects.Backup.get_by_id(self.context, backup.id)
        properties = {}
        attach_info = {'device': {'path': '/dev/null'}}
        backup_service = mock.Mock()

        self.volume.driver._detach_volume = mock.MagicMock()
        self.volume.driver._attach_volume = mock.MagicMock()
        self.volume.driver.terminate_connection = mock.MagicMock()
        self.volume.driver._create_temp_snapshot = mock.MagicMock()
        self.volume.driver._delete_temp_snapshot = mock.MagicMock()

        mock_get_connector_properties.return_value = properties
        f = mock_file_open.return_value = open('/dev/null', 'rb')

        backup_service.backup(backup_obj, f, None)
        self.volume.driver._attach_volume.return_value = attach_info
        self.volume.driver._create_temp_snapshot.return_value = \
            temp_snapshot

        self.volume.driver.backup_volume(self.context, backup_obj,
                                         backup_service)

        mock_volume_get.assert_called_with(self.context, vol['id'])
        self.volume.driver._create_temp_snapshot.assert_called_once_with(
            self.context, vol)
        self.volume.driver._delete_temp_snapshot.assert_called_once_with(
            self.context, temp_snapshot)

    def test_create_volume_from_snapshot_none_sparse(self):

        with mock.patch.object(self.volume.driver, 'vg'), \
                mock.patch.object(self.volume.driver, '_create_volume'), \
                mock.patch.object(volutils, 'copy_volume') as mock_copy:

            # Test case for thick LVM
            src_volume = tests_utils.create_volume(self.context)
            snapshot_ref = tests_utils.create_snapshot(self.context,
                                                       src_volume['id'])
            dst_volume = tests_utils.create_volume(self.context)
            self.volume.driver.create_volume_from_snapshot(dst_volume,
                                                           snapshot_ref)

            volume_path = self.volume.driver.local_path(dst_volume)
            snapshot_path = self.volume.driver.local_path(snapshot_ref)
            volume_size = 1024
            block_size = '1M'
            mock_copy.assert_called_with(snapshot_path,
                                         volume_path,
                                         volume_size,
                                         block_size,
                                         execute=self.volume.driver._execute,
                                         sparse=False)

    def test_create_volume_from_snapshot_sparse(self):

        self.configuration.lvm_type = 'thin'
        lvm_driver = lvm.LVMVolumeDriver(configuration=self.configuration,
                                         db=db)

        with mock.patch.object(lvm_driver, 'vg'), \
                mock.patch.object(lvm_driver, '_create_volume'), \
                mock.patch.object(volutils, 'copy_volume') as mock_copy:

            # Test case for thin LVM
            lvm_driver._sparse_copy_volume = True
            src_volume = tests_utils.create_volume(self.context)
            snapshot_ref = tests_utils.create_snapshot(self.context,
                                                       src_volume['id'])
            dst_volume = tests_utils.create_volume(self.context)
            lvm_driver.create_volume_from_snapshot(dst_volume,
                                                   snapshot_ref)

            volume_path = lvm_driver.local_path(dst_volume)
            snapshot_path = lvm_driver.local_path(snapshot_ref)
            volume_size = 1024
            block_size = '1M'
            mock_copy.assert_called_with(snapshot_path,
                                         volume_path,
                                         volume_size,
                                         block_size,
                                         execute=lvm_driver._execute,
                                         sparse=True)

    @mock.patch.object(cinder.volume.utils, 'get_all_volume_groups',
                       return_value=[{'name': 'cinder-volumes'}])
    @mock.patch('cinder.brick.local_dev.lvm.LVM.update_volume_group_info')
    @mock.patch('cinder.brick.local_dev.lvm.LVM.get_all_physical_volumes')
    @mock.patch('cinder.brick.local_dev.lvm.LVM.supports_thin_provisioning',
                return_value=True)
    def test_lvm_type_auto_thin_pool_exists(self, *_unused_mocks):
        # 'auto' resolves to 'thin' when thin provisioning is supported.
        configuration = conf.Configuration(fake_opt, 'fake_group')
        configuration.lvm_type = 'auto'

        vg_obj = fake_lvm.FakeBrickLVM('cinder-volumes',
                                       False,
                                       None,
                                       'default')

        lvm_driver = lvm.LVMVolumeDriver(configuration=configuration,
                                         vg_obj=vg_obj)

        lvm_driver.check_for_setup_error()

        self.assertEqual('thin', lvm_driver.configuration.lvm_type)

    @mock.patch.object(cinder.volume.utils, 'get_all_volume_groups',
                       return_value=[{'name': 'cinder-volumes'}])
    @mock.patch.object(cinder.brick.local_dev.lvm.LVM, 'get_volumes',
                       return_value=[])
    @mock.patch('cinder.brick.local_dev.lvm.LVM.update_volume_group_info')
    @mock.patch('cinder.brick.local_dev.lvm.LVM.get_all_physical_volumes')
    @mock.patch('cinder.brick.local_dev.lvm.LVM.supports_thin_provisioning',
                return_value=True)
    def test_lvm_type_auto_no_lvs(self, *_unused_mocks):
        # An empty VG with thin support still resolves 'auto' -> 'thin'.
        configuration = conf.Configuration(fake_opt, 'fake_group')
        configuration.lvm_type = 'auto'

        vg_obj = fake_lvm.FakeBrickLVM('cinder-volumes',
                                       False,
                                       None,
                                       'default')

        lvm_driver = lvm.LVMVolumeDriver(configuration=configuration,
                                         vg_obj=vg_obj)

        lvm_driver.check_for_setup_error()

        self.assertEqual('thin', lvm_driver.configuration.lvm_type)

    @mock.patch.object(cinder.volume.utils, 'get_all_volume_groups',
                       return_value=[{'name': 'cinder-volumes'}])
    @mock.patch('cinder.brick.local_dev.lvm.LVM.update_volume_group_info')
    @mock.patch('cinder.brick.local_dev.lvm.LVM.get_all_physical_volumes')
    @mock.patch('cinder.brick.local_dev.lvm.LVM.supports_thin_provisioning',
                return_value=False)
    def test_lvm_type_auto_no_thin_support(self, *_unused_mocks):
        # No thin support -> 'auto' resolves to 'default'.
        configuration = conf.Configuration(fake_opt, 'fake_group')
        configuration.lvm_type = 'auto'

        lvm_driver = lvm.LVMVolumeDriver(configuration=configuration)

        lvm_driver.check_for_setup_error()

        self.assertEqual('default', lvm_driver.configuration.lvm_type)

    @mock.patch.object(cinder.volume.utils, 'get_all_volume_groups',
                       return_value=[{'name': 'cinder-volumes'}])
    @mock.patch('cinder.brick.local_dev.lvm.LVM.update_volume_group_info')
    @mock.patch('cinder.brick.local_dev.lvm.LVM.get_all_physical_volumes')
    @mock.patch('cinder.brick.local_dev.lvm.LVM.get_volume')
    @mock.patch('cinder.brick.local_dev.lvm.LVM.supports_thin_provisioning',
                return_value=False)
    def test_lvm_type_auto_no_thin_pool(self, *_unused_mocks):
        # No thin pool available -> 'auto' resolves to 'default'.
        configuration = conf.Configuration(fake_opt, 'fake_group')
        configuration.lvm_type = 'auto'

        lvm_driver = lvm.LVMVolumeDriver(configuration=configuration)

        lvm_driver.check_for_setup_error()

        self.assertEqual('default', lvm_driver.configuration.lvm_type)

    @mock.patch.object(lvm.LVMVolumeDriver, 'extend_volume')
    def test_create_cloned_volume_by_thin_snapshot(self, mock_extend):
        # Thin-LVM clone path: snapshot + extend + permanent activation.
        self.configuration.lvm_type = 'thin'
        fake_vg = mock.Mock(
            fake_lvm.FakeBrickLVM('cinder-volumes', False, None, 'default'))
        lvm_driver = lvm.LVMVolumeDriver(configuration=self.configuration,
                                         vg_obj=fake_vg,
                                         db=db)
        fake_volume = tests_utils.create_volume(self.context, size=1)
        fake_new_volume = tests_utils.create_volume(self.context, size=2)

        lvm_driver.create_cloned_volume(fake_new_volume, fake_volume)
        fake_vg.create_lv_snapshot.assert_called_once_with(
            fake_new_volume['name'], fake_volume['name'], 'thin')
        mock_extend.assert_called_once_with(fake_new_volume, 2)
        fake_vg.activate_lv.assert_called_once_with(
            fake_new_volume['name'], is_snapshot=True, permanent=True)

    def test_lvm_migrate_volume_no_loc_info(self):
        # No 'location_info' capability -> driver declines to migrate.
        host = {'capabilities': {}}
        vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'}
        moved, model_update = self.volume.driver.migrate_volume(
            self.context, vol, host)
        self.assertFalse(moved)
        self.assertIsNone(model_update)

    def test_lvm_migrate_volume_bad_loc_info(self):
        # Malformed 'location_info' -> driver declines to migrate.
        capabilities = {'location_info': 'foo'}
        host = {'capabilities': capabilities}
        vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'}
        moved, model_update = self.volume.driver.migrate_volume(
            self.context, vol, host)
        self.assertFalse(moved)
        self.assertIsNone(model_update)

    def test_lvm_migrate_volume_diff_driver(self):
        # Destination backed by a different driver -> driver declines.
        capabilities = {'location_info': 'FooDriver:foo:bar:default:0'}
        host = {'capabilities': capabilities}
        vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'}
        moved, model_update = self.volume.driver.migrate_volume(
            self.context, vol, host)
        self.assertFalse(moved)
        self.assertIsNone(model_update)

    def test_lvm_migrate_volume_diff_host(self):
        # Destination on a different host -> optimized path declines.
        capabilities = {'location_info': 'LVMVolumeDriver:foo:bar:default:0'}
        host = {'capabilities': capabilities}
        vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'}
        moved, model_update = self.volume.driver.migrate_volume(
            self.context, vol, host)
        self.assertFalse(moved)
        self.assertIsNone(model_update)

    def test_lvm_migrate_volume_in_use(self):
        # Attached ('in-use') volumes are not migrated by the driver.
        hostname = socket.gethostname()
        capabilities = {'location_info': 'LVMVolumeDriver:%s:bar' % hostname}
        host = {'capabilities': capabilities}
        vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'in-use'}
        moved, model_update = self.volume.driver.migrate_volume(
            self.context, vol, host)
        self.assertFalse(moved)
        self.assertIsNone(model_update)

    @mock.patch.object(volutils, 'get_all_volume_groups',
                       return_value=[{'name': 'cinder-volumes'}])
    def test_lvm_migrate_volume_same_volume_group(self, vgs):
        # Source and destination VG are identical -> backend API error.
        hostname = socket.gethostname()
        capabilities = {'location_info': 'LVMVolumeDriver:%s:'
                                         'cinder-volumes:default:0'
                        % hostname}
        host = {'capabilities': capabilities}
        vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'}
        self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes',
                                                      False,
                                                      None,
                                                      'default')
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.volume.driver.migrate_volume, self.context,
                          vol, host)

    @mock.patch.object(lvm.LVMVolumeDriver, '_create_volume')
    @mock.patch.object(brick_lvm.LVM, 'get_all_physical_volumes')
    @mock.patch.object(brick_lvm.LVM, 'delete')
    @mock.patch.object(volutils, 'copy_volume',
                       side_effect=processutils.ProcessExecutionError)
    @mock.patch.object(volutils, 'get_all_volume_groups',
                       return_value=[{'name': 'cinder-volumes'}])
    def 
test_lvm_migrate_volume_volume_copy_error(self, vgs, copy_volume, mock_delete, mock_pvs, mock_create): hostname = socket.gethostname() capabilities = { 'location_info': 'LVMVolumeDriver:%s:' 'cinder-volumes:default:0' % hostname } host = {'capabilities': capabilities} vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'} self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes-old', False, None, 'default') self.assertRaises(processutils.ProcessExecutionError, self.volume.driver.migrate_volume, self.context, vol, host) mock_delete.assert_called_once_with(vol) def test_lvm_volume_group_missing(self): hostname = socket.gethostname() capabilities = { 'location_info': 'LVMVolumeDriver:%s:' 'cinder-volumes-3:default:0' % hostname } host = {'capabilities': capabilities} vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'} def get_all_volume_groups(): return [{'name': 'cinder-volumes-2'}] self.stubs.Set(volutils, 'get_all_volume_groups', get_all_volume_groups) self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes', False, None, 'default') moved, model_update = self.volume.driver.migrate_volume( self.context, vol, host) self.assertFalse(moved) self.assertIsNone(model_update) def test_lvm_migrate_volume_proceed(self): hostname = socket.gethostname() capabilities = { 'location_info': 'LVMVolumeDriver:%s:' 'cinder-volumes-2:default:0' % hostname } host = {'capabilities': capabilities} vol = {'name': 'testvol', 'id': 1, 'size': 2, 'status': 'available'} def fake_execute(*args, **kwargs): pass def get_all_volume_groups(): # NOTE(flaper87) Return just the destination # host to test the check of dest VG existence. 
return [{'name': 'cinder-volumes-2'}] def _fake_get_all_physical_volumes(obj, root_helper, vg_name): return [{}] with mock.patch.object(brick_lvm.LVM, 'get_all_physical_volumes', return_value = [{}]), \ mock.patch.object(self.volume.driver, '_execute') \ as mock_execute, \ mock.patch.object(volutils, 'copy_volume') as mock_copy, \ mock.patch.object(volutils, 'get_all_volume_groups', side_effect = get_all_volume_groups), \ mock.patch.object(self.volume.driver, '_delete_volume'): self.volume.driver.vg = fake_lvm.FakeBrickLVM( 'cinder-volumes', False, None, 'default') moved, model_update = \ self.volume.driver.migrate_volume(self.context, vol, host) self.assertTrue(moved) self.assertIsNone(model_update) mock_copy.assert_called_once_with( '/dev/mapper/cinder--volumes-testvol', '/dev/mapper/cinder--volumes--2-testvol', 2048, '1M', execute=mock_execute, sparse=False) def test_lvm_migrate_volume_proceed_with_thin(self): hostname = socket.gethostname() capabilities = { 'location_info': 'LVMVolumeDriver:%s:' 'cinder-volumes-2:default:0' % hostname } host = {'capabilities': capabilities} vol = {'name': 'testvol', 'id': 1, 'size': 2, 'status': 'available'} def fake_execute(*args, **kwargs): pass def get_all_volume_groups(): # NOTE(flaper87) Return just the destination # host to test the check of dest VG existence. 
return [{'name': 'cinder-volumes-2'}] def _fake_get_all_physical_volumes(obj, root_helper, vg_name): return [{}] self.configuration.lvm_type = 'thin' lvm_driver = lvm.LVMVolumeDriver(configuration=self.configuration, db=db) with mock.patch.object(brick_lvm.LVM, 'get_all_physical_volumes', return_value = [{}]), \ mock.patch.object(lvm_driver, '_execute') \ as mock_execute, \ mock.patch.object(volutils, 'copy_volume') as mock_copy, \ mock.patch.object(volutils, 'get_all_volume_groups', side_effect = get_all_volume_groups), \ mock.patch.object(lvm_driver, '_delete_volume'): lvm_driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes', False, None, 'default') lvm_driver._sparse_copy_volume = True moved, model_update = \ lvm_driver.migrate_volume(self.context, vol, host) self.assertTrue(moved) self.assertIsNone(model_update) mock_copy.assert_called_once_with( '/dev/mapper/cinder--volumes-testvol', '/dev/mapper/cinder--volumes--2-testvol', 2048, '1M', execute=mock_execute, sparse=True) @staticmethod def _get_manage_existing_lvs(name): """Helper method used by the manage_existing tests below.""" lvs = [{ 'name': 'fake_lv', 'size': '1.75' }, { 'name': 'fake_lv_bad_size', 'size': 'Not a float' }] for lv in lvs: if lv['name'] == name: return lv def _setup_stubs_for_manage_existing(self): """Helper to set up common stubs for the manage_existing tests.""" self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes', False, None, 'default') self.stubs.Set(self.volume.driver.vg, 'get_volume', self._get_manage_existing_lvs) @mock.patch.object(db.sqlalchemy.api, 'volume_get', side_effect=exception.VolumeNotFound( volume_id='d8cd1feb-2dcc-404d-9b15-b86fe3bec0a1')) def test_lvm_manage_existing_not_found(self, mock_vol_get): self._setup_stubs_for_manage_existing() vol_name = 'volume-d8cd1feb-2dcc-404d-9b15-b86fe3bec0a1' ref = {'source-name': 'fake_lv'} vol = {'name': vol_name, 'id': fake.VOLUME_ID, 'size': 0} with mock.patch.object(self.volume.driver.vg, 'rename_volume'): model_update = 
self.volume.driver.manage_existing(vol, ref) self.assertIsNone(model_update) @mock.patch.object(db.sqlalchemy.api, 'volume_get') def test_lvm_manage_existing_already_managed(self, mock_conf): self._setup_stubs_for_manage_existing() mock_conf.volume_name_template = 'volume-%s' vol_name = 'volume-d8cd1feb-2dcc-404d-9b15-b86fe3bec0a1' ref = {'source-name': vol_name} vol = {'name': 'test', 'id': 1, 'size': 0} with mock.patch.object(self.volume.driver.vg, 'rename_volume'): self.assertRaises(exception.ManageExistingAlreadyManaged, self.volume.driver.manage_existing, vol, ref) def test_lvm_manage_existing(self): """Good pass on managing an LVM volume. This test case ensures that, when a logical volume with the specified name exists, and the size is as expected, no error is returned from driver.manage_existing, and that the rename_volume function is called in the Brick LVM code with the correct arguments. """ self._setup_stubs_for_manage_existing() ref = {'source-name': 'fake_lv'} vol = {'name': 'test', 'id': fake.VOLUME_ID, 'size': 0} def _rename_volume(old_name, new_name): self.assertEqual(ref['source-name'], old_name) self.assertEqual(vol['name'], new_name) self.stubs.Set(self.volume.driver.vg, 'rename_volume', _rename_volume) size = self.volume.driver.manage_existing_get_size(vol, ref) self.assertEqual(2, size) model_update = self.volume.driver.manage_existing(vol, ref) self.assertIsNone(model_update) def test_lvm_manage_existing_bad_size(self): """Make sure correct exception on bad size returned from LVM. This test case ensures that the correct exception is raised when the information returned for the existing LVs is not in the format that the manage_existing code expects. 
""" self._setup_stubs_for_manage_existing() ref = {'source-name': 'fake_lv_bad_size'} vol = {'name': 'test', 'id': fake.VOLUME_ID, 'size': 2} self.assertRaises(exception.VolumeBackendAPIException, self.volume.driver.manage_existing_get_size, vol, ref) def test_lvm_manage_existing_bad_ref(self): """Error case where specified LV doesn't exist. This test case ensures that the correct exception is raised when the caller attempts to manage a volume that does not exist. """ self._setup_stubs_for_manage_existing() ref = {'source-name': 'fake_nonexistent_lv'} vol = {'name': 'test', 'id': 1, 'size': 0, 'status': 'available'} self.assertRaises(exception.ManageExistingInvalidReference, self.volume.driver.manage_existing_get_size, vol, ref) def test_lvm_manage_existing_snapshot(self): """Good pass on managing an LVM snapshot. This test case ensures that, when a logical volume's snapshot with the specified name exists, and the size is as expected, no error is returned from driver.manage_existing_snapshot, and that the rename_volume function is called in the Brick LVM code with the correct arguments. """ self._setup_stubs_for_manage_existing() ref = {'source-name': 'fake_lv'} snp = {'name': 'test', 'id': fake.SNAPSHOT_ID, 'size': 0} def _rename_volume(old_name, new_name): self.assertEqual(ref['source-name'], old_name) self.assertEqual(snp['name'], new_name) with mock.patch.object(self.volume.driver.vg, 'rename_volume') as \ mock_rename_volume: mock_rename_volume.return_value = _rename_volume size = self.volume.driver.manage_existing_snapshot_get_size( snp, ref) self.assertEqual(2, size) model_update = self.volume.driver.manage_existing_snapshot( snp, ref) self.assertIsNone(model_update) def test_lvm_manage_existing_snapshot_bad_ref(self): """Error case where specified LV snapshot doesn't exist. This test case ensures that the correct exception is raised when the caller attempts to manage a snapshot that does not exist. 
""" self._setup_stubs_for_manage_existing() ref = {'source-name': 'fake_nonexistent_lv'} snp = { 'name': 'test', 'id': fake.SNAPSHOT_ID, 'size': 0, 'status': 'available', } self.assertRaises(exception.ManageExistingInvalidReference, self.volume.driver.manage_existing_snapshot_get_size, snp, ref) def test_lvm_manage_existing_snapshot_bad_size(self): """Make sure correct exception on bad size returned from LVM. This test case ensures that the correct exception is raised when the information returned for the existing LVs is not in the format that the manage_existing_snapshot code expects. """ self._setup_stubs_for_manage_existing() ref = {'source-name': 'fake_lv_bad_size'} snp = {'name': 'test', 'id': fake.SNAPSHOT_ID, 'size': 2} self.assertRaises(exception.VolumeBackendAPIException, self.volume.driver.manage_existing_snapshot_get_size, snp, ref) def test_lvm_unmanage(self): volume = tests_utils.create_volume(self.context, status='available', size=1, host=CONF.host) ret = self.volume.driver.unmanage(volume) self.assertIsNone(ret) # Global setting, LVM setting, expected outcome @ddt.data((10.0, 2.0, 2.0)) @ddt.data((10.0, None, 10.0)) @ddt.unpack def test_lvm_max_over_subscription_ratio(self, global_value, lvm_value, expected_value): configuration = conf.Configuration(fake_opt, 'fake_group') configuration.max_over_subscription_ratio = global_value configuration.lvm_max_over_subscription_ratio = lvm_value fake_vg = mock.Mock( fake_lvm.FakeBrickLVM('cinder-volumes', False, None, 'default')) lvm_driver = lvm.LVMVolumeDriver(configuration=configuration, vg_obj=fake_vg, db=db) self.assertEqual(expected_value, lvm_driver.configuration.max_over_subscription_ratio)
def return_volume_nonexistent(context, volume_id):
    """Stub for the DB volume getter that never finds a volume.

    Always raises VolumeNotFound regardless of the requested id.
    """
    raise exception.VolumeNotFound('bogus test message')
def return_empty_volume_metadata(context, volume_id):
    """Stub metadata getter: empty metadata for every known volume.

    Raises VolumeNotFound only for the sentinel not-found volume id.
    """
    if volume_id != fake.WILL_NOT_BE_FOUND_ID:
        return {}
    raise exc.VolumeNotFound(volume_id)
def return_empty_container_metadata(context, volume_id, metadata, delete,
                                    meta_type):
    """Stub metadata updater that reports an empty result set.

    Raises VolumeNotFound only for the sentinel not-found volume id;
    ignores the requested metadata, delete flag and meta_type.
    """
    if volume_id != fake.WILL_NOT_BE_FOUND_ID:
        return {}
    raise exc.VolumeNotFound(volume_id)
def delete_volume_metadata(context, volume_id, key, meta_type):
    """Stub metadata deleter that succeeds silently.

    Raises VolumeNotFound for the sentinel not-found volume id and
    otherwise returns None (deletion is a no-op in the stub).
    """
    # Fix: the original ended with a redundant bare `pass` statement after
    # the conditional raise; removing it leaves behavior identical.
    if volume_id == fake.WILL_NOT_BE_FOUND_ID:
        raise exc.VolumeNotFound(volume_id)
def stub_volume_get_notfound(self, context, volume_id,
                             viewable_admin_meta=False):
    """Stub for the volume API get() that treats every id as missing."""
    raise exc.VolumeNotFound(volume_id)
def volume_get(self, context, volume_id, viewable_admin_meta=False):
    """Stub volume getter that recognises only 'fake_volume_id'.

    Returns a minimal volume dict for that id and raises VolumeNotFound
    for any other id.
    """
    if volume_id != 'fake_volume_id':
        raise exception.VolumeNotFound(volume_id=volume_id)
    return {'id': 'fake_volume_id',
            'name': 'fake_volume_name',
            'host': 'fake_host'}