def test_get_volume_attachment(self):
    """Attach the shared volume to a server and verify its attachment info."""
    # Boot a server to attach the shared test volume to.
    instance_name = data_utils.rand_name(
        self.__class__.__name__ + '-Instance')
    server = self.create_server(name=instance_name, wait_until='ACTIVE')

    # Attach the volume and wait until Cinder reports it as in-use.
    self.client.attach_volume(
        self.volume['id'],
        instance_uuid=server['id'],
        mountpoint='/dev/%s' % CONF.compute.volume_device_name)
    waiters.wait_for_volume_status(self.client, self.volume['id'],
                                   'in-use')
    # NOTE(gfidente): cleanups run LIFO, so register the wait for
    # 'available' before the detach call that triggers it.
    self.addCleanup(waiters.wait_for_volume_status, self.client,
                    self.volume['id'], 'available')
    self.addCleanup(self.client.detach_volume, self.volume['id'])

    # The attachment record must point back at both server and volume.
    fetched = self.client.show_volume(self.volume['id'])['volume']
    self.assertIn('attachments', fetched)
    attached = fetched['attachments'][0]
    self.assertEqual('/dev/%s' % CONF.compute.volume_device_name,
                     attached['device'])
    self.assertEqual(server['id'], attached['server_id'])
    self.assertEqual(self.volume['id'], attached['id'])
    self.assertEqual(self.volume['id'], attached['volume_id'])
def attach_volume(self, server, volume, device=None):
    """Attaches volume to server and waits for 'in-use' volume status.

    The volume will be detached when the test tears down.

    :param server: The server to which the volume will be attached.
    :param volume: The volume to attach.
    :param device: Optional mountpoint for the attached volume. Note that
        this is not guaranteed for all hypervisors and is not recommended.
    """
    kwargs = {'volumeId': volume['id']}
    if device:
        kwargs['device'] = device
    self.servers_client.attach_volume(server['id'], **kwargs)
    # Teardown steps are registered in reverse execution order (LIFO):
    # the wait for 'available' only completes after the detach below,
    # and both must happen before the volume can be deleted.
    self.addCleanup(waiters.wait_for_volume_status, self.volumes_client,
                    volume['id'], 'available')
    # A 404 on detach is fine: the server may already be gone or the
    # volume may already be detached by the time cleanup runs.
    self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                    self.servers_client.detach_volume,
                    server['id'], volume['id'])
    waiters.wait_for_volume_status(self.volumes_client, volume['id'],
                                   'in-use')
def resource_setup(cls):
    """Create the shared volume used by the attach/detach tests."""
    super(VolumesV2ActionsTest, cls).resource_setup()
    # One volume is shared across the class's attach/detach tests.
    cls.volume = cls.create_volume()
    waiters.wait_for_volume_status(cls.client, cls.volume['id'],
                                   'available')
def resource_setup(cls):
    """Create the three volumes shared by the listing tests.

    If only some of the volumes can be created, the partial set is
    removed and the test class is skipped instead of failing.
    """
    super(VolumesTestJSON, cls).resource_setup()
    cls.volume_list = []
    cls.volume_id_list = []
    for _ in range(3):
        v_name = data_utils.rand_name('volume')
        metadata = {'Type': 'work'}
        try:
            volume = cls.client.create_volume(
                size=CONF.volume.volume_size,
                display_name=v_name,
                metadata=metadata)['volume']
            waiters.wait_for_volume_status(cls.client, volume['id'],
                                           'available')
            volume = cls.client.show_volume(volume['id'])['volume']
            cls.volume_list.append(volume)
            cls.volume_id_list.append(volume['id'])
        except Exception:
            if not cls.volume_list:
                # Nothing was created at all: propagate the failure.
                raise
            # Some, but not all, volumes were created. This is
            # typically because the backing file size of the volume
            # group is too small, so clean up whatever we did manage
            # to create and skip.
            for volume in cls.volume_list:
                cls.delete_volume(volume['id'])
            msg = ("Failed to create ALL necessary volumes to run "
                   "test. This typically means that the backing file "
                   "size of the nova-volumes group is too small to "
                   "create the 3 volumes needed by this test case")
            raise cls.skipException(msg)
def test_create_get_list_accept_volume_transfer(self):
    """Create, show, list and accept a volume transfer."""
    # A transfer needs an existing volume; the admin client cleans it up.
    volume = self.create_volume()
    self.addCleanup(self.delete_volume, self.adm_client, volume['id'])

    # Initiate the transfer; the volume moves to 'awaiting-transfer'.
    transfer = self.client.create_volume_transfer(
        volume_id=volume['id'])['transfer']
    transfer_id = transfer['id']
    auth_key = transfer['auth_key']
    waiters.wait_for_volume_status(self.client, volume['id'],
                                   'awaiting-transfer')

    # Show the transfer and check it references our volume.
    body = self.client.show_volume_transfer(transfer_id)['transfer']
    self.assertEqual(volume['id'], body['volume_id'])

    # The transfer must appear in the listing (at least one entry).
    body = self.client.list_volume_transfers()['transfers']
    self.assertThat(len(body), matchers.GreaterThan(0))

    # The alternate tenant accepts the transfer with the auth key; the
    # volume then becomes 'available' under its ownership.
    body = self.alt_client.accept_volume_transfer(
        transfer_id, auth_key=auth_key)['transfer']
    waiters.wait_for_volume_status(self.alt_client, volume['id'],
                                   'available')
def test_backup_create_attached_volume(self):
    """Test backup create using force flag.

    Cinder allows to create a volume backup, whether the volume status
    is "available" or "in-use".
    """
    # Prepare a server with a volume attached to it.
    volume = self.create_volume()
    self.addCleanup(self.volumes_client.delete_volume, volume['id'])
    server = self.create_server(wait_until='ACTIVE')
    self.servers_client.attach_volume(server['id'], volumeId=volume['id'])
    waiters.wait_for_volume_status(self.volumes_client, volume['id'],
                                   'in-use')
    # Cleanups run LIFO: detach first, then wait for 'available'.
    self.addCleanup(waiters.wait_for_volume_status, self.volumes_client,
                    volume['id'], 'available')
    self.addCleanup(self.servers_client.detach_volume, server['id'],
                    volume['id'])

    # force=True lets the backup proceed while the volume is in-use.
    backup_name = data_utils.rand_name(self.__class__.__name__ + '-Backup')
    backup = self.backups_client.create_backup(volume_id=volume['id'],
                                               name=backup_name,
                                               force=True)['backup']
    self.addCleanup(self.backups_client.delete_backup, backup['id'])
    waiters.wait_for_backup_status(self.backups_client, backup['id'],
                                   'available')
    self.assertEqual(backup_name, backup['name'])
def test_volume_type_access_add(self):
    """Grant a tenant access to a non-public volume type."""
    # A non-public type is invisible to the primary tenant...
    params = {'os-volume-type-access:is_public': False}
    volume_type = self.create_volume_type(**params)
    # ...so creating a volume from it must fail with NotFound.
    self.assertRaises(lib_exc.NotFound,
                      self.volumes_client.create_volume,
                      volume_type=volume_type['id'],
                      size=CONF.volume.volume_size)
    # Grant (and on teardown revoke) access for the primary tenant.
    self.admin_volume_types_client.add_type_access(
        volume_type['id'], project=self.volumes_client.tenant_id)
    self.addCleanup(self.admin_volume_types_client.remove_type_access,
                    volume_type['id'],
                    project=self.volumes_client.tenant_id)
    # Now the primary tenant can build a volume from the type.
    volume = self.volumes_client.create_volume(
        volume_type=volume_type['id'],
        size=CONF.volume.volume_size)['volume']
    self.addCleanup(self.delete_volume, self.volumes_client, volume['id'])
    waiters.wait_for_volume_status(self.volumes_client, volume['id'],
                                   'available')
    # The created volume must report the granted type.
    self.assertEqual(volume_type['name'], volume['volume_type'])
def test_volume_create_get_delete(self):
    """CREATE, GET, DELETE a volume and verify its attributes.

    Fixes: removed the dead ``volume = None`` pre-assignment (the name
    is unconditionally rebound on the next statement) and replaced
    ``assertTrue(x is not None)`` with the idiomatic ``assertIsNotNone``.
    """
    v_name = data_utils.rand_name('Volume')
    metadata = {'Type': 'work'}
    # Create volume
    volume = self.client.create_volume(size=CONF.volume.volume_size,
                                       display_name=v_name,
                                       metadata=metadata)['volume']
    self.addCleanup(self.delete_volume, volume['id'])
    self.assertIn('id', volume)
    self.assertIn('displayName', volume)
    self.assertEqual(volume['displayName'], v_name,
                     "The created volume name is not equal "
                     "to the requested name")
    self.assertIsNotNone(volume['id'],
                         "Field volume id is empty or not found.")
    # Wait for Volume status to become ACTIVE
    waiters.wait_for_volume_status(self.client, volume['id'], 'available')
    # GET Volume
    fetched_volume = self.client.show_volume(volume['id'])['volume']
    # Verification of details of fetched Volume
    self.assertEqual(v_name,
                     fetched_volume['displayName'],
                     'The fetched Volume is different '
                     'from the created Volume')
    self.assertEqual(volume['id'],
                     fetched_volume['id'],
                     'The fetched Volume is different '
                     'from the created Volume')
    self.assertThat(fetched_volume['metadata'].items(),
                    matchers.ContainsAll(metadata.items()),
                    'The fetched Volume metadata misses data '
                    'from the created Volume')
def _volume_clean_up(self, server_id, volume_id):
    """Detach the volume from the server if needed, then delete it."""
    vol = self.volumes_client.show_volume(volume_id)['volume']
    # An in-use volume must be detached before Cinder will delete it.
    if vol['status'] == 'in-use':
        self.servers_client.detach_volume(server_id, volume_id)
        waiters.wait_for_volume_status(self.volumes_client,
                                       volume_id, 'available')
    self.volumes_client.delete_volume(volume_id)
def resource_setup(cls):
    """Create 3 volumes for the listing tests, cleaning up on failure.

    Fixes: re-raise with a bare ``raise`` instead of ``raise exc`` so
    the original traceback is preserved, and call ``LOG.exception``
    with a lazy-formatted message (it logs the active traceback
    itself; passing the exception object as the message is redundant).
    """
    super(VolumesTestJSON, cls).resource_setup()
    # Create 3 Volumes
    cls.volume_list = []
    cls.volume_id_list = []
    for i in range(3):
        v_name = data_utils.rand_name(cls.__name__ + '-volume')
        metadata = {'Type': 'work'}
        try:
            volume = cls.client.create_volume(
                size=CONF.volume.volume_size,
                display_name=v_name,
                metadata=metadata)['volume']
            waiters.wait_for_volume_status(cls.client, volume['id'],
                                           'available')
            volume = cls.client.show_volume(volume['id'])['volume']
            cls.volume_list.append(volume)
            cls.volume_id_list.append(volume['id'])
        except Exception:
            LOG.exception('Failed to create volume %s', v_name)
            if cls.volume_list:
                # We could not create all the volumes, though we were
                # able to create *some* of the volumes. This is
                # typically because the backing file size of the volume
                # group is too small. Clean up whatever we did manage
                # to create before propagating the failure.
                for volume in cls.volume_list:
                    cls.delete_volume(volume['id'])
            # Bare raise preserves the original traceback.
            raise
def _create_volume_from_image(cls):
    """Create a bootable volume from the configured image.

    Fixes: logging calls now pass lazy ``%s`` arguments instead of
    eagerly interpolating with ``%``, so formatting only happens when
    the WARNING level is enabled.

    :returns: kwargs containing a ``block_device_mapping_v2`` entry
        that boots from the created volume.
    """
    clients = cls.os
    image_id = CONF.compute.image_ref
    volume_name = data_utils.rand_name('volume')
    # Prefer the v2 volumes client unless the v1 API is configured.
    volumes_client = clients.volumes_v2_client
    if CONF.volume_feature_enabled.api_v1:
        volumes_client = clients.volumes_client
    volume = volumes_client.create_volume(
        display_name=volume_name,
        imageRef=image_id,
        volume_type=CONF.volume.aws_volume_type,
        availability_zone=CONF.volume.aws_availability_zone)
    waiters.wait_for_volume_status(volumes_client,
                                   volume['volume']['id'], 'available')
    LOG.warning('volume: %s', volume)
    boot_volume = {
        'uuid': volume['volume']['id'],
        'source_type': 'volume',
        'destination_type': 'volume',
        'boot_index': 0,
        'delete_on_termination': True,
        'volume_size': 1,
    }
    kwargs = {'block_device_mapping_v2': [boot_volume]}
    LOG.warning('bdm_v2: %s', kwargs)
    return kwargs
def test_volume_swap(self):
    """Swap an attached volume for another and verify the attachment."""
    # NOTE(gmann): Volumes are created before server creation so that
    # volumes cleanup can happen successfully irrespective of which
    # volume is attached to server.
    volume1 = self.create_volume()
    volume2 = self.create_volume()
    server = self.create_test_server(wait_until='ACTIVE')
    self.attach_volume(server, volume1)

    # Swap volume from "volume1" to "volume2"
    self.servers_admin_client.update_attached_volume(
        server['id'], volume1['id'], volumeId=volume2['id'])
    waiters.wait_for_volume_status(self.volumes_client,
                                   volume1['id'], 'available')
    waiters.wait_for_volume_status(self.volumes_client,
                                   volume2['id'], 'in-use')
    self.addCleanup(self.servers_client.detach_volume,
                    server['id'], volume2['id'])

    # Only "volume2" should now be attached to the server.
    attachments = self.servers_client.list_volume_attachments(
        server['id'])['volumeAttachments']
    self.assertEqual(1, len(attachments))
    self.assertIn(volume2['id'], attachments[0]['volumeId'])
def _create_type_and_volume(self, backend_name_key, with_prefix):
    """Create a volume type pinned to a backend, then a volume of it.

    :param backend_name_key: value for the backend-name extra spec.
    :param with_prefix: whether to scope the spec key with
        'capabilities:'.
    """
    type_name = data_utils.rand_name('Type')
    vol_name = data_utils.rand_name('Volume')
    # The backend name may be given with or without the 'capabilities:'
    # scoping prefix in the type's extra specs.
    spec_key = ("capabilities:volume_backend_name" if with_prefix
                else "volume_backend_name")
    self.type = self.volume_types_client.create_volume_type(
        name=type_name,
        extra_specs={spec_key: backend_name_key})['volume_type']
    self.volume_type_id_list.append(self.type['id'])

    params = {self.name_field: vol_name, 'volume_type': type_name}
    self.volume = self.admin_volume_client.create_volume(
        **params)['volume']
    # Track the volume id in the matching cleanup list.
    if with_prefix:
        self.volume_id_list_with_prefix.append(self.volume['id'])
    else:
        self.volume_id_list_without_prefix.append(self.volume['id'])
    waiters.wait_for_volume_status(self.admin_volume_client,
                                   self.volume['id'], 'available')
def test_consistencygroup_cgsnapshot_create_delete(self):
    """Create a CG with a volume, snapshot the CG, then delete all."""
    # Create volume type
    name = data_utils.rand_name("volume-type")
    volume_type = self.admin_volume_types_client.create_volume_type(
        name=name)['volume_type']

    # Create the consistency group from that type.
    cg_name = data_utils.rand_name('CG')
    cg = self.consistencygroups_adm_client.create_consistencygroup(
        volume_type['id'], name=cg_name)['consistencygroup']

    # Create a volume inside the consistency group. The name field key
    # depends on the API version in use.
    vol_name = data_utils.rand_name("volume")
    self.name_field = self.special_fields['name_field']
    params = {self.name_field: vol_name,
              'volume_type': volume_type['id'],
              'consistencygroup_id': cg['id'],
              'size': CONF.volume.volume_size}
    volume = self.admin_volume_client.create_volume(**params)['volume']
    waiters.wait_for_volume_status(self.admin_volume_client,
                                   volume['id'], 'available')
    self.consistencygroups_adm_client.wait_for_consistencygroup_status(
        cg['id'], 'available')
    self.assertEqual(cg_name, cg['name'])

    # Snapshot the CG, then wait for every member snapshot and finally
    # the cgsnapshot itself to become available.
    cgsnapshot_name = data_utils.rand_name('cgsnapshot')
    cgsnapshot = self.consistencygroups_adm_client.create_cgsnapshot(
        cg['id'], name=cgsnapshot_name)['cgsnapshot']
    snapshots = self.admin_snapshots_client.list_snapshots(
        detail=True)['snapshots']
    for snap in snapshots:
        if volume['id'] == snap['volume_id']:
            waiters.wait_for_snapshot_status(self.admin_snapshots_client,
                                             snap['id'], 'available')
    self.consistencygroups_adm_client.wait_for_cgsnapshot_status(
        cgsnapshot['id'], 'available')
    self.assertEqual(cgsnapshot_name, cgsnapshot['name'])

    # Get a given CG snapshot
    cgsnapshot = self.consistencygroups_adm_client.show_cgsnapshot(
        cgsnapshot['id'])['cgsnapshot']
    self.assertEqual(cgsnapshot_name, cgsnapshot['name'])

    # Get all CG snapshots with detail
    cgsnapshots = self.consistencygroups_adm_client.list_cgsnapshots(
        detail=True)['cgsnapshots']
    self.assertIn((cgsnapshot['name'], cgsnapshot['id']),
                  [(m['name'], m['id']) for m in cgsnapshots])

    # Clean up
    self._delete_cgsnapshot(cgsnapshot['id'], cg['id'])
    self._delete_consistencygroup(cg['id'])
    self.admin_volume_types_client.delete_volume_type(volume_type['id'])
def test_rebuild_server_with_volume_attached(self):
    """Rebuild a server and check its attached volume survives."""
    # Create a fresh volume and attach it to the server under test.
    volume = self.volumes_client.create_volume(
        size=CONF.volume.volume_size)['volume']
    self.addCleanup(self.volumes_client.delete_volume, volume['id'])
    waiters.wait_for_volume_status(self.volumes_client, volume['id'],
                                   'available')
    self.client.attach_volume(self.server_id, volumeId=volume['id'])
    # Cleanups run LIFO: detach first, then wait for 'available'.
    self.addCleanup(waiters.wait_for_volume_status, self.volumes_client,
                    volume['id'], 'available')
    self.addCleanup(self.client.detach_volume, self.server_id,
                    volume['id'])
    waiters.wait_for_volume_status(self.volumes_client, volume['id'],
                                   'in-use')

    # run general rebuild test
    self.test_rebuild_server()

    # make sure the volume is attached to the instance after rebuild
    vol_after_rebuild = self.volumes_client.show_volume(
        volume['id'])['volume']
    self.assertEqual('in-use', vol_after_rebuild['status'])
    self.assertEqual(self.server_id,
                     vol_after_rebuild['attachments'][0]['server_id'])
def test_volume_backup_create_get_detailed_list_restore_delete(self):
    """Create, show, list, restore and delete a volume backup."""
    # Create backup
    backup_name = data_utils.rand_name('Backup')
    backup = self.backups_adm_client.create_backup(
        volume_id=self.volume['id'], name=backup_name)['backup']
    self.addCleanup(self.backups_adm_client.delete_backup, backup['id'])
    self.assertEqual(backup_name, backup['name'])
    waiters.wait_for_volume_status(self.admin_volume_client,
                                   self.volume['id'], 'available')
    self.backups_adm_client.wait_for_backup_status(backup['id'],
                                                   'available')

    # Get a given backup
    backup = self.backups_adm_client.show_backup(backup['id'])['backup']
    self.assertEqual(backup_name, backup['name'])

    # Get all backups with detail
    backups = self.backups_adm_client.list_backups(detail=True)['backups']
    self.assertIn((backup['name'], backup['id']),
                  [(m['name'], m['id']) for m in backups])

    # Restore backup; the restore target volume is deleted on teardown.
    restore = self.backups_adm_client.restore_backup(
        backup['id'])['restore']
    self.addCleanup(self.admin_volume_client.delete_volume,
                    restore['volume_id'])
    self.assertEqual(backup['id'], restore['backup_id'])
    self.backups_adm_client.wait_for_backup_status(backup['id'],
                                                   'available')
    waiters.wait_for_volume_status(self.admin_volume_client,
                                   restore['volume_id'], 'available')
def test_volume_snapshot_create_get_list_delete(self):
    """Snapshot a volume, then show, list and delete the snapshot."""
    v_name = data_utils.rand_name('Volume')
    volume = self.volumes_client.create_volume(
        size=CONF.volume.volume_size,
        display_name=v_name)['volume']
    self.addCleanup(self.delete_volume, volume['id'])
    waiters.wait_for_volume_status(self.volumes_client, volume['id'],
                                   'available')

    # Create snapshot
    s_name = data_utils.rand_name('Snapshot')
    snapshot = self.snapshots_client.create_snapshot(
        volume_id=volume['id'], display_name=s_name)['snapshot']

    def delete_snapshot(snapshot_id):
        # The snapshot must be fully available before deletion.
        waiters.wait_for_snapshot_status(self.snapshots_client,
                                         snapshot_id, 'available')
        # Delete snapshot
        self.snapshots_client.delete_snapshot(snapshot_id)
        self.snapshots_client.wait_for_resource_deletion(snapshot_id)

    self.addCleanup(delete_snapshot, snapshot['id'])
    self.assertEqual(volume['id'], snapshot['volumeId'])

    # Get snapshot
    fetched_snapshot = self.snapshots_client.show_snapshot(
        snapshot['id'])['snapshot']
    self.assertEqual(s_name, fetched_snapshot['displayName'])
    self.assertEqual(volume['id'], fetched_snapshot['volumeId'])

    # Fetch all snapshots
    snapshots = self.snapshots_client.list_snapshots()['snapshots']
    self.assertIn(snapshot['id'], [s['id'] for s in snapshots])
def test_volume_backup_create_get_detailed_list_restore_delete(self):
    """Exercise the full backup lifecycle against a fresh volume."""
    volume = self.create_volume()
    self.addCleanup(self.volumes_client.delete_volume, volume['id'])

    # Create backup
    backup_name = data_utils.rand_name(
        self.__class__.__name__ + '-Backup')
    description = data_utils.rand_name("volume-backup-description")
    backup = self.create_backup(volume_id=volume['id'],
                                name=backup_name,
                                description=description)
    self.assertEqual(backup_name, backup['name'])
    waiters.wait_for_volume_status(self.volumes_client,
                                   volume['id'], 'available')

    # Get a given backup
    backup = self.backups_client.show_backup(backup['id'])['backup']
    self.assertEqual(backup_name, backup['name'])
    self.assertEqual(description, backup['description'])

    # Get all backups with detail
    backups = self.backups_client.list_backups(detail=True)['backups']
    self.assertIn((backup['name'], backup['id']),
                  [(m['name'], m['id']) for m in backups])

    self.restore_backup(backup['id'])
def test_backup_create_attached_volume(self):
    """Test backup create using force flag.

    Cinder allows to create a volume backup, whether the volume status
    is "available" or "in-use".
    """
    # Prepare a server with the volume attached.
    volume = self.create_volume()
    self.addCleanup(self.volumes_client.delete_volume, volume['id'])
    server_name = data_utils.rand_name(
        self.__class__.__name__ + '-instance')
    server = self.create_server(name=server_name, wait_until='ACTIVE')
    self.addCleanup(self.servers_client.delete_server, server['id'])
    self.servers_client.attach_volume(server['id'], volumeId=volume['id'])
    waiters.wait_for_volume_status(self.volumes_client, volume['id'],
                                   'in-use')
    # Cleanups run LIFO: detach first, then wait for 'available'.
    self.addCleanup(waiters.wait_for_volume_status, self.volumes_client,
                    volume['id'], 'available')
    self.addCleanup(self.servers_client.detach_volume, server['id'],
                    volume['id'])

    # force=True lets the backup proceed despite the in-use status.
    backup_name = data_utils.rand_name(
        self.__class__.__name__ + '-Backup')
    backup = self.backups_client.create_backup(
        volume_id=volume['id'], name=backup_name, force=True)['backup']
    self.addCleanup(self.backups_client.delete_backup, backup['id'])
    self.backups_client.wait_for_backup_status(backup['id'], 'available')
    self.assertEqual(backup_name, backup['name'])
def test_volume_snapshot_create_get_list_delete(self):
    """Snapshot a volume, then show, list and delete the snapshot."""
    v_name = data_utils.rand_name('Volume')
    volume = self.volumes_client.create_volume(
        size=CONF.volume.volume_size,
        display_name=v_name)['volume']
    self.addCleanup(self.delete_volume, volume['id'])
    waiters.wait_for_volume_status(self.volumes_client, volume['id'],
                                   'available')

    # Create snapshot
    s_name = data_utils.rand_name('Snapshot')
    snapshot = self.snapshots_client.create_snapshot(
        volume['id'], display_name=s_name)['snapshot']

    def delete_snapshot(snapshot_id):
        # Wait until the snapshot is ready before removing it.
        waiters.wait_for_snapshot_status(self.snapshots_client,
                                         snapshot_id, 'available')
        # Delete snapshot
        self.snapshots_client.delete_snapshot(snapshot_id)
        self.snapshots_client.wait_for_resource_deletion(snapshot_id)

    self.addCleanup(delete_snapshot, snapshot['id'])
    self.assertEqual(volume['id'], snapshot['volumeId'])

    # Get snapshot
    fetched_snapshot = self.snapshots_client.show_snapshot(
        snapshot['id'])['snapshot']
    self.assertEqual(s_name, fetched_snapshot['displayName'])
    self.assertEqual(volume['id'], fetched_snapshot['volumeId'])

    # Fetch all snapshots
    snapshots = self.snapshots_client.list_snapshots()['snapshots']
    self.assertIn(snapshot['id'], [s['id'] for s in snapshots])
def _create_user_message(self):
    """Trigger a 'no valid host' situation to generate a message."""
    # Build a volume type whose extra specs no backend can satisfy.
    bad_protocol = data_utils.rand_name('storage_protocol')
    bad_vendor = data_utils.rand_name('vendor_name')
    extra_specs = {'storage_protocol': bad_protocol,
                   'vendor_name': bad_vendor}
    vol_type_name = data_utils.rand_name(
        self.__class__.__name__ + '-volume-type')
    bogus_type = self.admin_volume_types_client.create_volume_type(
        name=vol_type_name, extra_specs=extra_specs)['volume_type']
    self.addCleanup(self.admin_volume_types_client.delete_volume_type,
                    bogus_type['id'])

    # Scheduling a volume of that type must fail, producing a message.
    params = {'volume_type': bogus_type['id'],
              'size': CONF.volume.volume_size}
    volume = self.volumes_client.create_volume(**params)['volume']
    self.addCleanup(self.delete_volume, self.volumes_client, volume['id'])
    try:
        waiters.wait_for_volume_status(self.volumes_client,
                                       volume['id'], 'error')
    except exceptions.VolumeBuildErrorException:
        # Error state is expected and desired
        pass

    # Find the message generated for our volume.
    message_id = None
    for message in self.messages_client.list_messages()['messages']:
        if message['resource_uuid'] == volume['id']:
            message_id = message['id']
            break
    self.assertIsNotNone(message_id, 'No user message generated for '
                                     'volume %s' % volume['id'])
    return message_id
def test_volume_list_param_tenant(self):
    """List volumes of a single tenant via all_tenants + project_id."""
    # Create a volume in admin tenant
    adm_vol = self.admin_volume_vmware_client.create_volume()['volume']
    waiters.wait_for_volume_status(self.admin_volume_vmware_client,
                                   adm_vol['id'], 'available')
    self.addCleanup(self.admin_volume_vmware_client.delete_volume,
                    adm_vol['id'])

    # Getting volume list from primary tenant using admin credentials
    params = {'all_tenants': 1,
              'project_id': self.volumes_vmware_client.tenant_id}
    fetched_list = self.admin_volume_vmware_client.list_volumes(
        detail=True, params=params)['volumes']

    # Verifying fetched volume ids list is related to primary tenant
    fetched_list_ids = [vol['id'] for vol in fetched_list]
    expected_list_ids = [vol['id'] for vol in self.volume_list]
    self.assertEqual(sorted(expected_list_ids), sorted(fetched_list_ids))

    # Verifying tenant id of volumes fetched list is related to
    # primary tenant
    fetched_tenant_id = [vol['os-vol-tenant-attr:tenant_id']
                         for vol in fetched_list]
    expected_tenant_id = [self.volumes_vmware_client.tenant_id] * 3
    self.assertEqual(expected_tenant_id, fetched_tenant_id)
def _create_and_attach(self):
    """Boot a validatable server, then create and attach a volume."""
    # Both server and volume are placed in the configured AWS AZ.
    az = CONF.compute.aws_availability_zone

    # Start a server and wait for it to become ready
    self.admin_pass = self.image_ssh_password
    self.server = self.create_test_server(validatable=True,
                                          wait_until='ACTIVE',
                                          adminPass=self.admin_pass,
                                          availability_zone=az)
    # Record addresses so that we can ssh later
    self.server['addresses'] = self.servers_client.list_addresses(
        self.server['id'])['addresses']

    # Create a volume and wait for it to become ready
    self.volume = self.volumes_client.create_volume(
        size=CONF.volume.volume_size, display_name='test',
        availability_zone=az)['volume']
    self.addCleanup(self._delete_volume)
    waiters.wait_for_volume_status(self.volumes_client,
                                   self.volume['id'], 'available')

    # Attach the volume to the server
    self.attachment = self.servers_client.attach_volume(
        self.server['id'],
        volumeId=self.volume['id'],
        device='/dev/%s' % self.device)['volumeAttachment']
    waiters.wait_for_volume_status(self.volumes_client,
                                   self.volume['id'], 'in-use')
    self.addCleanup(self._detach, self.server['id'], self.volume['id'])
def _create_and_attach(self):
    """Boot a validatable server, then create and attach a volume."""
    # Boot the server; keep the admin password for later ssh checks.
    self.admin_pass = self.image_ssh_password
    self.server = self.create_test_server(validatable=True,
                                          wait_until='ACTIVE',
                                          adminPass=self.admin_pass)
    # Record addresses so that we can ssh later
    self.server['addresses'] = self.servers_client.list_addresses(
        self.server['id'])['addresses']

    # Create a volume and wait for it to become ready
    self.volume = self.volumes_client.create_volume(
        size=CONF.volume.volume_size, display_name='test')['volume']
    self.addCleanup(self._delete_volume)
    waiters.wait_for_volume_status(self.volumes_client,
                                   self.volume['id'], 'available')

    # Attach the volume and register the detach for teardown.
    self.attachment = self.servers_client.attach_volume(
        self.server['id'],
        volumeId=self.volume['id'],
        device='/dev/%s' % self.device)['volumeAttachment']
    waiters.wait_for_volume_status(self.volumes_client,
                                   self.volume['id'], 'in-use')
    self.addCleanup(self._detach, self.server['id'], self.volume['id'])
def attach_volume(self, server, volume, device=None):
    """Attaches volume to server and waits for 'in-use' volume status.

    The volume will be detached when the test tears down.

    :param server: The server to which the volume will be attached.
    :param volume: The volume to attach.
    :param device: Optional mountpoint for the attached volume. Note that
        this is not guaranteed for all hypervisors and is not recommended.
    """
    attach_args = dict(volumeId=volume['id'])
    if device:
        attach_args['device'] = device
    self.servers_client.attach_volume(server['id'], **attach_args)
    # Cleanups run LIFO: register the wait for 'available' first so it
    # runs after the detach below, letting volume deletion succeed.
    self.addCleanup(waiters.wait_for_volume_status, self.volumes_client,
                    volume['id'], 'available')
    # Ignore 404s on detach in case the server is deleted or the
    # volume is already detached.
    self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                    self.servers_client.detach_volume,
                    server['id'], volume['id'])
    waiters.wait_for_volume_status(self.volumes_client, volume['id'],
                                   'in-use')
def _create_volume_from_image(cls):
    """Create a bootable volume from the configured image.

    Fixes: logging calls now pass lazy ``%s`` arguments instead of
    eagerly interpolating with ``%``, so formatting only happens when
    the WARNING level is enabled.

    :returns: kwargs containing a ``block_device_mapping_v2`` entry
        that boots from the created volume.
    """
    clients = cls.os
    image_id = CONF.compute.image_ref
    volume_name = data_utils.rand_name('volume')
    # Prefer the v2 volumes client unless the v1 API is configured.
    volumes_client = clients.volumes_v2_client
    if CONF.volume_feature_enabled.api_v1:
        volumes_client = clients.volumes_client
    volume = volumes_client.create_volume(
        display_name=volume_name,
        imageRef=image_id,
        volume_type=CONF.volume.aws_volume_type,
        availability_zone=CONF.volume.aws_availability_zone)
    waiters.wait_for_volume_status(volumes_client,
                                   volume['volume']['id'], 'available')
    LOG.warning('volume: %s', volume)
    boot_volume = {
        'uuid': volume['volume']['id'],
        'source_type': 'volume',
        'destination_type': 'volume',
        'boot_index': 0,
        'delete_on_termination': True,
        'volume_size': 1,
    }
    kwargs = {'block_device_mapping_v2': [boot_volume]}
    LOG.warning('bdm_v2: %s', kwargs)
    return kwargs
def _create_type_and_volume(cls, backend_name_key, with_prefix):
    """Create a backend-pinned volume type and a volume of that type.

    :param backend_name_key: value for the backend-name extra spec.
    :param with_prefix: whether to scope the spec key with
        'capabilities:'.
    """
    type_name = data_utils.rand_name(cls.__name__ + '-Type')
    vol_name = data_utils.rand_name(cls.__name__ + '-Volume')
    # The backend-name spec key may carry the 'capabilities:' prefix.
    if with_prefix:
        extra_specs = {"capabilities:volume_backend_name":
                       backend_name_key}
    else:
        extra_specs = {"volume_backend_name": backend_name_key}
    cls.type = cls.create_volume_type(name=type_name,
                                      extra_specs=extra_specs)

    params = {cls.name_field: vol_name,
              'volume_type': type_name,
              'size': CONF.volume.volume_size}
    cls.volume = cls.admin_volume_client.create_volume(**params)['volume']
    # Track the volume id in the matching cleanup list.
    if with_prefix:
        cls.volume_id_list_with_prefix.append(cls.volume['id'])
    else:
        cls.volume_id_list_without_prefix.append(cls.volume['id'])
    waiters.wait_for_volume_status(cls.admin_volume_client,
                                   cls.volume['id'], 'available')
def test_create_failover_reverse_enable_disable_delete_replication(self):
    """Drive an SGS replication through its full state machine.

    Sequence: create -> failover -> reverse -> swap volume attachments
    -> enable -> disable -> delete, waiting on the expected replication
    status after each transition.
    """
    name = data_utils.rand_name('tempest-replication')
    # First volume of each pre-built list acts as master/slave pair.
    master_vol = self.local_volid_list[0]
    slave_vol = self.replication_volid_list[0]
    kwargs = {
        'name': name,
        'description': 'this replication is for jenkins test.',
    }
    # Creating a replication leaves it in 'enabled' state.
    replication = self.client.create_replication(master_vol, slave_vol,
                                                 **kwargs)['replication']
    LOG.info("@@@create replication %s" % (replication['id']))
    sgs_waiters.wait_for_sgs_replication_status(self.client,
                                                replication['id'],
                                                'enabled')
    # Fail the replication over to the slave side.
    self.client.failover_replication(replication['id'])
    LOG.info("@@@failover replication %s" % (replication['id']))
    sgs_waiters.wait_for_sgs_replication_status(self.client,
                                                replication['id'],
                                                'failed-over')
    # Reversing the direction lands the replication in 'disabled'.
    self.client.reverse_replication(replication['id'])
    LOG.info("@@@reverse replication %s" % (replication['id']))
    sgs_waiters.wait_for_sgs_replication_status(self.client,
                                                replication['id'],
                                                'disabled')
    # Swap the attachments: attach the (new master) slave volume and
    # detach the old master.
    LOG.info("@@@attach replication volume:%s" % (slave_vol))
    self.sgs_volume_client.attach_volume(self.replication_test_vm_id,
                                         slave_vol)
    LOG.info("@@@detach local volume:%s" % (master_vol))
    self.sgs_volume_client.detach_volume(self.local_test_vm_id,
                                         master_vol)
    waiters.wait_for_volume_status(self.sgs_volume_client, slave_vol,
                                   'in-use')
    # NOTE(review): 'enabled' looks like an SGS-specific volume status
    # (not a plain Cinder status such as 'available') -- confirm
    # against the sgs volume client's state machine.
    waiters.wait_for_volume_status(self.sgs_volume_client, master_vol,
                                   'enabled')
    # Re-enable, then disable the replication.
    self.client.enable_replication(replication['id'])
    LOG.info("@@@enable replication %s" % (replication['id']))
    sgs_waiters.wait_for_sgs_replication_status(self.client,
                                                replication['id'],
                                                'enabled')
    self.client.disable_replication(replication['id'])
    LOG.info("@@@disable replication %s" % (replication['id']))
    sgs_waiters.wait_for_sgs_replication_status(self.client,
                                                replication['id'],
                                                'disabled')
    # Only a disabled replication can be deleted.
    self.client.delete_replication(replication['id'])
    LOG.info("@@@delete replication %s" % (replication['id']))
    self.client.wait_for_resource_deletion(replication['id'])
def test_volume_create_get_delete(self):
    """CREATE, GET and DELETE a volume, checking fields along the way."""
    # CREATE, GET, DELETE Volume
    volume = None
    v_name = data_utils.rand_name("Volume")
    metadata = {"Type": "work"}
    # Create volume (note: this client returns the volume body directly,
    # without a 'volume' wrapper key).
    volume = self.client.create_volume(size=CONF.volume.volume_size,
                                       display_name=v_name,
                                       metadata=metadata)
    self.addCleanup(self.delete_volume, volume["id"])
    self.assertIn("id", volume)
    self.assertIn("displayName", volume)
    self.assertEqual(volume["displayName"], v_name,
                     "The created volume name is not equal "
                     "to the requested name")
    self.assertTrue(volume["id"] is not None,
                    "Field volume id is empty or not found.")
    # Wait for Volume status to become ACTIVE
    waiters.wait_for_volume_status(self.client, volume["id"], "available")
    # GET Volume
    fetched_volume = self.client.show_volume(volume["id"])
    # Verification of details of fetched Volume
    self.assertEqual(v_name, fetched_volume["displayName"],
                     "The fetched Volume is different "
                     "from the created Volume")
    self.assertEqual(volume["id"], fetched_volume["id"],
                     "The fetched Volume is different "
                     "from the created Volume")
    self.assertThat(fetched_volume["metadata"].items(),
                    matchers.ContainsAll(metadata.items()),
                    "The fetched Volume metadata misses data "
                    "from the created Volume")
def _create_volume(self):
    """Create a test volume, wait for 'available', and return it."""
    vol_name = data_utils.rand_name(self.__class__.__name__ + '_volume')
    volume = self.volumes_extensions_client.create_volume(
        CONF.volume.volume_size, display_name=vol_name)
    # Register deletion now so the volume is cleaned up on any failure.
    self.addCleanup(self.delete_volume, volume['id'])
    waiters.wait_for_volume_status(self.volumes_extensions_client,
                                   volume['id'], 'available')
    return volume
def test_create_volume_burn_in(self):
    """Create a volume and wait for it to become available."""
    name = data_utils.rand_name('volume')
    volume = self.volumes_client.create_volume(
        size=CONF.volume.volume_size, display_name=name)['volume']
    # Track the volume so class-level cleanup can remove it.
    self.volumes.append(volume)
    self.assertEqual(name, volume['display_name'])
    waiters.wait_for_volume_status(self.volumes_client, volume['id'],
                                   'available')
def _create_temp_volume(self):
    """Create and return a short-lived volume for the force-delete
    tests; waits until it is 'available'."""
    create_kwargs = {self.name_field: utils.rand_name('Volume')}
    temp_volume = self.client.create_volume(**create_kwargs)['volume']
    waiters.wait_for_volume_status(self.client, temp_volume['id'],
                                   'available')
    return temp_volume
def test_volume_extend(self):
    """Extend an available volume by 1 GB and verify the new size."""
    self.volume = self.create_volume()
    target_size = int(self.volume['size']) + 1
    self.client.extend_volume(self.volume['id'], new_size=target_size)
    # Extend is asynchronous; the volume returns to 'available' when done.
    waiters.wait_for_volume_status(self.client, self.volume['id'],
                                   'available')
    refreshed = self.client.show_volume(self.volume['id'])['volume']
    self.assertEqual(int(refreshed['size']), target_size)
def test_consistencygroup_cgsnapshot_create_delete(self):
    """Create a CG with one volume, snapshot it, verify the snapshot
    via show/list, then tear everything down.

    Steps: volume type -> consistency group -> volume in the group ->
    cgsnapshot -> show/list checks -> delete cgsnapshot, CG and type.
    """
    # Create volume type
    name = data_utils.rand_name("volume-type")
    volume_type = self.admin_volume_types_client.create_volume_type(
        name=name)['volume_type']
    # Create CG
    cg_name = data_utils.rand_name('CG')
    create_consistencygroup = (
        self.consistencygroups_adm_client.create_consistencygroup)
    cg = create_consistencygroup(volume_type['id'],
                                 name=cg_name)['consistencygroup']
    vol_name = data_utils.rand_name("volume")
    self.name_field = self.special_fields['name_field']
    params = {self.name_field: vol_name,
              'volume_type': volume_type['id'],
              'consistencygroup_id': cg['id'],
              'size': CONF.volume.volume_size}
    # Create volume
    volume = self.admin_volume_client.create_volume(**params)['volume']
    waiters.wait_for_volume_status(self.admin_volume_client,
                                   volume['id'], 'available')
    self.consistencygroups_adm_client.wait_for_consistencygroup_status(
        cg['id'], 'available')
    self.assertEqual(cg_name, cg['name'])
    # Create cgsnapshot
    cgsnapshot_name = data_utils.rand_name('cgsnapshot')
    create_cgsnapshot = (
        self.consistencygroups_adm_client.create_cgsnapshot)
    cgsnapshot = create_cgsnapshot(cg['id'],
                                   name=cgsnapshot_name)['cgsnapshot']
    # Each per-volume snapshot spawned by the cgsnapshot must reach
    # 'available' before the cgsnapshot itself can.
    snapshots = self.admin_snapshots_client.list_snapshots(
        detail=True)['snapshots']
    for snap in snapshots:
        if volume['id'] == snap['volume_id']:
            waiters.wait_for_snapshot_status(self.admin_snapshots_client,
                                             snap['id'], 'available')
    self.consistencygroups_adm_client.wait_for_cgsnapshot_status(
        cgsnapshot['id'], 'available')
    self.assertEqual(cgsnapshot_name, cgsnapshot['name'])
    # Get a given CG snapshot
    cgsnapshot = self.consistencygroups_adm_client.show_cgsnapshot(
        cgsnapshot['id'])['cgsnapshot']
    self.assertEqual(cgsnapshot_name, cgsnapshot['name'])
    # Get all CG snapshots with detail
    cgsnapshots = self.consistencygroups_adm_client.list_cgsnapshots(
        detail=True)['cgsnapshots']
    self.assertIn((cgsnapshot['name'], cgsnapshot['id']),
                  [(m['name'], m['id']) for m in cgsnapshots])
    # Clean up in dependency order: snapshot, then group, then type.
    self._delete_cgsnapshot(cgsnapshot['id'], cg['id'])
    self._delete_consistencygroup(cg['id'])
    self.admin_volume_types_client.delete_volume_type(volume_type['id'])
def test_attach_detach_volume_to_instance(self):
    """Attach the shared volume to the shared server, then detach it.

    Bug fix: both status waits referenced the undefined name ``cls``
    inside an instance method (a guaranteed NameError); they now use
    ``self.client`` like the attach/detach calls.
    """
    # Volume is attached and detached successfully from an instance
    mountpoint = '/dev/vdc'
    self.client.attach_volume(self.volume['id'],
                              instance_uuid=self.server['id'],
                              mountpoint=mountpoint)
    waiters.wait_for_volume_status(self.client, self.volume['id'],
                                   'in-use')
    self.client.detach_volume(self.volume['id'])
    waiters.wait_for_volume_status(self.client, self.volume['id'],
                                   'available')
def _detach_volume(self, server_id, volume_id):
    """Best-effort detach: detach the volume and wait for 'available';
    a 404 means it was already detached (or the server is gone).

    Fix: pass the log arguments lazily instead of eagerly formatting
    with ``%`` — consistent with the other ``_detach_volume`` helper
    in this file and standard logging practice.
    """
    try:
        self.servers_client.detach_volume(server_id, volume_id)
        waiters.wait_for_volume_status(self.volumes_client, volume_id,
                                       'available')
    except lib_exc.NotFound:
        LOG.warning("Unable to detach volume %s from server %s "
                    "possibly it was already detached",
                    volume_id, server_id)
def _create_temp_volume(self):
    """Build and return a throwaway volume for the force-delete tests,
    waiting until Cinder reports it 'available'."""
    random_name = utils.rand_name(self.__class__.__name__ + '-Volume')
    temp_volume = self.client.create_volume(
        **{self.name_field: random_name})['volume']
    waiters.wait_for_volume_status(self.client, temp_volume['id'],
                                   'available')
    return temp_volume
def test_attach_volume_to_server_burn_in(self):
    """Attach the first test volume to the first test server and wait
    for the volume to become 'in-use'; detach on teardown."""
    server_id = self.servers[0]['id']
    volume_id = self.volumes[0]['id']
    self.attachment = self.servers_client.attach_volume(
        server_id,
        volumeId=volume_id,
        device='/dev/%s' % self.device)['volumeAttachment']
    waiters.wait_for_volume_status(self.volumes_client, volume_id,
                                   'in-use')
    # Undo the attachment when the test finishes.
    self.addCleanup(self._detach, server_id, volume_id)
def _detach_volume(self, server_id, volume_id):
    """Detach the volume and wait for 'available', swallowing a 404
    (the attachment or server may already be gone)."""
    volumes = self.volumes_client
    try:
        self.servers_client.detach_volume(server_id, volume_id)
        waiters.wait_for_volume_status(volumes, volume_id, 'available')
    except lib_exc.NotFound:
        LOG.warning(
            "Unable to detach volume %s from server %s "
            "possibly it was already detached",
            volume_id, server_id)
def prepare_replication_sg_volumes(cls):
    """Create a replication-side volume, enable SG protection on it,
    and return it once its status reaches 'enabled'."""
    sg_volume = cls.create_replication_volume()
    vol_id = sg_volume['id']
    LOG.info("@@@enable volume id:%s" % vol_id)
    cls.sgs_volume_client.enable_volume(vol_id)
    waiters.wait_for_volume_status(cls.sgs_volume_client, vol_id,
                                   'enabled')
    # NOTE: a replication volume is expected to stay unattached here —
    # confirm whether attaching it would be invalid.
    return sg_volume
def resource_setup(cls): super(ResourcesV1TestJSON, cls).resource_setup() # cls.keypair_ref = CONF.conveyor.origin_keypair_ref cls.secgroup_ref = CONF.conveyor.origin_security_group_ref cls.net_ref = CONF.conveyor.origin_net_ref # cls.public_net_ref = CONF.conveyor.public_net_ref cls.floating_ip_pool_ref = CONF.conveyor.floating_ip_pool_ref # cls.subnet_ref = CONF.conveyor.origin_subnet_ref cls.image_ref = CONF.conveyor.image_ref cls.flavor_ref = CONF.conveyor.flavor_ref cls.availability_zone_ref = CONF.conveyor.availability_zone cls.volume_size = CONF.conveyor.volume_size cls.volume_type_ref = CONF.conveyor.volume_type cls.meta = {'hello': 'world'} cls.name = data_utils.rand_name('server') cls.password = data_utils.rand_password() networks = [{'uuid': cls.net_ref}] server_initial = cls.create_server( networks=networks, wait_until='ACTIVE', # name=cls.name, name="server_resource", metadata=cls.meta, adminPass=cls.password, # key_name=key_name, # security_groups=cls.secgroup_ref, availability_zone=cls.availability_zone_ref) cls.server = (cls.servers_client.show_server( server_initial['id'])['server']) cls.servers.append(cls.server) cls.volume = cls.volumes_client.create_volume( size=cls.volume_size, # display_name='volume01', display_name='volume_resource', availability_zone=cls.availability_zone_ref, volume_type=cls.volume_type_ref)['volume'] cls.volumes.append(cls.volume) waiters.wait_for_volume_status(cls.volumes_client, cls.volume['id'], 'available') # Attach the volume to the server cls.servers_client.attach_volume( server_initial['id'], volumeId=cls.volume['id'])['volumeAttachment'] waiters.wait_for_volume_status(cls.volumes_client, cls.volume['id'], 'in-use') cls.server = (cls.servers_client.show_server( server_initial['id'])['server'])
def test_volume_crud_with_volume_type_and_extra_specs(self):
    # Create/update/get/delete volume with volume_type and extra spec.
    volume_types = list()
    vol_name = data_utils.rand_name(self.__class__.__name__ + '-volume')
    self.name_field = self.special_fields['name_field']
    # NOTE(review): proto and vendor are read from config but never
    # used below — presumably leftover; confirm before removing.
    proto = CONF.volume.storage_protocol
    vendor = CONF.volume.vendor_name
    volume_backend_name = CONF.volume_vmware.volume_backend_name
    # Both types share the same backend so a retype between them is a
    # metadata-only operation.
    extra_specs = {"volume_backend_name": volume_backend_name}
    # Create two volume_types
    for i in range(2):
        vol_type_name = data_utils.rand_name(self.__class__.__name__ +
                                             '-volume-type')
        vol_type = self.create_volume_type(name=vol_type_name,
                                           extra_specs=extra_specs)
        volume_types.append(vol_type)
    params = {
        self.name_field: vol_name,
        'volume_type': volume_types[0]['id']
    }
    # Create volume
    volume = self.volumes_vmware_client.create_volume(**params)['volume']
    self.addCleanup(self.delete_volume, self.volumes_vmware_client,
                    volume['id'])
    self.assertEqual(volume_types[0]['name'], volume["volume_type"])
    self.assertEqual(
        volume[self.name_field], vol_name,
        "The created volume name is not equal "
        "to the requested name")
    self.assertIsNotNone(volume['id'],
                         "Field volume id is empty or not found.")
    waiters.wait_for_volume_status(self.volumes_vmware_client,
                                   volume['id'], 'available')
    # Update volume with new volume_type
    self.volumes_vmware_client.retype_volume(
        volume['id'], new_type=volume_types[1]['id'])
    # Retype is asynchronous; wait for the volume to settle again.
    waiters.wait_for_volume_status(self.volumes_vmware_client,
                                   volume['id'], 'available')
    # Get volume details and Verify
    fetched_volume = self.volumes_vmware_client.show_volume(
        volume['id'])['volume']
    self.assertEqual(
        volume_types[1]['name'], fetched_volume['volume_type'],
        'The fetched Volume type is different '
        'from updated volume type')
    self.assertEqual(
        vol_name, fetched_volume[self.name_field],
        'The fetched Volume is different '
        'from the created Volume')
    self.assertEqual(
        volume['id'], fetched_volume['id'],
        'The fetched Volume is different '
        'from the created Volume')
def resource_setup(cls):
    """Create the shared server and volume used by the action tests."""
    super(VolumesV2ActionsTest, cls).resource_setup()
    # Create a test shared instance
    instance_name = data_utils.rand_name(cls.__name__ + '-Instance')
    cls.server = cls.create_server(name=instance_name,
                                   wait_until='ACTIVE')
    # Create a test shared volume for attach/detach tests and make
    # sure it is usable before any test runs.
    cls.volume = cls.create_volume()
    waiters.wait_for_volume_status(cls.client, cls.volume['id'],
                                   'available')
def test_create_volume_from_snapshot(self):
    """For every local volume: snapshot it, create a new volume from
    the snapshot, then delete that volume and clean the snapshot."""
    for volume_id in self.local_volid_list:
        snapshot = self.client.create_snapshot(volume_id)['snapshot']
        volume = self.sgs_volume_client.create_volume(
            snapshot_id=snapshot['id'])['volume']
        # NOTE(review): the volume is created via sgs_volume_client but
        # waited on and deleted via volumes_client — presumably both
        # address the same backing Cinder volume; confirm intentional.
        waiters.wait_for_volume_status(self.volumes_client,
                                       volume['id'], 'available')
        self.volumes_client.delete_volume(volume['id'])
        self.volumes_client.wait_for_resource_deletion(volume['id'])
        self.cleanup_snapshot(snapshot)
def test_attach_detach_volume_to_instance(self):
    """Boot a server, attach the shared volume to it, then detach."""
    # Create a server
    new_server = self.create_server(wait_until="ACTIVE")
    device_path = "/dev/%s" % CONF.compute.volume_device_name
    # Volume is attached and detached successfully from an instance
    self.client.attach_volume(self.volume["id"],
                              instance_uuid=new_server["id"],
                              mountpoint=device_path)
    waiters.wait_for_volume_status(self.client, self.volume["id"],
                                   "in-use")
    self.client.detach_volume(self.volume["id"])
    waiters.wait_for_volume_status(self.client, self.volume["id"],
                                   "available")
def resource_setup(cls):
    """Create one shared volume for every test in this class."""
    super(VolumesActionsV2Test, cls).resource_setup()
    # Create a test shared volume for tests
    cls.name_field = cls.special_fields['name_field']
    create_kwargs = {
        cls.name_field: utils.rand_name(cls.__name__ + '-Volume'),
    }
    cls.volume = cls.client.create_volume(**create_kwargs)['volume']
    waiters.wait_for_volume_status(cls.client, cls.volume['id'],
                                   'available')
def test_attach_detach_volume_to_instance(self):
    """Attach the shared volume to the shared server, wait for
    'in-use', then detach and wait for 'available'."""
    vol_id = self.volume['id']
    mountpoint = '/dev/%s' % CONF.compute.volume_device_name
    # Volume is attached and detached successfully from an instance
    self.client.attach_volume(vol_id,
                              instance_uuid=self.server['id'],
                              mountpoint=mountpoint)
    waiters.wait_for_volume_status(self.client, vol_id, 'in-use')
    self.client.detach_volume(vol_id)
    waiters.wait_for_volume_status(self.client, vol_id, 'available')
def attach_volume(self, server_id, volume_id):
    """Attaches a volume to a server and waits for 'in-use'.

    The attachment is undone on teardown. Fixes: the cleanup detach is
    now wrapped in ``test_utils.call_and_ignore_notfound_exc`` (as the
    other attach helper in this file does) so teardown does not fail
    when the server was already deleted or the volume already detached;
    also fixed the "Attachs" docstring typo.

    :param server_id: ID of the server to attach to.
    :param volume_id: ID of the volume to attach.
    """
    self.servers_client.attach_volume(
        server_id, volumeId=volume_id,
        device='/dev/%s' % CONF.compute.volume_device_name)
    waiters.wait_for_volume_status(self.volumes_client,
                                   volume_id, 'in-use')
    # Cleanups run LIFO: detach first, then wait for 'available' so
    # the volume can be deleted without error during teardown.
    self.addCleanup(waiters.wait_for_volume_status, self.volumes_client,
                    volume_id, 'available')
    self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                    self.servers_client.detach_volume,
                    server_id, volume_id)