Example #1
    def test_create_group_from_group(self):
        # Create volume type
        volume_type = self.create_volume_type()

        # Create group type
        group_type = self.create_group_type()

        # Create Group
        grp = self._create_group(group_type, volume_type)

        # Create volume
        self.create_volume(volume_type=volume_type['id'], group_id=grp['id'])

        # Create Group from Group
        grp_name2 = data_utils.rand_name('Group_from_grp')
        grp2 = self.groups_client.create_group_from_source(
            source_group_id=grp['id'], name=grp_name2)['group']
        self.addCleanup(self._delete_group, grp2['id'])
        self.assertEqual(grp_name2, grp2['name'])
        vols = self.volumes_client.list_volumes(detail=True)['volumes']
        for vol in vols:
            if vol['group_id'] == grp2['id']:
                waiters.wait_for_volume_resource_status(
                    self.volumes_client, vol['id'], 'available')
        waiters.wait_for_volume_resource_status(
            self.groups_client, grp2['id'], 'available')
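Every example on this page funnels through the same waiter, so it is worth sketching what it does. This is a minimal stand-in, not tempest's implementation: the real waiter derives the resource name ('volume', 'snapshot', 'backup', 'group', ...) from the client class, fails fast when the resource lands in an error status, and reads its timing from the client's build_interval/build_timeout.

import time

def wait_for_status(show_resource, resource_id, status,
                    interval=1, timeout=300):
    # show_resource is any show call, e.g. volumes_client.show_volume;
    # responses are wrapped, e.g. {'volume': {...}}.
    start = time.time()
    while time.time() - start < timeout:
        resource = list(show_resource(resource_id).values())[0]
        if resource['status'] == status:
            return resource
        time.sleep(interval)
    raise TimeoutError('%s did not reach status %s within %ss'
                       % (resource_id, status, timeout))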
Example #2
    def test_get_volume_attachment(self):
        # Create a server
        server = self.create_server()
        # Verify that a volume's attachment information is retrieved
        self.volumes_client.attach_volume(self.volume['id'],
                                          instance_uuid=server['id'],
                                          mountpoint='/dev/%s' %
                                          CONF.compute.volume_device_name)
        waiters.wait_for_volume_resource_status(self.volumes_client,
                                                self.volume['id'],
                                                'in-use')
        self.addCleanup(waiters.wait_for_volume_resource_status,
                        self.volumes_client,
                        self.volume['id'], 'available')
        self.addCleanup(self.volumes_client.detach_volume, self.volume['id'])
        volume = self.volumes_client.show_volume(self.volume['id'])['volume']
        self.assertIn('attachments', volume)
        attachment = volume['attachments'][0]

        self.assertEqual('/dev/%s' %
                         CONF.compute.volume_device_name,
                         attachment['device'])
        self.assertEqual(server['id'], attachment['server_id'])
        self.assertEqual(self.volume['id'], attachment['id'])
        self.assertEqual(self.volume['id'], attachment['volume_id'])
Example #3
 def delete_snapshot(self, snapshot_id):
     waiters.wait_for_volume_resource_status(self.snapshots_client,
                                             snapshot_id,
                                             'available')
     # Delete snapshot
     self.snapshots_client.delete_snapshot(snapshot_id)
     self.snapshots_client.wait_for_resource_deletion(snapshot_id)
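Deletion is confirmed by the client's wait_for_resource_deletion rather than a status waiter. A rough equivalent of that pattern, assuming the client raises a not-found error once the snapshot is gone (tempest clients actually delegate this check to an is_resource_deleted hook):

import time

def wait_for_deletion(show_resource, resource_id, not_found_exc,
                      interval=1, timeout=300):
    # The resource counts as deleted once show_resource raises not-found.
    start = time.time()
    while time.time() - start < timeout:
        try:
            show_resource(resource_id)
        except not_found_exc:
            return
        time.sleep(interval)
    raise TimeoutError('%s was not deleted within %ss'
                       % (resource_id, timeout))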
Example #4
    def test_create_list_delete_volume_transfer(self):
        # Create a volume first
        volume = self.create_volume()
        self.addCleanup(self.delete_volume,
                        self.adm_volumes_client,
                        volume['id'])

        # Create a volume transfer
        transfer_id = self.client.create_volume_transfer(
            volume_id=volume['id'])['transfer']['id']
        waiters.wait_for_volume_resource_status(
            self.volumes_client, volume['id'], 'awaiting-transfer')

        # List all volume transfers with details, check the detail-specific
        # elements, and look for the created transfer.
        transfers = self.client.list_volume_transfers(detail=True)['transfers']
        self.assertNotEmpty(transfers)
        for transfer in transfers:
            self.assertIn('created_at', transfer)
        volume_list = [transfer['volume_id'] for transfer in transfers]
        self.assertIn(volume['id'], volume_list,
                      'Transfer not found for volume %s' % volume['id'])

        # Delete a volume transfer
        self.client.delete_volume_transfer(transfer_id)
        waiters.wait_for_volume_resource_status(
            self.volumes_client, volume['id'], 'available')
Example #5
    def test_force_detach_volume(self):
        # Create a server and a volume
        server_id = self.create_server()['id']
        volume_id = self.create_volume()['id']

        # Attach volume
        self.volumes_client.attach_volume(
            volume_id,
            instance_uuid=server_id,
            mountpoint='/dev/%s' % CONF.compute.volume_device_name)
        waiters.wait_for_volume_resource_status(self.volumes_client,
                                                volume_id, 'in-use')
        self.addCleanup(waiters.wait_for_volume_resource_status,
                        self.volumes_client, volume_id, 'available')
        self.addCleanup(self.volumes_client.detach_volume, volume_id)
        attachment = self.volumes_client.show_volume(
            volume_id)['volume']['attachments'][0]

        # Reset volume's status to error
        self.admin_volume_client.reset_volume_status(volume_id, status='error')

        # Force detach volume
        self.admin_volume_client.force_detach_volume(
            volume_id, connector=None,
            attachment_id=attachment['attachment_id'])
        waiters.wait_for_volume_resource_status(self.volumes_client,
                                                volume_id, 'available')
        vol_info = self.volumes_client.show_volume(volume_id)['volume']
        self.assertIn('attachments', vol_info)
        self.assertEmpty(vol_info['attachments'])
Example #6
 def test_reset_snapshot_status(self):
     # Reset snapshot status to creating
     status = 'creating'
     self.admin_snapshots_client.reset_snapshot_status(
         self.snapshot['id'], status)
     waiters.wait_for_volume_resource_status(self.snapshots_client,
                                             self.snapshot['id'], status)
Example #7
    def nova_volume_detach(self, server, volume):
        self.servers_client.detach_volume(server['id'], volume['id'])
        waiters.wait_for_volume_resource_status(self.volumes_client,
                                                volume['id'], 'available')

        volume = self.volumes_client.show_volume(volume['id'])['volume']
        self.assertEqual('available', volume['status'])
Example #8
    def create_volume(self, size=None, name=None, snapshot_id=None,
                      imageRef=None, volume_type=None):
        if size is None:
            size = CONF.volume.volume_size
        if imageRef:
            image = self.compute_images_client.show_image(imageRef)['image']
            min_disk = image.get('minDisk')
            size = max(size, min_disk)
        if name is None:
            name = data_utils.rand_name(self.__class__.__name__ + "-volume")
        kwargs = {'display_name': name,
                  'snapshot_id': snapshot_id,
                  'imageRef': imageRef,
                  'volume_type': volume_type,
                  'size': size}
        volume = self.volumes_client.create_volume(**kwargs)['volume']

        self.addCleanup(self.volumes_client.wait_for_resource_deletion,
                        volume['id'])
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.volumes_client.delete_volume, volume['id'])

        # NOTE(e0ne): Cinder API v2 uses name instead of display_name
        if 'display_name' in volume:
            self.assertEqual(name, volume['display_name'])
        else:
            self.assertEqual(name, volume['name'])
        waiters.wait_for_volume_resource_status(self.volumes_client,
                                                volume['id'], 'available')
        # The volume body returned on creation may carry a stale status.
        # Re-fetch after the volume becomes available to get up-to-date
        # details.
        volume = self.volumes_client.show_volume(volume['id'])['volume']
        return volume
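A hypothetical usage of this helper (CONF.compute.image_ref appears in Example #33 below): the imageRef branch above guarantees the volume is at least as large as the image's minDisk, so a bootable volume can be requested in one call.

        volume = self.create_volume(imageRef=CONF.compute.image_ref)
        self.assertEqual('available', volume['status'])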
Example #9
File: base.py Project: openstack/tempest
    def attach_volume(self, server, volume, device=None, tag=None):
        """Attaches volume to server and waits for 'in-use' volume status.

        The volume will be detached when the test tears down.

        :param server: The server to which the volume will be attached.
        :param volume: The volume to attach.
        :param device: Optional mountpoint for the attached volume. Note that
            this is not guaranteed for all hypervisors and is not recommended.
        :param tag: Optional device role tag to apply to the volume.
        """
        attach_kwargs = dict(volumeId=volume['id'])
        if device:
            attach_kwargs['device'] = device
        if tag:
            attach_kwargs['tag'] = tag

        attachment = self.servers_client.attach_volume(
            server['id'], **attach_kwargs)['volumeAttachment']
        # On teardown detach the volume and wait for it to be available. This
        # is so we don't error out when trying to delete the volume during
        # teardown.
        self.addCleanup(waiters.wait_for_volume_resource_status,
                        self.volumes_client, volume['id'], 'available')
        # Ignore 404s on detach in case the server is deleted or the volume
        # is already detached.
        self.addCleanup(self._detach_volume, server, volume)
        waiters.wait_for_volume_resource_status(self.volumes_client,
                                                volume['id'], 'in-use')
        return attachment
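The _detach_volume helper registered in the cleanup above is not part of this snippet. A plausible sketch, assuming tempest.lib's NotFound is the exception raised when the server is already deleted or the volume already detached:

    def _detach_volume(self, server, volume):
        # Assumes: from tempest.lib import exceptions as lib_exc
        # Swallow 404s so teardown keeps going when the server is gone
        # or the volume is already detached.
        try:
            self.servers_client.detach_volume(server['id'], volume['id'])
            waiters.wait_for_volume_resource_status(
                self.volumes_client, volume['id'], 'available')
        except lib_exc.NotFound:
            pass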
Example #10
    def test_consistencygroup_cgsnapshot_create_delete(self):
        # Create volume type
        name = data_utils.rand_name("volume-type")
        volume_type = self.admin_volume_types_client.create_volume_type(
            name=name)['volume_type']

        # Create CG
        cg_name = data_utils.rand_name('CG')
        create_consistencygroup = (
            self.consistencygroups_adm_client.create_consistencygroup)
        cg = create_consistencygroup(volume_type['id'],
                                     name=cg_name)['consistencygroup']
        vol_name = data_utils.rand_name("volume")
        params = {'name': vol_name,
                  'volume_type': volume_type['id'],
                  'consistencygroup_id': cg['id'],
                  'size': CONF.volume.volume_size}

        # Create volume
        volume = self.admin_volume_client.create_volume(**params)['volume']
        waiters.wait_for_volume_resource_status(self.admin_volume_client,
                                                volume['id'], 'available')
        self.consistencygroups_adm_client.wait_for_consistencygroup_status(
            cg['id'], 'available')
        self.assertEqual(cg_name, cg['name'])

        # Create cgsnapshot
        cgsnapshot_name = data_utils.rand_name('cgsnapshot')
        create_cgsnapshot = (
            self.consistencygroups_adm_client.create_cgsnapshot)
        cgsnapshot = create_cgsnapshot(cg['id'],
                                       name=cgsnapshot_name)['cgsnapshot']
        snapshots = self.admin_snapshots_client.list_snapshots(
            detail=True)['snapshots']
        for snap in snapshots:
            if volume['id'] == snap['volume_id']:
                waiters.wait_for_volume_resource_status(
                    self.admin_snapshots_client, snap['id'], 'available')
        self.consistencygroups_adm_client.wait_for_cgsnapshot_status(
            cgsnapshot['id'], 'available')
        self.assertEqual(cgsnapshot_name, cgsnapshot['name'])

        # Get a given CG snapshot
        cgsnapshot = self.consistencygroups_adm_client.show_cgsnapshot(
            cgsnapshot['id'])['cgsnapshot']
        self.assertEqual(cgsnapshot_name, cgsnapshot['name'])

        # Get all CG snapshots with detail
        cgsnapshots = self.consistencygroups_adm_client.list_cgsnapshots(
            detail=True)['cgsnapshots']
        self.assertIn((cgsnapshot['name'], cgsnapshot['id']),
                      [(m['name'], m['id']) for m in cgsnapshots])

        # Clean up
        self._delete_cgsnapshot(cgsnapshot['id'], cg['id'])
        self._delete_consistencygroup(cg['id'])
        self.admin_volume_types_client.delete_volume_type(volume_type['id'])
Example #11
 def tearDown(self):
     # Set snapshot's status to available after test
     status = 'available'
     snapshot_id = self.snapshot['id']
     self.admin_snapshots_client.reset_snapshot_status(snapshot_id,
                                                       status)
     waiters.wait_for_volume_resource_status(self.snapshots_client,
                                             snapshot_id, status)
     super(SnapshotsActionsTest, self).tearDown()
Example #12
 def _create_reset_and_force_delete_temp_volume(self, status=None):
     # Create volume, reset volume status, and force delete temp volume
     temp_volume = self.create_volume()
     if status:
         self.admin_volume_client.reset_volume_status(
             temp_volume['id'], status=status)
         waiters.wait_for_volume_resource_status(
             self.volumes_client, temp_volume['id'], status)
     self.admin_volume_client.force_delete_volume(temp_volume['id'])
     self.volumes_client.wait_for_resource_deletion(temp_volume['id'])
Example #13
    def nova_volume_attach(self, server, volume_to_attach):
        volume = self.servers_client.attach_volume(
            server['id'], volumeId=volume_to_attach['id'], device='/dev/%s'
            % CONF.compute.volume_device_name)['volumeAttachment']
        self.assertEqual(volume_to_attach['id'], volume['id'])
        waiters.wait_for_volume_resource_status(self.volumes_client,
                                                volume['id'], 'in-use')

        # Return the updated volume after the attachment
        return self.volumes_client.show_volume(volume['id'])['volume']
Example #14
 def test_volume_extend(self):
     # Extend Volume Test.
     volume = self.create_volume()
     extend_size = volume['size'] + 1
     self.volumes_client.extend_volume(volume['id'],
                                       new_size=extend_size)
     waiters.wait_for_volume_resource_status(self.volumes_client,
                                             volume['id'], 'available')
     volume = self.volumes_client.show_volume(volume['id'])['volume']
     self.assertEqual(volume['size'], extend_size)
Example #15
    def _attach_volume_to_server(self):
        self._volume = self.create_test_volume()
        self._server, _ = self.create_test_server(wait_until='ACTIVE')

        self.volumes_client.attach_volume(self._volume['id'],
                                          instance_uuid=self._server['id'],
                                          mountpoint='/dev/vdc')

        waiters.wait_for_volume_resource_status(self.volumes_client,
                                                self._volume['id'], 'in-use')
Example #16
    def test_attach_detach_volume(self):
        # Stop and Start a server with an attached volume, ensuring that
        # the volume remains attached.
        server, validation_resources = self._create_server()

        # NOTE(andreaf) Create one remote client used throughout the test.
        if CONF.validation.run_validation:
            linux_client = remote_client.RemoteClient(
                self.get_server_ip(server, validation_resources),
                self.image_ssh_user,
                self.image_ssh_password,
                validation_resources['keypair']['private_key'],
                server=server,
                servers_client=self.servers_client)
            # NOTE(andreaf) We need to ensure the ssh key has been
            # injected in the guest before we power cycle
            linux_client.validate_authentication()

        volume = self.create_volume()

        # NOTE: As of the 12.0.0 Liberty release, the Nova libvirt driver
        # no longer honors a user-supplied device name; in that case
        # CONF.compute.volume_device_name must be set to the same value as
        # the one libvirt auto-assigns
        attachment = self.attach_volume(server, volume,
                                        device=('/dev/%s' % self.device))

        self.servers_client.stop_server(server['id'])
        waiters.wait_for_server_status(self.servers_client, server['id'],
                                       'SHUTOFF')

        self.servers_client.start_server(server['id'])
        waiters.wait_for_server_status(self.servers_client, server['id'],
                                       'ACTIVE')

        if CONF.validation.run_validation:
            disks = linux_client.get_disks()
            device_name_to_match = '\n' + self.device + ' '
            self.assertIn(device_name_to_match, disks)

        self.servers_client.detach_volume(server['id'], attachment['volumeId'])
        waiters.wait_for_volume_resource_status(
            self.volumes_client, attachment['volumeId'], 'available')

        self.servers_client.stop_server(server['id'])
        waiters.wait_for_server_status(self.servers_client, server['id'],
                                       'SHUTOFF')

        self.servers_client.start_server(server['id'])
        waiters.wait_for_server_status(self.servers_client, server['id'],
                                       'ACTIVE')

        if CONF.validation.run_validation:
            disks = linux_client.get_disks()
            self.assertNotIn(device_name_to_match, disks)
Example #17
    def test_delete_server_while_in_attached_volume(self):
        # Delete a server while a volume is attached to it
        server = self.create_test_server(wait_until='ACTIVE')

        volume = self.create_volume()
        self.attach_volume(server, volume)

        self.client.delete_server(server['id'])
        waiters.wait_for_server_termination(self.client, server['id'])
        waiters.wait_for_volume_resource_status(self.volumes_client,
                                                volume['id'], 'available')
Example #18
File: base.py Project: masayukig/tempest
 def attach_volume(self, server_id, volume_id):
     """Attach a volume to a server"""
     self.servers_client.attach_volume(
         server_id, volumeId=volume_id,
         device='/dev/%s' % CONF.compute.volume_device_name)
     waiters.wait_for_volume_resource_status(self.volumes_client,
                                             volume_id, 'in-use')
     self.addCleanup(waiters.wait_for_volume_resource_status,
                     self.volumes_client, volume_id, 'available')
     self.addCleanup(self.servers_client.detach_volume, server_id,
                     volume_id)
Example #19
 def _create_reset_and_force_delete_temp_snapshot(self, status=None):
     # Create snapshot, reset snapshot status,
     # and force delete temp snapshot
     temp_snapshot = self.create_snapshot(volume_id=self.volume['id'])
     if status:
         self.admin_snapshots_client.reset_snapshot_status(
             temp_snapshot['id'], status)
         waiters.wait_for_volume_resource_status(
             self.snapshots_client, temp_snapshot['id'], status)
     self.admin_snapshots_client.force_delete_snapshot(temp_snapshot['id'])
     self.snapshots_client.wait_for_resource_deletion(temp_snapshot['id'])
Example #20
    def test_group_snapshot_create_show_list_delete(self):
        # Create volume type
        volume_type = self.create_volume_type()

        # Create group type
        group_type = self.create_group_type()

        # Create group
        grp = self._create_group(group_type, volume_type)

        # Create volume
        vol = self.create_volume(volume_type=volume_type['id'],
                                 group_id=grp['id'])

        # Create group snapshot
        group_snapshot_name = data_utils.rand_name('group_snapshot')
        group_snapshot = (
            self.group_snapshots_client.create_group_snapshot(
                group_id=grp['id'],
                name=group_snapshot_name)['group_snapshot'])
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self._delete_group_snapshot,
                        group_snapshot['id'], grp['id'])
        snapshots = self.snapshots_client.list_snapshots(
            detail=True)['snapshots']
        for snap in snapshots:
            if vol['id'] == snap['volume_id']:
                waiters.wait_for_volume_resource_status(
                    self.snapshots_client, snap['id'], 'available')
        waiters.wait_for_volume_resource_status(
            self.group_snapshots_client,
            group_snapshot['id'], 'available')
        self.assertEqual(group_snapshot_name, group_snapshot['name'])

        # Get a given group snapshot
        group_snapshot = self.group_snapshots_client.show_group_snapshot(
            group_snapshot['id'])['group_snapshot']
        self.assertEqual(group_snapshot_name, group_snapshot['name'])

        # Get all group snapshots with details, check some detail-specific
        # elements, and look for the created group snapshot
        group_snapshots = (self.group_snapshots_client.list_group_snapshots(
            detail=True)['group_snapshots'])
        for grp_snapshot in group_snapshots:
            self.assertIn('created_at', grp_snapshot)
            self.assertIn('group_id', grp_snapshot)
        self.assertIn((group_snapshot['name'], group_snapshot['id']),
                      [(m['name'], m['id']) for m in group_snapshots])

        # Delete group snapshot
        self._delete_group_snapshot(group_snapshot['id'], grp['id'])
        group_snapshots = (self.group_snapshots_client.list_group_snapshots()
                           ['group_snapshots'])
        self.assertEmpty(group_snapshots)
Example #21
File: base.py Project: masayukig/tempest
    def create_group(self, **kwargs):
        if 'name' not in kwargs:
            kwargs['name'] = data_utils.rand_name(
                self.__class__.__name__ + '-Group')

        group = self.groups_client.create_group(**kwargs)['group']
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.delete_group, group['id'])
        waiters.wait_for_volume_resource_status(
            self.groups_client, group['id'], 'available')
        return group
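The delete_group counterpart registered in the cleanup is also not shown. A sketch of what it plausibly does, under the assumption that the group is deleted together with its volumes (mirroring the list-then-filter pattern the group examples above use):

    def delete_group(self, group_id, delete_volumes=True):
        # Collect the group's volumes first so each deletion can be
        # confirmed after the group delete request is issued.
        group_vols = []
        if delete_volumes:
            vols = self.volumes_client.list_volumes(detail=True)['volumes']
            group_vols = [v['id'] for v in vols
                          if v['group_id'] == group_id]
        self.groups_client.delete_group(group_id, delete_volumes)
        for vol_id in group_vols:
            self.volumes_client.wait_for_resource_deletion(vol_id)
        self.groups_client.wait_for_resource_deletion(group_id)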
Example #22
    def test_snapshot_backup(self):
        # Create a snapshot
        snapshot = self.create_snapshot(volume_id=self.volume_origin['id'])

        backup = self.create_backup(volume_id=self.volume_origin['id'],
                                    snapshot_id=snapshot['id'])
        waiters.wait_for_volume_resource_status(self.snapshots_client,
                                                snapshot['id'], 'available')
        backup_info = self.backups_client.show_backup(backup['id'])['backup']
        self.assertEqual(self.volume_origin['id'], backup_info['volume_id'])
        self.assertEqual(snapshot['id'], backup_info['snapshot_id'])
Example #23
    def test_group_update(self):
        # Create volume type
        volume_type = self.create_volume_type()

        # Create group type
        group_type = self.create_group_type()

        # Create Group
        grp = self.create_group(group_type=group_type['id'],
                                volume_types=[volume_type['id']])

        # Create volumes
        grp_vols = []
        for _ in range(2):
            vol = self.create_volume(volume_type=volume_type['id'],
                                     group_id=grp['id'])
            grp_vols.append(vol)
        vol2 = grp_vols[1]

        # Remove a volume from group and update name and description
        new_grp_name = 'new_group'
        new_desc = 'This is a new group'
        grp_params = {'name': new_grp_name,
                      'description': new_desc,
                      'remove_volumes': vol2['id']}
        self.groups_client.update_group(grp['id'], **grp_params)

        # Wait for group status to become available
        waiters.wait_for_volume_resource_status(
            self.groups_client, grp['id'], 'available')

        # Get the updated Group
        grp = self.groups_client.show_group(grp['id'])['group']
        self.assertEqual(new_grp_name, grp['name'])
        self.assertEqual(new_desc, grp['description'])

        # Get volumes in the group
        vols = self.volumes_client.list_volumes(detail=True)['volumes']
        grp_vols = [v for v in vols if v['group_id'] == grp['id']]
        self.assertEqual(1, len(grp_vols))

        # Add a volume to the group
        grp_params = {'add_volumes': vol2['id']}
        self.groups_client.update_group(grp['id'], **grp_params)

        # Wait for group status to become available
        waiters.wait_for_volume_resource_status(
            self.groups_client, grp['id'], 'available')

        # Get volumes in the group
        vols = self.volumes_client.list_volumes(detail=True)['volumes']
        grp_vols = [v for v in vols if v['group_id'] == grp['id']]
        self.assertEqual(2, len(grp_vols))
Example #24
 def test_volume_reset_status(self):
     # test volume reset status : available->error->available->maintenance
     volume = self.create_volume()
     self.addCleanup(waiters.wait_for_volume_resource_status,
                     self.volumes_client, volume['id'], 'available')
     self.addCleanup(self.admin_volume_client.reset_volume_status,
                     volume['id'], status='available')
     for status in ['error', 'available', 'maintenance']:
         self.admin_volume_client.reset_volume_status(
             volume['id'], status=status)
         waiters.wait_for_volume_resource_status(
             self.volumes_client, volume['id'], status)
Example #25
    def test_volume_swap_with_multiattach(self):
        # Create two volumes.
        # NOTE(gmann): Volumes are created before server creation so that
        # volumes cleanup can happen successfully irrespective of which volume
        # is attached to server.
        volume1 = self.create_volume(multiattach=True)
        # Make volume1 read-only since you can't swap from a volume with
        # multiple read/write attachments, and you can't change the readonly
        # flag on an in-use volume, so we have to do this before attaching
        # volume1 to anything. If the compute API ever supports per-attachment
        # attach modes, then we can handle this differently.
        self.admin_volumes_client.update_volume_readonly(
            volume1['id'], readonly=True)
        volume2 = self.create_volume(multiattach=True)

        # Create two servers and wait for them to be ACTIVE.
        reservation_id = self.create_test_server(
            wait_until='ACTIVE', min_count=2,
            return_reservation_id=True)['reservation_id']
        # Get the servers using the reservation_id.
        servers = self.servers_client.list_servers(
            reservation_id=reservation_id)['servers']
        self.assertEqual(2, len(servers))
        # Attach volume1 to server1
        server1 = servers[0]
        self.attach_volume(server1, volume1)
        # Attach volume1 to server2
        server2 = servers[1]
        self.attach_volume(server2, volume1)

        # Swap volume1 to volume2 on server1, volume1 should remain attached
        # to server 2
        self.admin_servers_client.update_attached_volume(
            server1['id'], volume1['id'], volumeId=volume2['id'])
        # volume1 will return to in-use after the swap
        waiters.wait_for_volume_resource_status(self.volumes_client,
                                                volume1['id'], 'in-use')
        waiters.wait_for_volume_resource_status(self.volumes_client,
                                                volume2['id'], 'in-use')
        self.wait_for_server_volume_swap(server1['id'], volume1['id'],
                                         volume2['id'])

        # Verify volume2 is attached to server1
        vol_attachments = self.servers_client.list_volume_attachments(
            server1['id'])['volumeAttachments']
        self.assertEqual(1, len(vol_attachments))
        self.assertIn(volume2['id'], vol_attachments[0]['volumeId'])

        # Verify volume1 is still attached to server2
        vol_attachments = self.servers_client.list_volume_attachments(
            server2['id'])['volumeAttachments']
        self.assertEqual(1, len(vol_attachments))
        self.assertIn(volume1['id'], vol_attachments[0]['volumeId'])
Example #26
File: base.py Project: Juniper/tempest
    @classmethod
    def create_snapshot(cls, volume_id=1, **kwargs):
        """Wrapper utility that returns a test snapshot."""
        if 'name' not in kwargs:
            name = data_utils.rand_name(cls.__name__ + '-Snapshot')
            kwargs['name'] = name

        snapshot = cls.snapshots_client.create_snapshot(
            volume_id=volume_id, **kwargs)['snapshot']
        cls.snapshots.append(snapshot['id'])
        waiters.wait_for_volume_resource_status(cls.snapshots_client,
                                                snapshot['id'], 'available')
        return snapshot
Example #27
    def test_volume_extend_when_volume_has_snapshot(self):
        volume = self.create_volume()
        self.create_snapshot(volume['id'])

        extend_size = volume['size'] + 1
        self.volumes_client.extend_volume(volume['id'], new_size=extend_size)

        waiters.wait_for_volume_resource_status(self.volumes_client,
                                                volume['id'], 'available')
        resized_volume = self.volumes_client.show_volume(
            volume['id'])['volume']
        self.assertEqual(extend_size, resized_volume['size'])
Example #28
    def _resize_volume(self):
        volume = self.create_test_volume(size=1)
        self.volumes_client.extend_volume(volume['id'], new_size=2)

        waiters.wait_for_volume_resource_status(self.volumes_client,
                                                volume['id'], 'available')

        self.addCleanup(self.volumes_client.wait_for_resource_deletion,
                        volume['id'])
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.volumes_client.delete_volume, volume['id'])
        return volume
Example #29
    def test_unmanage_manage_snapshot(self):
        # Create a volume
        volume = self.create_volume()

        # Create a snapshot
        snapshot = self.create_snapshot(volume_id=volume['id'])

        # Unmanage the snapshot
        # Unmanage snapshot function works almost the same as delete snapshot,
        # but it does not delete the snapshot data
        self.admin_snapshots_client.unmanage_snapshot(snapshot['id'])
        self.admin_snapshots_client.wait_for_resource_deletion(snapshot['id'])

        # Verify the original snapshot does not exist in snapshot list
        params = {'all_tenants': 1}
        all_snapshots = self.admin_snapshots_client.list_snapshots(
            detail=True, **params)['snapshots']
        self.assertNotIn(snapshot['id'], [v['id'] for v in all_snapshots])

        # Manage the snapshot
        name = data_utils.rand_name(self.__class__.__name__ +
                                    '-Managed-Snapshot')
        description = data_utils.rand_name(self.__class__.__name__ +
                                           '-Managed-Snapshot-Description')
        metadata = {"manage-snap-meta1": "value1",
                    "manage-snap-meta2": "value2",
                    "manage-snap-meta3": "value3"}
        snapshot_ref = {
            'volume_id': volume['id'],
            'ref': {CONF.volume.manage_snapshot_ref[0]:
                    CONF.volume.manage_snapshot_ref[1] % snapshot['id']},
            'name': name,
            'description': description,
            'metadata': metadata
        }
        new_snapshot = self.admin_snapshot_manage_client.manage_snapshot(
            **snapshot_ref)['snapshot']
        self.addCleanup(self.delete_snapshot, new_snapshot['id'],
                        self.admin_snapshots_client)

        # Wait for the snapshot to be available after manage operation
        waiters.wait_for_volume_resource_status(self.admin_snapshots_client,
                                                new_snapshot['id'],
                                                'available')

        # Verify the managed snapshot has the expected parent volume
        # and the expected field values.
        new_snapshot_info = self.admin_snapshots_client.show_snapshot(
            new_snapshot['id'])['snapshot']
        self.assertEqual(snapshot['size'], new_snapshot_info['size'])
        for key in ['volume_id', 'name', 'description', 'metadata']:
            self.assertEqual(snapshot_ref[key], new_snapshot_info[key])
Example #30
    def _create_group_snapshot(self, **kwargs):
        if 'name' not in kwargs:
            kwargs['name'] = data_utils.rand_name(
                self.__class__.__name__ + '-Group_Snapshot')

        group_snapshot = self.group_snapshots_client.create_group_snapshot(
            **kwargs)['group_snapshot']
        group_snapshot['group_id'] = kwargs['group_id']
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self._delete_group_snapshot, group_snapshot)
        waiters.wait_for_volume_resource_status(
            self.group_snapshots_client, group_snapshot['id'], 'available')
        return group_snapshot
Example #31
    def restore_backup(self, backup_id):
        # Restore a backup
        restored_volume = self.backups_client.restore_backup(
            backup_id)['restore']

        # Delete backup
        self.addCleanup(self.volumes_client.delete_volume,
                        restored_volume['volume_id'])
        self.assertEqual(backup_id, restored_volume['backup_id'])
        waiters.wait_for_volume_resource_status(self.backups_client, backup_id,
                                                'available')
        waiters.wait_for_volume_resource_status(self.volumes_client,
                                                restored_volume['volume_id'],
                                                'available')
        return restored_volume
Example #32
    def test_volume_crud_with_volume_type_and_extra_specs(self):
        # Create/update/get/delete volume with volume_type and extra spec.
        volume_types = list()
        vol_name = data_utils.rand_name(self.__class__.__name__ + '-volume')
        name_field = self.special_fields['name_field']
        proto = CONF.volume.storage_protocol
        vendor = CONF.volume.vendor_name
        extra_specs = {"storage_protocol": proto,
                       "vendor_name": vendor}
        # Create two volume_types
        for _ in range(2):
            vol_type = self.create_volume_type(
                extra_specs=extra_specs)
            volume_types.append(vol_type)
        params = {name_field: vol_name,
                  'volume_type': volume_types[0]['id'],
                  'size': CONF.volume.volume_size}

        # Create volume
        volume = self.create_volume(**params)
        self.assertEqual(volume_types[0]['name'], volume["volume_type"])
        self.assertEqual(volume[name_field], vol_name,
                         "The created volume name is not equal "
                         "to the requested name")
        self.assertIsNotNone(volume['id'],
                             "Field volume id is empty or not found.")
        waiters.wait_for_volume_resource_status(self.volumes_client,
                                                volume['id'], 'available')

        # Update volume with new volume_type
        self.volumes_client.retype_volume(volume['id'],
                                          new_type=volume_types[1]['id'])
        waiters.wait_for_volume_resource_status(self.volumes_client,
                                                volume['id'], 'available')

        # Get volume details and Verify
        fetched_volume = self.volumes_client.show_volume(
            volume['id'])['volume']
        self.assertEqual(volume_types[1]['name'],
                         fetched_volume['volume_type'],
                         'The fetched Volume type is different '
                         'from updated volume type')
        self.assertEqual(vol_name, fetched_volume[name_field],
                         'The fetched Volume is different '
                         'from the created Volume')
        self.assertEqual(volume['id'], fetched_volume['id'],
                         'The fetched Volume is different '
                         'from the created Volume')
Example #33
    def test_volume_backup_restore(self):
        # Create volume from image
        img_uuid = CONF.compute.image_ref
        volume = self.create_volume(imageRef=img_uuid)
        volume_details = self.volumes_client.show_volume(
            volume['id'])['volume']
        self.assertEqual('true', volume_details['bootable'])

        # Create a backup
        backup = self.create_backup(volume_id=volume['id'])
        waiters.wait_for_volume_resource_status(self.volumes_client,
                                                volume['id'], 'available')

        # Restore the backup
        restored_volume_id = self.restore_backup(backup['id'])['volume_id']

        # Verify the restored backup volume is bootable
        restored_volume_info = self.volumes_client.show_volume(
            restored_volume_id)['volume']
        self.assertEqual('true', restored_volume_info['bootable'])

        # Create keypair and security group
        keypair = self.create_keypair()
        security_group = self.create_security_group()

        # Boot a server from the restored backup
        bd_map_v2 = [{
            'uuid': restored_volume_id,
            'source_type': 'volume',
            'destination_type': 'volume',
            'boot_index': 0
        }]
        server = self.create_server(image_id='',
                                    block_device_mapping_v2=bd_map_v2,
                                    key_name=keypair['name'],
                                    security_groups=[{
                                        'name':
                                        security_group['name']
                                    }])

        # Create a floating ip and associate it to server.
        fip = self.create_floating_ip(server)
        floating_ip = self.associate_floating_ip(fip, server)
        # Check server connectivity
        self.check_vm_connectivity(floating_ip['floating_ip_address'],
                                   username=CONF.validation.image_ssh_user,
                                   private_key=keypair['private_key'],
                                   should_connect=True)
Example #34
    def test_quota_usage_after_volume_transfer(self):
        # Create a volume for transfer
        volume = self.create_volume()
        self.addCleanup(self.delete_volume, self.admin_volume_client,
                        volume['id'])

        # List of tenants quota usage pre-transfer
        primary_quota = self.admin_quotas_client.show_quota_set(
            self.demo_tenant_id, params={'usage': True})['quota_set']

        alt_quota = self.admin_quotas_client.show_quota_set(
            self.os_alt.volumes_client_latest.tenant_id,
            params={'usage': True})['quota_set']

        # Creates a volume transfer
        transfer = self.transfer_client.create_volume_transfer(
            volume_id=volume['id'])['transfer']
        transfer_id = transfer['id']
        auth_key = transfer['auth_key']

        # Accepts a volume transfer
        self.alt_transfer_client.accept_volume_transfer(transfer_id,
                                                        auth_key=auth_key)

        # Verify volume transferred is available
        waiters.wait_for_volume_resource_status(
            self.os_alt.volumes_client_latest, volume['id'], 'available')

        # List of tenants quota usage post transfer
        new_primary_quota = self.admin_quotas_client.show_quota_set(
            self.demo_tenant_id, params={'usage': True})['quota_set']

        new_alt_quota = self.admin_quotas_client.show_quota_set(
            self.os_alt.volumes_client_latest.tenant_id,
            params={'usage': True})['quota_set']

        # Verify tenants quota usage was updated
        self.assertEqual(
            primary_quota['volumes']['in_use'] -
            new_primary_quota['volumes']['in_use'],
            new_alt_quota['volumes']['in_use'] -
            alt_quota['volumes']['in_use'])

        self.assertEqual(alt_quota['gigabytes']['in_use'] + volume['size'],
                         new_alt_quota['gigabytes']['in_use'])

        self.assertEqual(primary_quota['gigabytes']['in_use'] - volume['size'],
                         new_primary_quota['gigabytes']['in_use'])
Example #35
    def test_group_snapshot_create_show_list_delete(self):
        # Create volume type
        volume_type = self.create_volume_type()

        # Create group type
        group_type = self.create_group_type()

        # Create group
        grp = self.create_group(group_type=group_type['id'],
                                volume_types=[volume_type['id']])

        # Create volume
        vol = self.create_volume(volume_type=volume_type['id'],
                                 group_id=grp['id'])

        # Create group snapshot
        group_snapshot_name = data_utils.rand_name('group_snapshot')
        group_snapshot = self._create_group_snapshot(group_id=grp['id'],
                                                     name=group_snapshot_name)
        snapshots = self.snapshots_client.list_snapshots(
            detail=True)['snapshots']
        for snap in snapshots:
            if vol['id'] == snap['volume_id']:
                waiters.wait_for_volume_resource_status(
                    self.snapshots_client, snap['id'], 'available')
        self.assertEqual(group_snapshot_name, group_snapshot['name'])

        # Get a given group snapshot
        group_snapshot = self.group_snapshots_client.show_group_snapshot(
            group_snapshot['id'])['group_snapshot']
        self.assertEqual(group_snapshot_name, group_snapshot['name'])

        # Get all group snapshots with details, check some detail-specific
        # elements, and look for the created group snapshot
        group_snapshots = self.group_snapshots_client.list_group_snapshots(
            detail=True)['group_snapshots']
        for grp_snapshot in group_snapshots:
            self.assertIn('created_at', grp_snapshot)
            self.assertIn('group_id', grp_snapshot)
        self.assertIn((group_snapshot['name'], group_snapshot['id']),
                      [(m['name'], m['id']) for m in group_snapshots])

        # Delete group snapshot
        self._delete_group_snapshot(group_snapshot)
        group_snapshots = self.group_snapshots_client.list_group_snapshots(
        )['group_snapshots']
        self.assertNotIn((group_snapshot['name'], group_snapshot['id']),
                         [(m['name'], m['id']) for m in group_snapshots])
Example #36
    def upload_volume(self, volume):
        image_name = data_utils.rand_name(self.__class__.__name__ + '-Image')

        body = self.volumes_client.upload_volume(
            volume['id'],
            image_name=image_name,
            disk_format=CONF.volume.disk_format)['os-volume_upload_image']
        image_id = body['image_id']
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.compute_images_client.delete_image, image_id)
        waiters.wait_for_image_status(self.compute_images_client, image_id,
                                      'ACTIVE')
        waiters.wait_for_volume_resource_status(self.volumes_client,
                                                volume['id'], 'available')

        return image_id
Example #37
    @classmethod
    def create_volume_with_args(cls, **kwargs):
        if 'name' not in kwargs:
            kwargs['name'] = data_utils.rand_name('Volume')

        kwargs['size'] = CONF.volume.volume_size

        volume = cls.volumes_client.create_volume(**kwargs)['volume']
        cls.addClassResourceCleanup(
            cls.volumes_client.wait_for_resource_deletion, volume['id'])
        cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
                                    cls.volumes_client.delete_volume,
                                    volume['id'])
        waiters.wait_for_volume_resource_status(cls.volumes_client,
                                                volume['id'], 'available')

        return volume
Example #38
    def test_volume_upload_public(self):
        # This also enforces "volume_extension:volume_actions:upload_image".
        image_name = data_utils.rand_name(self.__class__.__name__ + '-Image')

        with self.override_role():
            body = self.volumes_client.upload_volume(
                self.volume['id'],
                image_name=image_name,
                visibility="public",
                disk_format=CONF.volume.disk_format)['os-volume_upload_image']
            image_id = body["image_id"]
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.image_client.delete_image, image_id)
        waiters.wait_for_image_status(self.image_client, image_id, 'active')
        waiters.wait_for_volume_resource_status(self.volumes_client,
                                                self.volume['id'], 'available')
Example #39
 def _create_volume_snapshot(self, volume):
     snapshot_name = data_utils.rand_name('scenario-snapshot')
     snapshot = self.snapshots_client.create_snapshot(
         volume_id=volume['id'], display_name=snapshot_name)['snapshot']
     self.addCleanup(self.snapshots_client.wait_for_resource_deletion,
                     snapshot['id'])
     self.addCleanup(self.snapshots_client.delete_snapshot, snapshot['id'])
     waiters.wait_for_volume_resource_status(self.volumes_client,
                                             volume['id'], 'available')
     waiters.wait_for_volume_resource_status(self.snapshots_client,
                                             snapshot['id'], 'available')
     if 'display_name' in snapshot:
         self.assertEqual(snapshot_name, snapshot['display_name'])
     else:
         self.assertEqual(snapshot_name, snapshot['name'])
     return snapshot
Example #40
    def test_create_from_image_multiple(self):
        """Create a handful of volumes from the same image at once.

        The purpose of this test is to stress volume drivers,
        image download, the image cache, etc., within Cinder.
        """

        img_uuid = CONF.compute.image_ref

        vols = []
        for _ in range(5):
            vols.append(self.create_volume_no_wait(imageRef=img_uuid))

        for v in vols:
            waiters.wait_for_volume_resource_status(self.volumes_client,
                                                    v['id'], 'available')
Example #41
    def attach_volume(self, server, volume):
        """Attaches volume to server

        :param server: Created server details
        :param volume: Created volume details
        :return volume_attachment: Volume attachment information.
        """
        attach_args = dict(volumeId=volume['id'])
        attachment = self.servers_client.attach_volume(
            server['id'], **attach_args)['volumeAttachment']
        self.addCleanup(waiters.wait_for_volume_resource_status,
                        self.volumes_client, volume['id'], 'available')
        self.addCleanup(self._detach_volume, server, volume)
        waiters.wait_for_volume_resource_status(self.volumes_client,
                                                volume['id'], 'in-use')
        return attachment
Example #42
    def test_force_detach_volume_from_instance(self):
        volume = self.create_volume()
        server = self._create_server()
        self._attach_volume(server, volume['id'])
        attachment = self.volumes_client.show_volume(
            volume['id'])['volume']['attachments'][0]

        # Reset volume's status to error.
        self.volumes_client.reset_volume_status(volume['id'], status='error')

        self.rbac_utils.switch_role(self, toggle_rbac_role=True)
        self.volumes_client.force_detach_volume(
            volume['id'], connector=None,
            attachment_id=attachment['attachment_id'])
        waiters.wait_for_volume_resource_status(self.admin_volumes_client,
                                                volume['id'], 'available')
Example #43
    def test_volume_reset_status(self):
        """Test resetting volume status

        Reset volume status to available->error->available->maintenance
        """
        volume = self.create_volume()
        self.addCleanup(waiters.wait_for_volume_resource_status,
                        self.volumes_client, volume['id'], 'available')
        self.addCleanup(self.admin_volume_client.reset_volume_status,
                        volume['id'],
                        status='available')
        for status in ['error', 'available', 'maintenance']:
            self.admin_volume_client.reset_volume_status(volume['id'],
                                                         status=status)
            waiters.wait_for_volume_resource_status(self.volumes_client,
                                                    volume['id'], status)
Example #44
    def create_backup(self, volume_id, backup_client=None, **kwargs):
        """Wrapper utility that returns a test backup."""
        if backup_client is None:
            backup_client = self.backups_client
        if 'name' not in kwargs:
            name = data_utils.rand_name(self.__class__.__name__ + '-Backup')
            kwargs['name'] = name

        backup = backup_client.create_backup(volume_id=volume_id,
                                             **kwargs)['backup']
        self.addCleanup(backup_client.wait_for_resource_deletion, backup['id'])
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        backup_client.delete_backup, backup['id'])
        waiters.wait_for_volume_resource_status(backup_client, backup['id'],
                                                'available')
        return backup
Example #45
    def _detach_multiattach_volume(self, volume_id, server_id):
        """Detaches a multiattach volume from the given server.

        Depending on the number of attachments the volume has, this method
        waits for the volume to go back to 'in-use' status if attachments
        remain, or 'available' if none are left.
        """
        # Count the number of attachments before starting the detach.
        volume = self.volumes_client.show_volume(volume_id)['volume']
        attachments = volume['attachments']
        wait_status = 'in-use' if len(attachments) > 1 else 'available'
        # Now detach the volume from the given server.
        self.servers_client.detach_volume(server_id, volume_id)
        # Now wait for the volume status to change.
        waiters.wait_for_volume_resource_status(self.volumes_client, volume_id,
                                                wait_status)
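Hypothetical usage with a two-server multiattach volume (server1, server2, and volume are assumed names):

        # The first detach leaves one attachment, so the helper waits
        # for 'in-use'; the second leaves none and waits for 'available'.
        self._detach_multiattach_volume(volume['id'], server1['id'])
        self._detach_multiattach_volume(volume['id'], server2['id'])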
Example #46
 def test_volume_upload(self):
     # NOTE(gfidente): the volume uploaded to Glance comes from
     # setUpClass and is shared with the other tests. Once uploaded,
     # the image cannot be deleted through Cinder, so we delete it from
     # Glance using the Glance images_client, and the volume itself via
     # tearDownClass.
     image_name = data_utils.rand_name(self.__class__.__name__ + '-Image')
     body = self.volumes_client.upload_volume(
         self.volume['id'],
         image_name=image_name,
         disk_format=CONF.volume.disk_format)['os-volume_upload_image']
     image_id = body["image_id"]
     self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                     self.images_client.delete_image, image_id)
     waiters.wait_for_image_status(self.images_client, image_id, 'active')
     waiters.wait_for_volume_resource_status(self.volumes_client,
                                             self.volume['id'], 'available')
Example #47
 def _recreate_volume(self):
     try:
         # In case detachment failed, update the volume's status in the DB
         # to avoid an error being thrown when deleting the volume.
         self.volumes_client.reset_volume_status(
             self.volume['id'], status='available',
             attach_status='detached')
         waiters.wait_for_volume_resource_status(
             self.volumes_client, self.volume['id'], 'available')
         # Next, forcibly delete the volume.
         self.volumes_client.force_delete_volume(self.volume['id'])
         self.volumes_client.wait_for_resource_deletion(self.volume['id'])
     except lib_exc.TimeoutException:
         LOG.exception('Failed to delete volume %s', self.volume['id'])
     # Finally, re-create the volume.
     self.__class__.volume = self.create_volume()
Example #48
    @classmethod
    def create_snapshot(cls, volume_id=1, **kwargs):
        """Wrapper utility that returns a test snapshot."""
        if 'name' not in kwargs:
            name = data_utils.rand_name(cls.__name__ + '-Snapshot')
            kwargs['name'] = name

        snapshot = cls.snapshots_client.create_snapshot(volume_id=volume_id,
                                                        **kwargs)['snapshot']
        cls.addClassResourceCleanup(
            cls.snapshots_client.wait_for_resource_deletion, snapshot['id'])
        cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
                                    cls.snapshots_client.delete_snapshot,
                                    snapshot['id'])
        waiters.wait_for_volume_resource_status(cls.snapshots_client,
                                                snapshot['id'], 'available')
        return snapshot
Example #49
File: base.py Project: sapcc/tempest
    def attach_volume(self, server, volume, device=None, tag=None):
        """Attaches volume to server and waits for 'in-use' volume status.

        The volume will be detached when the test tears down.

        :param server: The server to which the volume will be attached.
        :param volume: The volume to attach.
        :param device: Optional mountpoint for the attached volume. Note that
            this is not guaranteed for all hypervisors and is not recommended.
        :param tag: Optional device role tag to apply to the volume.
        """
        attach_kwargs = dict(volumeId=volume['id'])
        if device:
            attach_kwargs['device'] = device
        if tag:
            attach_kwargs['tag'] = tag

        attachment = self.servers_client.attach_volume(
            server['id'], **attach_kwargs)['volumeAttachment']

        # NOTE(lyarwood): During attach we initially wait for the volume
        # attachment and then check the volume state.
        waiters.wait_for_volume_attachment_create(
            self.volumes_client, volume['id'], server['id'])
        # TODO(lyarwood): Remove the following volume status checks and move to
        # attachment status checks across all volumes now with the 3.27
        # microversion somehow.
        if not volume['multiattach']:
            waiters.wait_for_volume_resource_status(
                self.volumes_client, volume['id'], 'in-use')

        # NOTE(lyarwood): On teardown (LIFO) initially wait for the volume
        # attachment in Nova to be removed. While this technically happens
        # last, we want it to be the first waiter registered, so that if it
        # fails we can dump the contents of the console log. The final check
        # of the volume state should be a no-op by this point and is just
        # added for completeness when detaching non-multiattach volumes.
        if not volume['multiattach']:
            self.addCleanup(
                waiters.wait_for_volume_resource_status, self.volumes_client,
                volume['id'], 'available')
        self.addCleanup(
            waiters.wait_for_volume_attachment_remove_from_server,
            self.servers_client, server['id'], volume['id'])
        self.addCleanup(self._detach_volume, server, volume)

        return attachment
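The attachment-removal waiter used in the cleanup above can be pictured as polling the server's attachment list until the volume drops out of it. A simplified sketch (interval, timeout, and the TimeoutError are assumptions; the real waiter takes its timing from the client config):

import time

def wait_for_attachment_removal(servers_client, server_id, volume_id,
                                interval=1, timeout=300):
    # Poll the server's attachments until volume_id no longer appears.
    start = time.time()
    while time.time() - start < timeout:
        attachments = servers_client.list_volume_attachments(
            server_id)['volumeAttachments']
        if volume_id not in [a['volumeId'] for a in attachments]:
            return
        time.sleep(interval)
    raise TimeoutError('volume %s still attached to server %s after %ss'
                       % (volume_id, server_id, timeout))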
Example #50
File: manager.py Project: zhatin/nova-lxd
    def create_server_snapshot(self, server, name=None):
        # Glance client
        _image_client = self.image_client
        # Compute client
        _images_client = self.compute_images_client
        if name is None:
            name = data_utils.rand_name(self.__class__.__name__ + 'snapshot')
        LOG.debug("Creating a snapshot image for server: {}"
                  .format(server['name']))
        image = _images_client.create_image(server['id'], name=name)
        image_id = image.response['location'].split('images/')[1]
        waiters.wait_for_image_status(_image_client, image_id, 'active')

        self.addCleanup(_image_client.wait_for_resource_deletion,
                        image_id)
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        _image_client.delete_image, image_id)

        if CONF.image_feature_enabled.api_v1:
            # In glance v1 the additional properties are stored in the headers.
            resp = _image_client.check_image(image_id)
            snapshot_image = common_image.get_image_meta_from_headers(resp)
            image_props = snapshot_image.get('properties', {})
        else:
            # In glance v2 the additional properties are flattened.
            snapshot_image = _image_client.show_image(image_id)
            image_props = snapshot_image

        bdm = image_props.get('block_device_mapping')
        if bdm:
            bdm = json.loads(bdm)
            if bdm and 'snapshot_id' in bdm[0]:
                snapshot_id = bdm[0]['snapshot_id']
                self.addCleanup(
                    self.snapshots_client.wait_for_resource_deletion,
                    snapshot_id)
                self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                                self.snapshots_client.delete_snapshot,
                                snapshot_id)
                waiters.wait_for_volume_resource_status(self.snapshots_client,
                                                        snapshot_id,
                                                        'available')
        image_name = snapshot_image['name']
        self.assertEqual(name, image_name)
        LOG.debug("Created snapshot image {} for server {}"
                  .format(image_name, server['name']))
        return snapshot_image
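For reference, the block_device_mapping image property parsed above is a JSON-encoded string, which is why json.loads is needed; a sketch with a hypothetical value (field names beyond snapshot_id are assumptions):

import json

# Hypothetical property value for a volume-backed server snapshot.
bdm_property = ('[{"boot_index": 0, "source_type": "snapshot", '
                '"destination_type": "volume", '
                '"snapshot_id": "11111111-2222-3333-4444-555555555555"}]')

bdm = json.loads(bdm_property)
if bdm and 'snapshot_id' in bdm[0]:
    print(bdm[0]['snapshot_id'])  # the snapshot to wait on and clean up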
Example #51
    def test_create_consistencygroup_from_consistencygroup(self):
        # Create volume type
        name = data_utils.rand_name("volume-type")
        volume_type = self.admin_volume_types_client.create_volume_type(
            name=name)['volume_type']

        # Create CG
        cg_name = data_utils.rand_name('CG')
        create_consistencygroup = (
            self.consistencygroups_adm_client.create_consistencygroup)
        cg = create_consistencygroup(volume_type['id'],
                                     name=cg_name)['consistencygroup']
        self.consistencygroups_adm_client.wait_for_consistencygroup_status(
            cg['id'], 'available')
        self.assertEqual(cg_name, cg['name'])

        # Create volume
        vol_name = data_utils.rand_name("volume")
        params = {
            'name': vol_name,
            'volume_type': volume_type['id'],
            'consistencygroup_id': cg['id'],
            'size': CONF.volume.volume_size
        }
        volume = self.admin_volume_client.create_volume(**params)['volume']
        waiters.wait_for_volume_resource_status(self.admin_volume_client,
                                                volume['id'], 'available')

        # Create CG from CG
        cg_name2 = data_utils.rand_name('CG_from_cg')
        create_consistencygroup2 = (
            self.consistencygroups_adm_client.create_consistencygroup_from_src)
        cg2 = create_consistencygroup2(source_cgid=cg['id'],
                                       name=cg_name2)['consistencygroup']
        self.consistencygroups_adm_client.wait_for_consistencygroup_status(
            cg2['id'], 'available')
        self.assertEqual(cg_name2, cg2['name'])
        vols = self.admin_volume_client.list_volumes(detail=True)['volumes']
        for vol in vols:
            if vol['consistencygroup_id'] == cg2['id']:
                waiters.wait_for_volume_resource_status(
                    self.admin_volume_client, vol['id'], 'available')

        # Clean up
        self._delete_consistencygroup(cg2['id'])
        self._delete_consistencygroup(cg['id'])
        self.admin_volume_types_client.delete_volume_type(volume_type['id'])
Example #52
    def test_unmanage_manage_volume(self):
        """Test unmanaging and managing volume"""
        # Create original volume
        org_vol_id = self.create_volume()['id']
        org_vol_info = self.admin_volume_client.show_volume(
            org_vol_id)['volume']

        # Unmanage the original volume
        self.admin_volume_client.unmanage_volume(org_vol_id)
        self.admin_volume_client.wait_for_resource_deletion(org_vol_id)

        # Verify the original volume does not exist in volume list
        params = {'all_tenants': 1}
        all_tenants_volumes = self.admin_volume_client.list_volumes(
            detail=True, params=params)['volumes']
        self.assertNotIn(org_vol_id, [v['id'] for v in all_tenants_volumes])

        # Manage volume
        new_vol_name = data_utils.rand_name(self.__class__.__name__ +
                                            '-volume')
        new_vol_ref = {
            'name': new_vol_name,
            'host': org_vol_info['os-vol-host-attr:host'],
            'ref': {
                CONF.volume.manage_volume_ref[0]:
                CONF.volume.manage_volume_ref[1] % org_vol_id
            },
            'volume_type': org_vol_info['volume_type'],
            'availability_zone': org_vol_info['availability_zone']
        }
        new_vol_id = self.admin_volume_manage_client.manage_volume(
            **new_vol_ref)['volume']['id']
        self.addCleanup(self.delete_volume, self.admin_volume_client,
                        new_vol_id)
        waiters.wait_for_volume_resource_status(self.admin_volume_client,
                                                new_vol_id, 'available')

        # Compare the managed volume with the original
        new_vol_info = self.admin_volume_client.show_volume(
            new_vol_id)['volume']
        self.assertNotEqual(org_vol_id, new_vol_id)
        self.assertEqual(new_vol_info['name'], new_vol_name)
        for key in [
                'size', 'volume_type', 'availability_zone',
                'os-vol-host-attr:host'
        ]:
            self.assertEqual(new_vol_info[key], org_vol_info[key])
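The ref dict in the manage step is built from the two-element CONF.volume.manage_volume_ref option; a sketch of how it expands, assuming the common ['source-name', 'volume-%s'] default (the actual value is deployment-specific):

manage_volume_ref = ['source-name', 'volume-%s']  # assumed config value
org_vol_id = '11111111-2222-3333-4444-555555555555'  # hypothetical id

ref = {manage_volume_ref[0]: manage_volume_ref[1] % org_vol_id}
# ref == {'source-name': 'volume-11111111-2222-3333-4444-555555555555'}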
Example #53
    def _create_snapshot_from_volume(self, vol_id):
        snap_name = data_utils.rand_name(self.__class__.__name__ + '-snapshot')
        snap = self.snapshots_client.create_snapshot(
            volume_id=vol_id, force=True, display_name=snap_name)['snapshot']
        self.addCleanup(self.snapshots_client.wait_for_resource_deletion,
                        snap['id'])
        self.addCleanup(self.snapshots_client.delete_snapshot, snap['id'])
        waiters.wait_for_volume_resource_status(self.snapshots_client,
                                                snap['id'], 'available')

        # NOTE(e0ne): Cinder API v2 uses name instead of display_name
        if 'display_name' in snap:
            self.assertEqual(snap_name, snap['display_name'])
        else:
            self.assertEqual(snap_name, snap['name'])

        return snap
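The display_name/name branch above handles the Cinder API version difference noted in the comment; an equivalent one-liner for the assertion, assuming exactly one of the two keys is present:

        self.assertEqual(snap_name, snap.get('display_name', snap.get('name')))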
Example #54
    def test_volume_upload(self):
        # TODO(felipemonteiro): The ``upload_volume`` endpoint also enforces
        # "volume:copy_volume_to_image" but is not currently contained in
        # Cinder's policy.json.
        image_name = data_utils.rand_name(self.__class__.__name__ + '-Image')

        self.rbac_utils.switch_role(self, toggle_rbac_role=True)
        body = self.volumes_client.upload_volume(
            self.volume['id'], image_name=image_name, visibility="private",
            disk_format=CONF.volume.disk_format)['os-volume_upload_image']
        image_id = body["image_id"]
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.image_client.delete_image,
                        image_id)
        waiters.wait_for_image_status(self.image_client, image_id, 'active')
        waiters.wait_for_volume_resource_status(self.os_admin.volumes_client,
                                                self.volume['id'], 'available')
Example #55
    def test_volume_swap_with_multiattach(self):
        # Create two volumes.
        # NOTE(gmann): Volumes are created before server creation so that
        # volumes cleanup can happen successfully irrespective of which volume
        # is attached to server.
        volume1 = self.create_volume(multiattach=True)
        volume2 = self.create_volume(multiattach=True)

        # Create two servers and wait for them to be ACTIVE.
        reservation_id = self.create_test_server(
            wait_until='ACTIVE', min_count=2,
            return_reservation_id=True)['reservation_id']
        # Get the servers using the reservation_id.
        servers = self.servers_client.list_servers(
            reservation_id=reservation_id)['servers']
        self.assertEqual(2, len(servers))
        # Attach volume1 to server1
        server1 = servers[0]
        self.attach_volume(server1, volume1)
        # Attach volume1 to server2
        server2 = servers[1]
        self.attach_volume(server2, volume1)

        # Swap volume1 to volume2 on server1, volume1 should remain attached
        # to server 2
        self.admin_servers_client.update_attached_volume(
            server1['id'], volume1['id'], volumeId=volume2['id'])
        # volume1 will return to in-use after the swap
        waiters.wait_for_volume_resource_status(self.volumes_client,
                                                volume1['id'], 'in-use')
        waiters.wait_for_volume_resource_status(self.volumes_client,
                                                volume2['id'], 'in-use')
        self.wait_for_server_volume_swap(server1['id'], volume1['id'],
                                         volume2['id'])

        # Verify volume2 is attached to server1
        vol_attachments = self.servers_client.list_volume_attachments(
            server1['id'])['volumeAttachments']
        self.assertEqual(1, len(vol_attachments))
        self.assertIn(volume2['id'], vol_attachments[0]['volumeId'])

        # Verify volume1 is still attached to server2
        vol_attachments = self.servers_client.list_volume_attachments(
            server2['id'])['volumeAttachments']
        self.assertEqual(1, len(vol_attachments))
        self.assertIn(volume1['id'], vol_attachments[0]['volumeId'])
Example #56
    def create_backup(self, volume_id, backup_client=None, **kwargs):
        """Wrapper utility that returns a test backup."""
        if backup_client is None:
            backup_client = self.backups_client
        if 'name' not in kwargs:
            name = data_utils.rand_name(self.__class__.__name__ + '-Backup')
            kwargs['name'] = name

        backup = backup_client.create_backup(volume_id=volume_id,
                                             **kwargs)['backup']
        # addCleanup runs cleanups in LIFO order, so the wait must be
        # registered before the delete so that it executes after the
        # backup has been deleted.
        self.addCleanup(backup_client.wait_for_resource_deletion, backup['id'])
        self.addCleanup(backup_client.delete_backup, backup['id'])
        waiters.wait_for_volume_resource_status(backup_client, backup['id'],
                                                'available')
        return backup
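A self-contained sketch of the LIFO ordering the comment above relies on, using plain unittest; doCleanups runs the registered cleanups immediately so the order can be observed:

import unittest

class CleanupOrderDemo(unittest.TestCase):
    def test_lifo_order(self):
        calls = []
        # Registered first, so it runs last: the wait for deletion.
        self.addCleanup(calls.append, 'wait_for_resource_deletion')
        # Registered second, so it runs first: the delete itself.
        self.addCleanup(calls.append, 'delete_backup')
        self.doCleanups()  # run the cleanups now to observe the order
        self.assertEqual(
            ['delete_backup', 'wait_for_resource_deletion'], calls)

if __name__ == '__main__':
    unittest.main()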
Example #57
    def test_attach_scsi_disk_with_config_drive(self):
        """Test the attach/detach volume with config drive/scsi disk

        Enable the config drive, followed by booting an instance
        from an image with meta properties hw_cdrom: scsi and use
        virtio-scsi mode with further asserting list volume attachments
        in instance after attach and detach of the volume.
        """
        custom_img = self._create_image_with_custom_property(
            hw_scsi_model='virtio-scsi',
            hw_disk_bus='scsi',
            hw_cdrom_bus='scsi')
        validation_resources = self.get_test_validation_resources(
            self.os_primary)
        server = self.create_test_server(
            image_id=custom_img,
            config_drive=True,
            validatable=True,
            validation_resources=validation_resources,
            wait_until="SSHABLE")
        # NOTE(lyarwood): self.create_test_server deletes the server during
        # class-level cleanup, so add a server cleanup here to ensure the
        # instance is deleted before the created image. This avoids failures
        # when the rbd backend is used for both Glance and Nova ephemeral
        # storage. Also wait until the server is deleted, otherwise image
        # deletion can start before the server is deleted.
        self.addCleanup(waiters.wait_for_server_termination,
                        self.servers_client, server['id'])
        self.addCleanup(self.servers_client.delete_server, server['id'])

        volume = self.create_volume()
        attachment = self.attach_volume(server, volume)
        waiters.wait_for_volume_resource_status(self.volumes_client,
                                                attachment['volumeId'],
                                                'in-use')
        volume_after_attach = self.servers_client.list_volume_attachments(
            server['id'])['volumeAttachments']
        self.assertEqual(1, len(volume_after_attach),
                         "Failed to attach volume")
        self.servers_client.detach_volume(server['id'], attachment['volumeId'])
        waiters.wait_for_volume_resource_status(self.volumes_client,
                                                attachment['volumeId'],
                                                'available')
        waiters.wait_for_volume_attachment_remove_from_server(
            self.servers_client, server['id'], attachment['volumeId'])
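_create_image_with_custom_property is not shown in this excerpt; a minimal sketch of one possible shape, assuming a Glance v2 images client with show_image_file/store_image_file and that the base image's bits can be re-uploaded with extra metadata properties. The client attribute names and disk format are assumptions.

    def _create_image_with_custom_property(self, **properties):
        # Download the configured base image's bits, then upload them as a
        # new image carrying the extra metadata properties.
        data = self.image_client.show_image_file(CONF.compute.image_ref).data
        image = self.image_client.create_image(
            name=data_utils.rand_name(self.__class__.__name__ + '-image'),
            container_format='bare',
            disk_format='raw',
            **properties)
        self.addCleanup(self.image_client.delete_image, image['id'])
        self.image_client.store_image_file(image['id'], data)
        return image['id']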
Example #58
    def test_detach_volume_shelved_or_offload_server(self):
        # Count number of volumes on instance, shelve
        # server and attach pre-created volume to shelved server
        server = self._create_server()
        volume = self.create_volume()
        num_vol = self._count_volumes(server)
        self._shelve_server(server)

        # Attach and then detach the volume
        self.attach_volume(server, volume, device=('/dev/%s' % self.device))
        self.servers_client.detach_volume(server['id'], volume['id'])
        waiters.wait_for_volume_resource_status(self.volumes_client,
                                                volume['id'], 'available')

        # Unshelve the instance and check that we have the expected number of
        # volume(s)
        self._unshelve_server_and_check_volumes(server, num_vol)
Example #59
    def test_create_group_snapshot(self):
        with self.override_role():
            name = data_utils.rand_name(
                self.__class__.__name__ + '-Group_Snapshot')
            group_snapshot = self.group_snapshots_client.create_group_snapshot(
                name=name, group_id=self.grp['id'])['group_snapshot']
        group_snapshot['group_id'] = self.grp['id']
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self._delete_group_snapshot, group_snapshot)
        waiters.wait_for_volume_resource_status(
            self.group_snapshots_client, group_snapshot['id'], 'available')
        snapshots = self.snapshots_client.list_snapshots(
            detail=True)['snapshots']
        for snap in snapshots:
            if self.vol['id'] == snap['volume_id']:
                waiters.wait_for_volume_resource_status(
                    self.snapshots_client, snap['id'], 'available')
Example #60
    def test_manage_snapshot_rbac(self):
        name = data_utils.rand_name(self.__class__.__name__ +
                                    '-Managed-Snapshot')
        snapshot_ref = {
            'volume_id': self.volume['id'],
            'ref': {CONF.volume.manage_snapshot_ref[0]:
                    CONF.volume.manage_snapshot_ref[1] % self.snapshot['id']},
            'name': name
        }
        with self.rbac_utils.override_role(self):
            snapshot = self.snapshot_manage_client.manage_snapshot(
                **snapshot_ref)['snapshot']
        self.addCleanup(self.delete_snapshot, snapshot['id'],
                        self.snapshots_client)
        waiters.wait_for_volume_resource_status(self.snapshots_client,
                                                snapshot['id'],
                                                'available')