Example #1
    def test_resource_removed_fail(self):
        # Disable the retry back-off so the failing check is exercised
        # immediately instead of waiting between attempts.
        openstack_utils.resource_removed.retry.wait = \
            tenacity.wait_none()
        resource_mock = mock.MagicMock()
        resource_mock.list.return_value = [mock.MagicMock(id='e01df65a')]
        with self.assertRaises(AssertionError):
            openstack_utils.resource_removed(resource_mock, 'e01df65a')
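This failure case pins down what the helper ultimately checks: the mocked list() still returns an object whose id matches, so the assertion fires. A minimal sketch of the inner _resource_removed helper that would behave this way (the body and message below are assumptions, not copied from zaza):

import logging


def _resource_removed(resource, resource_id, msg="resource"):
    # Sketch only: assert that resource_id no longer appears in the listing.
    matching = [r for r in resource.list() if r.id == resource_id]
    logging.debug("%s %s removed: %s", msg, resource_id, not matching)
    assert not matching, "{} {} still present".format(msg, resource_id)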
Example #2
    def test_resource_removed_custom_retry(self):
        self.patch_object(openstack_utils, "_resource_removed")

        def _retryer(f, arg1, arg2, arg3):
            f(arg1, arg2, arg3)

        self.patch_object(openstack_utils.tenacity,
                          "Retrying",
                          return_value=_retryer)
        saa_mock = mock.MagicMock()
        self.patch_object(openstack_utils.tenacity,
                          "stop_after_attempt",
                          return_value=saa_mock)
        we_mock = mock.MagicMock()
        self.patch_object(openstack_utils.tenacity,
                          "wait_exponential",
                          return_value=we_mock)
        self._resource_removed.return_value = True
        openstack_utils.resource_removed('resource',
                                         'e01df65a',
                                         wait_exponential_multiplier=2,
                                         wait_iteration_max_time=20,
                                         stop_after_attempt=2)
        self._resource_removed.assert_called_once_with('resource', 'e01df65a',
                                                       'resource')
        self.Retrying.assert_called_once_with(wait=we_mock,
                                              reraise=True,
                                              stop=saa_mock)
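The assertions above spell out the wiring of the public wrapper: it builds tenacity.Retrying from the keyword arguments with reraise=True and delegates to _resource_removed with the resource, the id and the message. A sketch consistent with that (using the _resource_removed helper sketched under Example #1; the default values here are illustrative assumptions):

import tenacity


def resource_removed(resource,
                     resource_id,
                     msg="resource",
                     wait_exponential_multiplier=1,
                     wait_iteration_max_time=60,
                     stop_after_attempt=10):
    # Sketch only: retry the removal check with exponential back-off until
    # it passes or the attempt limit is reached, reraising the last error.
    retryer = tenacity.Retrying(
        wait=tenacity.wait_exponential(
            multiplier=wait_exponential_multiplier,
            max=wait_iteration_max_time),
        reraise=True,
        stop=tenacity.stop_after_attempt(stop_after_attempt))
    retryer(_resource_removed, resource, resource_id, msg)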
Example #3
    def remove_vm_on_failure(retry_state):
        logging.info(
            'Detected failure launching or connecting to VM {}'.format(
                vm_name))
        keystone_session = openstack_utils.get_overcloud_keystone_session()
        nova_client = openstack_utils.get_nova_session_client(keystone_session)
        vm = nova_client.servers.find(name=vm_name)
        openstack_utils.resource_removed(
            nova_client.servers,
            vm.id,
            msg="Waiting for the Nova VM {} to be deleted".format(vm.name))
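The retry_state argument suggests this function is meant to be handed to tenacity, for example as a retry_error_callback on whatever launches and probes the VM, so the instance is cleaned up once every attempt has failed. A hypothetical wiring (launch_and_probe_vm and the retry limits are invented for illustration; remove_vm_on_failure and vm_name come from the enclosing scope shown above):

import tenacity


@tenacity.retry(wait=tenacity.wait_exponential(multiplier=2, max=60),
                stop=tenacity.stop_after_attempt(3),
                retry_error_callback=remove_vm_on_failure)
def launch_and_probe_vm():
    # Hypothetical body: boot the VM named vm_name and verify connectivity,
    # raising on failure so tenacity retries and, after the final attempt,
    # calls remove_vm_on_failure(retry_state) instead of raising RetryError.
    ...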
Example #4
    def test_120_volume_force_delete(self):
        """Test force deleting a volume."""
        vol_new = self.cinder_client.volumes.create(
            name='{}-120-vol'.format(self.RESOURCE_PREFIX),
            size='1')
        openstack_utils.resource_reaches_status(
            self.cinder_client.volumes,
            vol_new.id,
            expected_status="available",
            msg="Volume status wait")
        vol_new.force_delete()
        openstack_utils.resource_removed(
            self.cinder_client.volumes,
            vol_new.id,
            msg="Volume")
Example #5
    def tearDownClass(cls):
        """Run class teardown after tests finished."""
        # Cleanup Nova servers
        logging.info('Cleaning up test Nova servers')
        fips_reservations = []
        for vm in cls.nova_client.servers.list():
            fips_reservations += neutron_tests.floating_ips_from_instance(vm)
            vm.delete()
            openstack_utils.resource_removed(
                cls.nova_client.servers,
                vm.id,
                msg="Waiting for the Nova VM {} to be deleted".format(vm.name))

        # Delete FiPs reservations
        logging.info('Cleaning up test FiPs reservations')
        neutron = openstack_utils.get_neutron_session_client(
            session=cls.keystone_session)
        for fip in neutron.list_floatingips()['floatingips']:
            if fip['floating_ip_address'] in fips_reservations:
                neutron.delete_floatingip(fip['id'])

        # Cleanup Manila shares
        logging.info('Cleaning up test shares')
        for share in cls.manila_client.shares.list():
            share.delete()
            openstack_utils.resource_removed(
                cls.manila_client.shares,
                share.id,
                msg="Waiting for the Manila share {} to be deleted".format(
                    share.name))

        # Cleanup test Manila share servers (spawned by the driver when DHSS
        # is enabled).
        logging.info('Cleaning up test shares servers (if found)')
        for server in cls.manila_client.share_servers.list():
            server.delete()
            openstack_utils.resource_removed(
                cls.manila_client.share_servers,
                server.id,
                msg="Waiting for the share server {} to be deleted".format(
                    server.id))
Example #6
    def test_resource_removed(self):
        self.patch_object(openstack_utils, "_resource_removed")
        self._resource_removed.return_value = True
        openstack_utils.resource_removed('resource', 'e01df65a')
        self._resource_removed.assert_called_once_with(
            'resource', 'e01df65a', 'resource')
Example #7
    def test_410_cinder_vol_create_backup_delete_restore_pool_inspect(self):
        """Create, backup, delete, restore a ceph-backed cinder volume.

        Create, backup, delete, restore a ceph-backed cinder volume, and
        inspect ceph cinder pool object count as the volume is created
        and deleted.
        """
        unit_name = zaza.model.get_lead_unit_name('ceph-mon')
        obj_count_samples = []
        pool_size_samples = []
        pools = ceph_utils.get_ceph_pools(unit_name)
        expected_pool = 'cinder-ceph'
        cinder_ceph_pool = pools[expected_pool]

        # Check ceph cinder pool object count, disk space usage and pool name
        logging.info('Checking ceph cinder pool original samples...')
        pool_name, obj_count, kb_used = ceph_utils.get_ceph_pool_sample(
            unit_name, cinder_ceph_pool)

        obj_count_samples.append(obj_count)
        pool_size_samples.append(kb_used)

        self.assertEqual(pool_name, expected_pool)

        # Create ceph-backed cinder volume
        cinder_vol = self.cinder_client.volumes.create(
            name='{}-410-vol'.format(self.RESOURCE_PREFIX), size=1)
        openstack_utils.resource_reaches_status(self.cinder_client.volumes,
                                                cinder_vol.id,
                                                wait_iteration_max_time=180,
                                                stop_after_attempt=30,
                                                expected_status='available',
                                                msg='Volume status wait')

        # Backup the volume
        vol_backup = self.cinder_client.backups.create(
            cinder_vol.id,
            name='{}-410-backup-vol'.format(self.RESOURCE_PREFIX))
        openstack_utils.resource_reaches_status(self.cinder_client.backups,
                                                vol_backup.id,
                                                wait_iteration_max_time=180,
                                                stop_after_attempt=30,
                                                expected_status='available',
                                                msg='Volume status wait')
        # Delete the volume
        openstack_utils.delete_volume(self.cinder_client, cinder_vol.id)
        # Restore the volume
        self.cinder_client.restores.restore(vol_backup.id)
        openstack_utils.resource_reaches_status(self.cinder_client.backups,
                                                vol_backup.id,
                                                wait_iteration_max_time=180,
                                                stop_after_attempt=15,
                                                expected_status='available',
                                                msg='Backup status wait')
        # Delete the backup
        openstack_utils.delete_volume_backup(self.cinder_client, vol_backup.id)
        openstack_utils.resource_removed(self.cinder_client.backups,
                                         vol_backup.id,
                                         wait_iteration_max_time=180,
                                         stop_after_attempt=15,
                                         msg="Backup volume")

        # Re-check ceph cinder pool object count and disk usage
        logging.info('Checking ceph cinder pool samples '
                     'after volume create...')
        pool_name, obj_count, kb_used = ceph_utils.get_ceph_pool_sample(
            unit_name, cinder_ceph_pool, self.model_name)

        obj_count_samples.append(obj_count)
        pool_size_samples.append(kb_used)

        name = '{}-410-vol'.format(self.RESOURCE_PREFIX)
        vols = self.cinder_client.volumes.list()
        try:
            cinder_vols = [v for v in vols if v.name == name]
        except AttributeError:
            cinder_vols = [v for v in vols if v.display_name == name]
        if not cinder_vols:
            # NOTE(hopem): it appears that at some point cinder-backup stopped
            # restoring volume metadata properly so revert to default name if
            # original is not found
            name = "restore_backup_{}".format(vol_backup.id)
            try:
                cinder_vols = [v for v in vols if v.name == name]
            except AttributeError:
                cinder_vols = [v for v in vols if v.display_name == name]

        self.assertTrue(cinder_vols)

        cinder_vol = cinder_vols[0]

        # Delete restored cinder volume
        openstack_utils.delete_volume(self.cinder_client, cinder_vol.id)
        openstack_utils.resource_removed(self.cinder_client.volumes,
                                         cinder_vol.id,
                                         wait_iteration_max_time=180,
                                         stop_after_attempt=15,
                                         msg="Volume")

        @tenacity.retry(wait=tenacity.wait_exponential(multiplier=10, max=300),
                        reraise=True,
                        stop=tenacity.stop_after_attempt(10),
                        retry=tenacity.retry_if_exception_type(AssertionError))
        def _check_get_ceph_pool_sample(obj_count_samples, pool_size_samples):
            pool_name, obj_count, kb_used = ceph_utils.get_ceph_pool_sample(
                unit_name, cinder_ceph_pool, self.model_name)

            _obj_count_samples = copy.deepcopy(obj_count_samples)
            _pool_size_samples = copy.deepcopy(pool_size_samples)
            _obj_count_samples.append(obj_count)
            _pool_size_samples.append(kb_used)
            # Validate ceph cinder pool object count samples over time
            original, created, deleted = range(3)
            self.assertFalse(
                _obj_count_samples[created] <= _obj_count_samples[original])
            self.assertFalse(
                _obj_count_samples[deleted] >= _obj_count_samples[created])

            # Luminous (pike) ceph seems more efficient at disk usage so we
            # cannot guarantee the ordering of kb_used
            if (openstack_utils.get_os_release() <
                    openstack_utils.get_os_release('xenial_mitaka')):
                self.assertFalse(
                    _pool_size_samples[created] <= _pool_size_samples[original]
                )
                self.assertFalse(
                    _pool_size_samples[deleted] >= _pool_size_samples[created])

        # Final check, ceph cinder pool object count and disk usage
        logging.info('Checking ceph cinder pool after volume delete...')
        # It sometimes takes a short time for removal to be reflected in
        # get_ceph_pool_sample so wrap check in tenacity decorator to retry.
        _check_get_ceph_pool_sample(obj_count_samples, pool_size_samples)
Example #8
    def test_resource_removed(self):
        resource_mock = mock.MagicMock()
        resource_mock.list.return_value = [mock.MagicMock(id='ba8204b0')]
        # The target id 'e01df65a' is absent from the mocked listing, so the
        # resource counts as already removed and no AssertionError is raised.
        openstack_utils.resource_removed(resource_mock, 'e01df65a')