    def test_get_ceph_pools(self):
        self.patch_object(model, 'run_on_unit')

        # Bad return code
        result = {
            'Code': '1',
            'Stdout': '',
            'Stderr': 'something went wrong',
        }
        self.run_on_unit.return_value = result
        with self.assertRaises(model.CommandRunFailed):
            ceph_utils.get_ceph_pools('ceph-mon/0')

        # Xenial Queens output
        result = {
            'Code': '0',
            'Stdout': '1 cinder-ceph,2 glance,',
            'Stderr': ''
        }
        self.run_on_unit.return_value = result
        expected = {'cinder-ceph': 1, 'glance': 2}
        actual = ceph_utils.get_ceph_pools('ceph-mon/0')
        self.assertEqual(expected, actual)
        # Bionic Queens output
        result = {
            'Code': '0',
            'Stdout': '1 cinder-ceph\n2 glance',
            'Stderr': ''
        }
        self.run_on_unit.return_value = result
        expected = {'cinder-ceph': 1, 'glance': 2}
        actual = ceph_utils.get_ceph_pools('ceph-mon/0')
        self.assertEqual(expected, actual)
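The two happy-path cases above exercise the two ceph osd lspools output formats the code under test has to parse: comma-separated entries on Xenial Queens and newline-separated entries on Bionic Queens. A minimal sketch of a parser that satisfies both cases follows; the name parse_lspools_output is hypothetical and the real zaza helper may differ in detail.

import re


def parse_lspools_output(stdout):
    """Map pool name to pool id from ceph osd lspools output.

    Handles both the comma-separated (Xenial Queens) and newline-separated
    (Bionic Queens) formats shown in the test above.
    """
    pools = {}
    for entry in re.split(r'[,\n]', stdout):
        entry = entry.strip()
        if not entry:
            continue
        pool_id, name = entry.split(' ', 1)
        pools[name] = int(pool_id)
    return pools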
Example 2
    def test_ceph_check_osd_pools(self):
        """Check OSD pools.

        Check osd pools on all ceph units, expect them to be
        identical, and expect specific pools to be present.
        """
        logging.info('Checking pools on ceph units...')

        expected_pools = zaza_ceph.get_expected_pools()
        results = []
        unit_name = 'ceph-mon/0'

        # Check for presence of expected pools on each unit
        logging.debug('Expected pools: {}'.format(expected_pools))
        pools = zaza_ceph.get_ceph_pools(unit_name)
        results.append(pools)

        for expected_pool in expected_pools:
            if expected_pool not in pools:
                msg = ('{} does not have pool: '
                       '{}'.format(unit_name, expected_pool))
                raise zaza_exceptions.CephPoolNotFound(msg)
        logging.debug('{} has (at least) the expected '
                      'pools.'.format(unit_name))

        # Check that all units returned the same pool name:id data
        for i, result in enumerate(results):
            for other in results[i + 1:]:
                logging.debug('result: {}, other: {}'.format(result, other))
                self.assertEqual(result, other)
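The docstring above describes checking the pools on all ceph units and comparing the results, but this snippet only samples ceph-mon/0, so the pairwise comparison loop has a single entry to work with. A hedged sketch of how the collection step could cover every ceph-mon unit is shown below; the import paths and the get_units/entity_id calls are assumed from the aliases used elsewhere in these examples.

import zaza.model as zaza_model
import zaza.openstack.utilities.ceph as zaza_ceph


def collect_pools_from_all_mons():
    """Return the pool name to id mapping reported by each ceph-mon unit."""
    results = []
    for unit in zaza_model.get_units('ceph-mon'):
        # Every entry should be identical if the mons agree on the pool list.
        results.append(zaza_ceph.get_ceph_pools(unit.entity_id))
    return results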
Example 3
    def test_cinder_ceph_restrict_pool_setup(self):
        """Make sure cinder-ceph restrict pool was created successfully."""
        logging.info('Wait for idle/ready status...')
        zaza_model.wait_for_application_states()

        pools = zaza_ceph.get_ceph_pools('ceph-mon/0')
        if 'cinder-ceph' not in pools:
            msg = 'cinder-ceph pool was not found upon querying ceph-mon/0'
            raise zaza_exceptions.CephPoolNotFound(msg)

        # Checking for cinder-ceph specific permissions makes the test
        # more robust when additional relations to ceph are added for
        # other applications (such as glance and nova).
        expected_permissions = [
            "allow rwx pool=cinder-ceph",
            "allow class-read object_prefix rbd_children",
        ]
        cmd = "sudo ceph auth get client.cinder-ceph"
        result = zaza_model.run_on_unit('ceph-mon/0', cmd)
        output = result.get('Stdout').strip()

        for expected in expected_permissions:
            if expected not in output:
                msg = ('cinder-ceph pool restriction ({}) was not'
                       ' configured correctly.'
                       ' Found: {}'.format(expected, output))
                raise zaza_exceptions.CephPoolNotConfigured(msg)
Example 4
    def test_cinder_ceph_restrict_pool_setup(self):
        """Make sure cinder-ceph restrict pool was created successfully."""
        logging.info('Wait for idle/ready status...')
        zaza_model.wait_for_application_states()

        pools = zaza_ceph.get_ceph_pools('ceph-mon/0')
        if 'cinder-ceph' not in pools:
            msg = 'cinder-ceph pool was not found upon querying ceph-mon/0'
            raise zaza_exceptions.CephPoolNotFound(msg)

        expected = "pool=cinder-ceph, allow class-read " \
                   "object_prefix rbd_children"
        cmd = "sudo ceph auth get client.cinder-ceph"
        result = zaza_model.run_on_unit('ceph-mon/0', cmd)
        output = result.get('Stdout').strip()

        if expected not in output:
            msg = ('cinder-ceph pool restriction was not configured correctly.'
                   ' Found: {}'.format(output))
            raise zaza_exceptions.CephPoolNotConfigured(msg)
Example 5
    def test_410_cinder_vol_create_backup_delete_restore_pool_inspect(self):
        """Create, backup, delete, restore a ceph-backed cinder volume.

        Create, backup, delete, restore a ceph-backed cinder volume, and
        inspect ceph cinder pool object count as the volume is created
        and deleted.
        """
        unit_name = zaza.model.get_lead_unit_name('ceph-mon')
        obj_count_samples = []
        pool_size_samples = []
        pools = ceph_utils.get_ceph_pools(unit_name)
        expected_pool = 'cinder-ceph'
        cinder_ceph_pool = pools[expected_pool]

        # Check ceph cinder pool object count, disk space usage and pool name
        logging.info('Checking ceph cinder pool original samples...')
        pool_name, obj_count, kb_used = ceph_utils.get_ceph_pool_sample(
            unit_name, cinder_ceph_pool)

        obj_count_samples.append(obj_count)
        pool_size_samples.append(kb_used)

        self.assertEqual(pool_name, expected_pool)

        # Create ceph-backed cinder volume
        cinder_vol = self.cinder_client.volumes.create(
            name='{}-410-vol'.format(self.RESOURCE_PREFIX), size=1)
        openstack_utils.resource_reaches_status(self.cinder_client.volumes,
                                                cinder_vol.id,
                                                wait_iteration_max_time=180,
                                                stop_after_attempt=30,
                                                expected_status='available',
                                                msg='Volume status wait')

        # Backup the volume
        vol_backup = self.cinder_client.backups.create(
            cinder_vol.id,
            name='{}-410-backup-vol'.format(self.RESOURCE_PREFIX))
        openstack_utils.resource_reaches_status(self.cinder_client.backups,
                                                vol_backup.id,
                                                wait_iteration_max_time=180,
                                                stop_after_attempt=30,
                                                expected_status='available',
                                                msg='Backup status wait')
        # Delete the volume
        openstack_utils.delete_volume(self.cinder_client, cinder_vol.id)
        # Restore the volume
        self.cinder_client.restores.restore(vol_backup.id)
        openstack_utils.resource_reaches_status(self.cinder_client.backups,
                                                vol_backup.id,
                                                wait_iteration_max_time=180,
                                                stop_after_attempt=15,
                                                expected_status='available',
                                                msg='Backup status wait')
        # Delete the backup
        openstack_utils.delete_volume_backup(self.cinder_client, vol_backup.id)
        openstack_utils.resource_removed(self.cinder_client.backups,
                                         vol_backup.id,
                                         wait_iteration_max_time=180,
                                         stop_after_attempt=15,
                                         msg="Backup volume")

        # Re-check ceph cinder pool object count and disk usage
        logging.info('Checking ceph cinder pool samples '
                     'after volume create...')
        pool_name, obj_count, kb_used = ceph_utils.get_ceph_pool_sample(
            unit_name, cinder_ceph_pool, self.model_name)

        obj_count_samples.append(obj_count)
        pool_size_samples.append(kb_used)

        name = '{}-410-vol'.format(self.RESOURCE_PREFIX)
        vols = self.cinder_client.volumes.list()
        try:
            cinder_vols = [v for v in vols if v.name == name]
        except AttributeError:
            cinder_vols = [v for v in vols if v.display_name == name]
        if not cinder_vols:
            # NOTE(hopem): it appears that at some point cinder-backup stopped
            # restoring volume metadata properly so revert to default name if
            # original is not found
            name = "restore_backup_{}".format(vol_backup.id)
            try:
                cinder_vols = [v for v in vols if v.name == name]
            except AttributeError:
                cinder_vols = [v for v in vols if v.display_name == name]

        self.assertTrue(cinder_vols)

        cinder_vol = cinder_vols[0]

        # Delete restored cinder volume
        openstack_utils.delete_volume(self.cinder_client, cinder_vol.id)
        openstack_utils.resource_removed(self.cinder_client.volumes,
                                         cinder_vol.id,
                                         wait_iteration_max_time=180,
                                         stop_after_attempt=15,
                                         msg="Volume")

        @tenacity.retry(wait=tenacity.wait_exponential(multiplier=10, max=300),
                        reraise=True,
                        stop=tenacity.stop_after_attempt(10),
                        retry=tenacity.retry_if_exception_type(AssertionError))
        def _check_get_ceph_pool_sample(obj_count_samples, pool_size_samples):
            pool_name, obj_count, kb_used = ceph_utils.get_ceph_pool_sample(
                unit_name, cinder_ceph_pool, self.model_name)

            _obj_count_samples = copy.deepcopy(obj_count_samples)
            _pool_size_samples = copy.deepcopy(pool_size_samples)
            _obj_count_samples.append(obj_count)
            _pool_size_samples.append(kb_used)
            # Validate ceph cinder pool object count samples over time
            original, created, deleted = range(3)
            self.assertFalse(
                _obj_count_samples[created] <= _obj_count_samples[original])
            self.assertFalse(
                _obj_count_samples[deleted] >= _obj_count_samples[created])

            # Luminous (pike) ceph seems more efficient at disk usage so we
            # cannot guarantee the ordering of kb_used
            if (openstack_utils.get_os_release() <
                    openstack_utils.get_os_release('xenial_mitaka')):
                self.assertFalse(
                    _pool_size_samples[created] <= _pool_size_samples[original]
                )
                self.assertFalse(
                    _pool_size_samples[deleted] >= _pool_size_samples[created])

        # Final check, ceph cinder pool object count and disk usage
        logging.info('Checking ceph cinder pool after volume delete...')
        # It sometimes takes a short time for the removal to be reflected in
        # get_ceph_pool_sample, so wrap the check in a tenacity retry.
        _check_get_ceph_pool_sample(obj_count_samples, pool_size_samples)
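The samples above rely on ceph_utils.get_ceph_pool_sample returning a (pool_name, obj_count, kb_used) triple for a given pool id. As a rough sketch only, such a helper could be built on top of ceph df --format json; the function name sample_ceph_pool is hypothetical and the JSON field names, which vary between Ceph releases, are assumptions rather than a description of the real zaza implementation.

import json

import zaza.model


def sample_ceph_pool(unit_name, pool_id, model_name=None):
    """Return (pool_name, object_count, kb_used) for the given pool id.

    Illustrative only: the 'objects' and 'kb_used' field names are assumed
    and differ between Ceph releases.
    """
    result = zaza.model.run_on_unit(
        unit_name, 'sudo ceph df --format json', model_name=model_name)
    df = json.loads(result.get('Stdout'))
    for pool in df['pools']:
        if pool['id'] == pool_id:
            stats = pool['stats']
            return pool['name'], stats['objects'], stats['kb_used']
    raise ValueError('pool id {} not found in ceph df output'.format(pool_id))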