Example #1
def configure_cinder_backup():
    """Configure cinder-backup-swift-proxy."""
    keystone_ip = zaza_model.get_app_ips('swift-keystone')[0]
    swift_ip = zaza_model.get_app_ips('swift-proxy')[0]
    auth_ver = (zaza_model.get_application_config('swift-keystone')
                .get('preferred-api-version').get('value'))
    if auth_ver == 2:
        auth_url = 'http://{}:5000/v2.0'.format(keystone_ip)
        endpoint_url = 'http://{}:8080/v1/AUTH_'.format(swift_ip)
    else:
        auth_url = 'http://{}:5000/v3'.format(keystone_ip)
        endpoint_url = 'http://{}:8080/v1/AUTH'.format(swift_ip)
    cinder_backup_swift_proxy_conf = {
        'endpoint-url': endpoint_url,
        'auth-url': auth_url
    }
    juju_service = 'cinder-backup-swift-proxy'
    zaza_model.set_application_config(juju_service,
                                      cinder_backup_swift_proxy_conf)
    zaza_model.wait_for_agent_status()
    zaza_model.wait_for_application_states()
    _singleton = zaza.openstack.charm_tests.test_utils.OpenStackBaseTest()
    _singleton.setUpClass()
    with _singleton.config_change(cinder_backup_swift_proxy_conf,
                                  cinder_backup_swift_proxy_conf):
        # wait for configuration to be applied then return
        pass
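To confirm the options were applied, they can be read back with the same config helper (a sketch; the nested 'value' key mirrors how get_application_config output is consumed elsewhere in this listing):

    applied = zaza_model.get_application_config(juju_service)
    # e.g. log the auth-url the charm now holds
    logging.info('auth-url is now {}'.format(applied['auth-url']['value']))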
Example #2
    def test_cinder_ceph_restrict_pool_setup(self):
        """Make sure cinder-ceph restrict pool was created successfully."""
        logging.info('Wait for idle/ready status...')
        zaza_model.wait_for_application_states()

        pools = zaza_ceph.get_ceph_pools('ceph-mon/0')
        if 'cinder-ceph' not in pools:
            msg = 'cinder-ceph pool was not found upon querying ceph-mon/0'
            raise zaza_exceptions.CephPoolNotFound(msg)

        # Checking for cinder-ceph specific permissions makes
        # the test more rugged when we add additional relations
        # to ceph for other applications (such as glance and nova).
        expected_permissions = [
            "allow rwx pool=cinder-ceph",
            "allow class-read object_prefix rbd_children",
        ]
        cmd = "sudo ceph auth get client.cinder-ceph"
        result = zaza_model.run_on_unit('ceph-mon/0', cmd)
        output = result.get('Stdout').strip()

        for expected in expected_permissions:
            if expected not in output:
                msg = ('cinder-ceph pool restriction ({}) was not'
                       ' configured correctly.'
                       ' Found: {}'.format(expected, output))
                raise zaza_exceptions.CephPoolNotConfigured(msg)
Example #3
    def test_ceph_health(self):
        """Make sure ceph-proxy can communicate with ceph."""
        logging.info('Wait for idle/ready status...')
        zaza_model.wait_for_application_states()

        self.assertEqual(
            zaza_model.run_on_leader("ceph-proxy", "sudo ceph health")["Code"],
            "0")
Example #4
 def test_wait_for_application_states_not_ready_wsmsg(self):
     self._application_states_setup({
         'workload-status': 'active',
         'workload-status-message': 'Unit is not ready'
     })
     model.wait_for_application_states('modelname', timeout=1)
     self.assertFalse(self.system_ready)
Example #5
def s3_setup():
    """Run setup of s3 options for Trilio."""
    session = openstack_utils.get_overcloud_keystone_session()
    ks_client = openstack_utils.get_keystone_session_client(session)

    # Get token data so we can glean our user_id and project_id
    token_data = ks_client.tokens.get_token_data(session.get_token())
    project_id = token_data['token']['project']['id']
    user_id = token_data['token']['user']['id']

    # Store URL to service providing S3 compatible API
    for entry in token_data['token']['catalog']:
        if entry['type'] == 's3':
            for endpoint in entry['endpoints']:
                if endpoint['interface'] == 'public':
                    s3_region = endpoint['region']
                    s3_endpoint = endpoint['url']

    # Create AWS compatible application credentials in Keystone
    ec2_creds = ks_client.ec2.create(user_id, project_id)
    cacert = openstack_utils.get_cacert()
    kwargs = {
        'region_name': s3_region,
        'aws_access_key_id': ec2_creds.access,
        'aws_secret_access_key': ec2_creds.secret,
        'endpoint_url': s3_endpoint,
        'verify': cacert,
    }
    s3 = boto3.resource('s3', **kwargs)

    # Create bucket
    bucket_name = 'zaza-trilio'
    logging.info("Creating bucket: {}".format(bucket_name))
    bucket = s3.Bucket(bucket_name)
    bucket.create()

    s3_config = {
        'tv-s3-secret-key': ec2_creds.secret,
        'tv-s3-access-key': ec2_creds.access,
        'tv-s3-region-name': s3_region,
        'tv-s3-bucket': bucket_name,
        'tv-s3-endpoint-url': s3_endpoint
    }
    for app in ['trilio-wlm', 'trilio-data-mover']:
        logging.info("Setting s3 config for {}".format(app))
        zaza_model.set_application_config(app, s3_config)
    test_config = lifecycle_utils.get_charm_config(fatal=False)
    states = test_config.get('target_deploy_status', {})
    states['trilio-wlm'] = {
        'workload-status': 'blocked',
        'workload-status-message': 'application not trusted'
    }
    zaza_model.wait_for_application_states(states=states, timeout=7200)
    zaza_model.block_until_all_units_idle()
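A quick way to confirm the bucket exists is to list buckets through the same boto3 resource handle (a sketch for illustration; `s3` and `bucket_name` are the names defined in the function above):

    # The freshly created bucket should appear in the account's bucket list
    assert bucket_name in [b.name for b in s3.buckets.all()]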
Example #6
 def test_wait_for_application_states_bespoke_msg(self):
     self._application_states_setup({
         'workload-status': 'active',
         'workload-status-message': 'Sure, I could do something'})
     model.wait_for_application_states(
         'modelname',
         states={'app': {
             'workload-status-message': 'Sure, I could do something'}},
         timeout=1)
     self.assertTrue(self.system_ready)
Example #7
 def test_wait_for_application_states_errored_unit(self):
     self._application_states_setup({
         'workload-status': 'error',
         'workload-status-message': 'Unit is ready'
     })
     with self.assertRaises(model.UnitError):
         model.wait_for_application_states('modelname', timeout=1)
     self.assertFalse(self.system_ready)
Example #8
 def test_wait_for_application_states_idle_timeout(self):
     self._application_states_setup({
         'agent-status': 'executing',
         'workload-status': 'blocked',
         'workload-status-message': 'Sure, I could do something'})
     with self.assertRaises(model.ModelTimeout) as timeout:
         model.wait_for_application_states('modelname', timeout=-2)
     self.assertEqual(
         timeout.exception.args[0],
         "Zaza has timed out waiting on the model to reach idle state.")
Example #9
 def test_wait_for_application_states_blocked_ok(self):
     self._application_states_setup({
         'workload-status': 'blocked',
         'workload-status-message': 'Unit is ready'})
     model.wait_for_application_states(
         'modelname',
         states={'app': {
             'workload-status': 'blocked'}},
         timeout=1)
     self.assertTrue(self.system_ready)
Example #10
def configure_external_s3_backend():
    """Set up Ceph-radosgw as an external S3 backend for Glance."""
    logging.info("Creating a test S3 user and credentials for Glance")
    username, displayname = "zaza-glance-test", "Zaza Glance Test User"
    cmd = "radosgw-admin user create --uid='{}' --display-name='{}'".format(
        username, displayname)
    results = model.run_on_leader("ceph-mon", cmd)
    stdout = json.loads(results["stdout"])
    keys = stdout["keys"][0]
    access_key, secret_key = keys["access_key"], keys["secret_key"]

    logging.info("Getting S3 endpoint URL of Radosgw from Keystone")
    keystone_auth = openstack_utils.get_overcloud_auth()
    keystone_client = openstack_utils.get_keystone_client(keystone_auth)
    endpoint_url = keystone_client.session.get_endpoint(
        service_type="s3",
        interface="public",
        region="RegionOne",
    )

    logging.info("Creating a test S3 bucket for Glance")
    bucket_name = "zaza-glance-s3-test"
    s3_client = boto3.client(
        "s3",
        endpoint_url=endpoint_url,
        aws_access_key_id=access_key,
        aws_secret_access_key=secret_key,
    )
    s3_client.create_bucket(Bucket=bucket_name)

    logging.info("Updating Glance configs with S3 endpoint information")
    model.set_application_config(
        "glance",
        {
            "s3-store-host": endpoint_url,
            "s3-store-access-key": access_key,
            "s3-store-secret-key": secret_key,
            "s3-store-bucket": bucket_name,
        },
    )
    model.wait_for_agent_status()

    logging.info("Waiting for units to reach target states")
    model.wait_for_application_states(
        states={
            "glance": {
                "workload-status": "active",
                "workload-status-message": "Unit is ready",
            }
        })
    model.block_until_all_units_idle()
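As a sanity check, the applied charm options can be read back with the same model helper (a sketch; the nested 'value' key matches how get_application_config results are read elsewhere in this listing):

    applied = model.get_application_config("glance")
    # Verify the bucket name reached the charm configuration
    assert applied["s3-store-bucket"]["value"] == bucket_name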
Example #11
def configure_s3_backend():
    """Inject S3 parameters from Swift for Gnocchi config."""
    session = openstack_utils.get_overcloud_keystone_session()
    ks_client = openstack_utils.get_keystone_session_client(session)

    logging.info('Retrieving S3 connection data from Swift')
    token_data = ks_client.tokens.get_token_data(session.get_token())
    project_id = token_data['token']['project']['id']
    user_id = token_data['token']['user']['id']

    # Store URL to service providing S3 compatible API
    for entry in token_data['token']['catalog']:
        if entry['type'] == 's3':
            for endpoint in entry['endpoints']:
                if endpoint['interface'] == 'public':
                    s3_region = endpoint['region']
                    s3_endpoint = endpoint['url']

    # Create AWS compatible application credentials in Keystone
    ec2_creds = ks_client.ec2.create(user_id, project_id)

    logging.info('Changing Gnocchi charm config to connect to S3')
    model.set_application_config(
        'gnocchi', {
            's3-endpoint-url': s3_endpoint,
            's3-region-name': s3_region,
            's3-access-key-id': ec2_creds.access,
            's3-secret-access-key': ec2_creds.secret
        })
    logging.info('Waiting for units to execute config-changed hook')
    model.wait_for_agent_status()
    logging.info('Waiting for units to reach target states')
    model.wait_for_application_states(
        states={
            'gnocchi': {
                'workload-status': 'active',
                'workload-status-message': 'Unit is ready'
            },
            'ceilometer': {
                'workload-status': 'blocked',
                'workload-status-message': (
                    'Run the ceilometer-upgrade action on the leader '
                    'to initialize ceilometer and gnocchi')
            }
        })
    model.block_until_all_units_idle()
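The blocked ceilometer status above asks for the ceilometer-upgrade action; a follow-up step could run it on the leader (a sketch, assuming zaza's run_action_on_leader helper and that the action needs no extra parameters):

    # Run the action the blocked status asks for, then log its outcome
    action = model.run_action_on_leader(
        'ceilometer', 'ceilometer-upgrade', action_params={})
    logging.info('ceilometer-upgrade finished with status {}'.format(
        action.status))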
Example #12
    def test_cinder_ceph_restrict_pool_setup(self):
        """Make sure cinder-ceph restrict pool was created successfully."""
        logging.info('Wait for idle/ready status...')
        zaza_model.wait_for_application_states()

        pools = zaza_ceph.get_ceph_pools('ceph-mon/0')
        if 'cinder-ceph' not in pools:
            msg = 'cinder-ceph pool was not found upon querying ceph-mon/0'
            raise zaza_exceptions.CephPoolNotFound(msg)

        expected = "pool=cinder-ceph, allow class-read " \
                   "object_prefix rbd_children"
        cmd = "sudo ceph auth get client.cinder-ceph"
        result = zaza_model.run_on_unit('ceph-mon/0', cmd)
        output = result.get('Stdout').strip()

        if expected not in output:
            msg = ('cinder-ceph pool restriction was not configured correctly.'
                   ' Found: {}'.format(output))
            raise zaza_exceptions.CephPoolNotConfigured(msg)
Example #13
File: setup.py Project: lolwww/zaza
def configure_cinder_backup():
    """Configure cinder-backup-swift."""
    keystone_ip = zaza_model.get_app_ips('swift-keystone')[0]
    swift_ip = zaza_model.get_app_ips('swift-proxy')[0]
    auth_ver = (zaza_model.get_application_config('swift-keystone').get(
        'preferred-api-version').get('value'))
    if auth_ver == 2:
        auth_url = 'http://{}:5000/v2.0'.format(keystone_ip)
        endpoint_url = 'http://{}:8080/v1/AUTH_'.format(swift_ip)
    else:
        auth_url = 'http://{}:5000/v3'.format(keystone_ip)
        endpoint_url = 'http://{}:8080/v1/AUTH'.format(swift_ip)
    cinder_backup_swift_conf = {
        'endpoint-url': endpoint_url,
        'auth-url': auth_url
    }
    juju_service = 'cinder-backup-swift'
    zaza_model.set_application_config(juju_service, cinder_backup_swift_conf)
    zaza_model.wait_for_application_states()
    time.sleep(300)
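A fixed five-minute sleep is fragile; the idiom used by other examples in this listing, blocking until every unit reports idle, could replace it (a sketch):

    # Wait for the config change to settle instead of sleeping blindly
    zaza_model.block_until_all_units_idle()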
Example #14
    def test_cache_device(self):
        """Test replacing a disk in use."""
        logging.info('Running add-disk action with a caching device')
        mon = next(iter(zaza_model.get_units('ceph-mon'))).entity_id
        osds = [x.entity_id for x in zaza_model.get_units('ceph-osd')]
        params = []
        for unit in osds:
            loop_dev = self.loop_devs[unit]
            params.append({'unit': unit, 'device': loop_dev})
            action_obj = zaza_model.run_action(unit_name=unit,
                                               action_name='add-disk',
                                               action_params={
                                                   'osd-devices': loop_dev,
                                                   'partition-size': 5
                                               })
            zaza_utils.assertActionRanOK(action_obj)
        zaza_model.wait_for_application_states()

        logging.info('Removing previously added disks')
        for param in params:
            osd_id = self.get_local_osd_id(param['unit'])
            param.update({'osd-id': osd_id})
            action_obj = zaza_model.run_action(unit_name=param['unit'],
                                               action_name='remove-disk',
                                               action_params={
                                                   'osd-ids': osd_id,
                                                   'timeout': 5,
                                                   'format': 'json',
                                                   'purge': False
                                               })
            zaza_utils.assertActionRanOK(action_obj)
            results = json.loads(action_obj.data['results']['message'])
            results = results[next(iter(results))]
            self.assertEqual(results['osd-ids'], osd_id)
            zaza_model.run_on_unit(param['unit'], 'partprobe')
        zaza_model.wait_for_application_states()

        logging.info('Recycling previously removed OSDs')
        for param in params:
            action_obj = zaza_model.run_action(unit_name=param['unit'],
                                               action_name='add-disk',
                                               action_params={
                                                   'osd-devices':
                                                   param['device'],
                                                   'osd-ids': param['osd-id'],
                                                   'partition-size': 4
                                               })
            zaza_utils.assertActionRanOK(action_obj)
        zaza_model.wait_for_application_states()
        self.assertEqual(len(osds) * 2, self.get_num_osds(mon))

        # Finally, remove all the added OSDs that are backed by loop devices.
        for param in params:
            osd_id = self.get_local_osd_id(param['unit'])
            zaza_model.run_action(unit_name=param['unit'],
                                  action_name='remove-disk',
                                  action_params={
                                      'osd-ids': osd_id,
                                      'purge': True
                                  })
Example #15
    def config_change(self,
                      default_config,
                      alternate_config,
                      application_name=None):
        """Run change config tests.

        Change config to `alternate_config`, wait for idle workload status,
        yield, return config to `default_config` and wait for idle workload
        status before return from function.

        Example usage:
            with self.config_change({'preferred-api-version': '2'},
                                    {'preferred-api-version': '3'}):
                do_something()

        :param default_config: Dict of charm settings to set on completion
        :type default_config: dict
        :param alternate_config: Dict of charm settings to change to
        :type alternate_config: dict
        :param application_name: String application name for use when called
                                 by a charm under test other than the object's
                                 application.
        :type application_name: str
        """
        if not application_name:
            application_name = self.application_name

        # we need to compare config values to what is already applied before
        # attempting to set them.  otherwise the model will behave differently
        # than we would expect while waiting for completion of the change
        app_config = self.config_current(application_name,
                                         keys=alternate_config.keys())

        if all(item in app_config.items()
               for item in alternate_config.items()):
            logging.debug('alternate_config equals what is already applied '
                          'config')
            yield
            if default_config == alternate_config:
                logging.debug('default_config also equals what is already '
                              'applied config')
                return
            logging.debug('alternate_config already set, and default_config '
                          'needs to be applied before return')
        else:
            logging.debug(
                'Changing charm setting to {}'.format(alternate_config))
            model.set_application_config(
                application_name,
                self._stringed_value_config(alternate_config),
                model_name=self.model_name)

            logging.debug('Waiting for units to execute config-changed hook')
            model.wait_for_agent_status(model_name=self.model_name)

            logging.debug('Waiting for units to reach target states')
            model.wait_for_application_states(model_name=self.model_name,
                                              states=self.test_config.get(
                                                  'target_deploy_status', {}))
            # TODO: Optimize with a block on a specific application until idle.
            model.block_until_all_units_idle()

            yield

        logging.debug('Restoring charm setting to {}'.format(default_config))
        model.set_application_config(
            application_name,
            self._stringed_value_config(default_config),
            model_name=self.model_name)

        logging.debug('Waiting for units to reach target states')
        model.wait_for_application_states(model_name=self.model_name,
                                          states=self.test_config.get(
                                              'target_deploy_status', {}))
        # TODO: Optimize with a block on a specific application until idle.
        model.block_until_all_units_idle()
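The application_name parameter lets a charm other than the object's own application reuse this helper; a usage sketch built from the docstring above ('keystone' and the 'debug' key are placeholders):

        with self.config_change({'debug': 'false'},
                                {'debug': 'true'},
                                application_name='keystone'):
            do_something()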
Example #16
    def test_blocked_when_non_pristine_disk_appears(self):
        """Test blocked state with non-pristine disk.

        Validate that the charm goes into blocked state when it is presented
        with new block devices that have foreign data on them.
        Instances used in UOSCI have a flavour with ephemeral storage in
        addition to the bootable instance storage.  The ephemeral storage
        device is partitioned, formatted and mounted early in the boot process
        by cloud-init.
        As long as the device is mounted the charm will not attempt to use it.
        If we unmount it and trigger the config-changed hook the block device
        will appear as a new and previously untouched device for the charm.
        One of the first steps of device eligibility checks should be to make
        sure we are seeing a pristine and empty device before doing any
        further processing.
        As the ephemeral device will have data on it we can use it to validate
        that these checks work as intended.
        """
        logging.info('Checking behaviour when non-pristine disks appear...')
        logging.info('Configuring ephemeral-unmount...')
        alternate_conf = {
            'ephemeral-unmount': '/mnt',
            'osd-devices': '/dev/vdb'
        }
        juju_service = 'ceph-osd'
        zaza_model.set_application_config(juju_service, alternate_conf)
        ceph_osd_states = {
            'ceph-osd': {
                'workload-status': 'blocked',
                'workload-status-message': 'Non-pristine'
            }
        }
        zaza_model.wait_for_application_states(states=ceph_osd_states)
        logging.info('Units now in blocked state, running zap-disk action...')
        unit_names = ['ceph-osd/0', 'ceph-osd/1', 'ceph-osd/2']
        for unit_name in unit_names:
            zap_disk_params = {
                'devices': '/dev/vdb',
                'i-really-mean-it': True,
            }
            action_obj = zaza_model.run_action(unit_name=unit_name,
                                               action_name='zap-disk',
                                               action_params=zap_disk_params)
            logging.debug('Result of action: {}'.format(action_obj))

        logging.info('Running add-disk action...')
        for unit_name in unit_names:
            add_disk_params = {
                'osd-devices': '/dev/vdb',
            }
            action_obj = zaza_model.run_action(unit_name=unit_name,
                                               action_name='add-disk',
                                               action_params=add_disk_params)
            logging.debug('Result of action: {}'.format(action_obj))

        logging.info('Wait for idle/ready status...')
        zaza_model.wait_for_application_states()

        logging.info('OK')

        set_default = {
            'ephemeral-unmount': '',
            'osd-devices': '/dev/vdb',
        }

        current_release = zaza_openstack.get_os_release()
        bionic_train = zaza_openstack.get_os_release('bionic_train')
        if current_release < bionic_train:
            set_default['osd-devices'] = '/dev/vdb /srv/ceph'

        logging.info('Restoring to default configuration...')
        zaza_model.set_application_config(juju_service, set_default)

        zaza_model.wait_for_application_states()
Example #17
    def config_change(self, default_config, alternate_config):
        """Run change config tests.

        Change config to `alternate_config`, wait for idle workload status,
        yield, return config to `default_config` and wait for idle workload
        status before return from function.

        Example usage:
            with self.config_change({'preferred-api-version': '2'},
                                    {'preferred-api-version': '3'}):
                do_something()

        :param default_config: Dict of charm settings to set on completion
        :type default_config: dict
        :param alternate_config: Dict of charm settings to change to
        :type alternate_config: dict
        """
        # we need to compare config values to what is already applied before
        # attempting to set them.  otherwise the model will behave differently
        # than we would expect while waiting for completion of the change
        _app_config = model.get_application_config(self.application_name)
        app_config = {}
        # convert the more elaborate config structure from libjuju to something
        # we can compare to what the caller supplies to this function
        for k in alternate_config.keys():
            # note that conversion to string for all values is due to
            # attempting to set any config with other types lead to Traceback
            app_config[k] = str(_app_config.get(k, {}).get('value', ''))
        if all(item in app_config.items()
               for item in alternate_config.items()):
            logging.debug('alternate_config equals what is already applied '
                          'config')
            yield
            if default_config == alternate_config:
                logging.debug('default_config also equals what is already '
                              'applied config')
                return
            logging.debug('alternate_config already set, and default_config '
                          'needs to be applied before return')
        else:
            logging.debug(
                'Changing charm setting to {}'.format(alternate_config))
            model.set_application_config(self.application_name,
                                         alternate_config,
                                         model_name=self.model_name)

            logging.debug('Waiting for units to execute config-changed hook')
            model.wait_for_agent_status(model_name=self.model_name)

            logging.debug('Waiting for units to reach target states')
            model.wait_for_application_states(model_name=self.model_name,
                                              states=self.test_config.get(
                                                  'target_deploy_status', {}))
            # TODO: Optimize with a block on a specific application until idle.
            model.block_until_all_units_idle()

            yield

        logging.debug('Restoring charm setting to {}'.format(default_config))
        model.set_application_config(self.application_name,
                                     default_config,
                                     model_name=self.model_name)

        logging.debug('Waiting for units to reach target states')
        model.wait_for_application_states(model_name=self.model_name,
                                          states=self.test_config.get(
                                              'target_deploy_status', {}))
        # TODO: Optimize with a block on a specific application until idle.
        model.block_until_all_units_idle()
Example #18
#!/usr/bin/env python3

import asyncio
from zaza import model
from zaza.openstack.utilities import (
    cli as cli_utils,
    openstack,
)

if __name__ == "__main__":
    cli_utils.setup_logging()
    target_model = model.get_juju_model()
    model.wait_for_application_states(
        model_name=target_model, states=openstack.WORKLOAD_STATUS_EXCEPTIONS)
    asyncio.get_event_loop().close()
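The states mapping accepts the same per-application structure used throughout this listing, so the exceptions dict can be copied and extended before the call (a sketch; the 'vault' entry and its message are placeholders):

    states = dict(openstack.WORKLOAD_STATUS_EXCEPTIONS)
    states['vault'] = {
        'workload-status': 'blocked',
        'workload-status-message': 'Unit is sealed'}
    model.wait_for_application_states(
        model_name=target_model, states=states)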
Example #19
 wl_statuses = copy.deepcopy(openstack.WORKLOAD_STATUS_EXCEPTIONS)
 os_version = openstack.get_current_os_versions(
     'designate').get('designate')
 if os_version <= 'pike':
     # Remove the memcached relation to disable designate. This is a
     # workaround for Bug #1848307
     logging.info("Removing designate memcached relation")
     model.remove_relation(
         'designate',
         'coordinator-memcached',
         'memcached:cache')
     wl_statuses['designate'] = {
         'workload-status-message': """'coordinator-memcached' missing""",
         'workload-status': 'blocked'}
 logging.info("Waiting for statuses with exceptions ...")
 model.wait_for_application_states(
     states=wl_statuses)
 certificate_directory = mojo_utils.get_local_certificate_directory()
 certfile = mojo_utils.get_overcloud_cacert_file()
 logging.info("Vault setup basic ...")
 vault_setup.basic_setup(cacert=certfile)
 clients = vault_utils.get_clients(cacert=certfile)
 vault_creds = vault_utils.get_credentails()
 vault_utils.unseal_all(clients, vault_creds['keys'][0])
 action = vault_utils.run_charm_authorize(
     vault_creds['root_token'])
 action = vault_utils.run_get_csr()
 intermediate_csr = action.data['results']['output']
 with open(os.path.join(certificate_directory, 'ca.key'), 'rb') as f:
     cakey = f.read()
 with open(os.path.join(certificate_directory, 'cacert.pem'), 'rb') as f:
     cacert = f.read()
Example #20
    def test_blocked_when_non_pristine_disk_appears(self):
        """Test blocked state with non-pristine disk.

        Validate that the charm goes into blocked state when it is presented
        with new block devices that have foreign data on them.
        Instances used in UOSCI have a flavour with ephemeral storage in
        addition to the bootable instance storage.  The ephemeral storage
        device is partitioned, formatted and mounted early in the boot process
        by cloud-init.
        As long as the device is mounted the charm will not attempt to use it.
        If we unmount it and trigger the config-changed hook the block device
        will appear as a new and previously untouched device for the charm.
        One of the first steps of device eligibility checks should be to make
        sure we are seeing a pristine and empty device before doing any
        further processing.
        As the ephemeral device will have data on it we can use it to validate
        that these checks work as intended.
        """
        current_release = zaza_openstack.get_os_release()
        focal_ussuri = zaza_openstack.get_os_release('focal_ussuri')
        if current_release >= focal_ussuri:
            # NOTE(ajkavanagh) - focal (on ServerStack) is broken for /dev/vdb
            # and so this test can't pass: LP#1842751 discusses the issue, but
            # basically the snapd daemon along with lxcfs results in /dev/vdb
            # being mounted in the lxcfs process namespace.  If the charm
            # 'tries' to umount it, it can (as root), but the mount is still
            # 'held' by lxcfs and thus nothing else can be done with it.  This
            # is only a problem in serverstack with images with a default
            # /dev/vdb ephemeral
            logging.warn("Skipping pristine disk test for focal and higher")
            return
        logging.info('Checking behaviour when non-pristine disks appear...')
        logging.info('Configuring ephemeral-unmount...')
        alternate_conf = {
            'ephemeral-unmount': '/mnt',
            'osd-devices': '/dev/vdb'
        }
        juju_service = 'ceph-osd'
        zaza_model.set_application_config(juju_service, alternate_conf)
        ceph_osd_states = {
            'ceph-osd': {
                'workload-status': 'blocked',
                'workload-status-message': 'Non-pristine'
            }
        }
        zaza_model.wait_for_application_states(states=ceph_osd_states)
        logging.info('Units now in blocked state, running zap-disk action...')
        unit_names = ['ceph-osd/0', 'ceph-osd/1', 'ceph-osd/2']
        for unit_name in unit_names:
            zap_disk_params = {
                'devices': '/dev/vdb',
                'i-really-mean-it': True,
            }
            action_obj = zaza_model.run_action(
                unit_name=unit_name,
                action_name='zap-disk',
                action_params=zap_disk_params
            )
            logging.debug('Result of action: {}'.format(action_obj))

        logging.info('Running add-disk action...')
        for unit_name in unit_names:
            add_disk_params = {
                'osd-devices': '/dev/vdb',
            }
            action_obj = zaza_model.run_action(
                unit_name=unit_name,
                action_name='add-disk',
                action_params=add_disk_params
            )
            logging.debug('Result of action: {}'.format(action_obj))

        logging.info('Wait for idle/ready status...')
        zaza_model.wait_for_application_states()

        logging.info('OK')

        set_default = {
            'ephemeral-unmount': '',
            'osd-devices': '/dev/vdb',
        }

        current_release = zaza_openstack.get_os_release()
        bionic_train = zaza_openstack.get_os_release('bionic_train')
        if current_release < bionic_train:
            set_default['osd-devices'] = '/dev/vdb /srv/ceph'

        logging.info('Restoring to default configuration...')
        zaza_model.set_application_config(juju_service, set_default)

        zaza_model.wait_for_application_states()