Code example #1
    def test_key_distribution_and_rotation(self):
        """Verify key rotation.

        Note that we assume the test bundle configures
        `token-expiration` to 60 and that more than 60 seconds pass
        between deployment completion and this test.
        """
        if (openstack_utils.get_os_release() <
                openstack_utils.get_os_release('xenial_ocata')):
            logging.info('skipping test < xenial_ocata')
            return

        with self.pause_resume(['apache2']):
            KEY_KEY_REPOSITORY = 'key_repository'
            CREDENTIAL_KEY_REPOSITORY = '/etc/keystone/credential-keys/'
            FERNET_KEY_REPOSITORY = '/etc/keystone/fernet-keys/'

            # get key repository from leader storage
            key_repository = json.loads(juju_utils.leader_get(
                self.application_name, KEY_KEY_REPOSITORY))
            # sort keys so we can compare it to on-disk repositories
            key_repository = json.loads(json.dumps(
                key_repository, sort_keys=True),
                object_pairs_hook=collections.OrderedDict)
            logging.info('key_repository: "{}"'
                         .format(pprint.pformat(key_repository)))
            for repo in [CREDENTIAL_KEY_REPOSITORY, FERNET_KEY_REPOSITORY]:
                try:
                    for key_name, key in key_repository[repo].items():
                        if int(key_name) > 1:
                            # after initialization the repository contains the
                            # staging key (0) and the primary key (1).  After
                            # rotation the repository contains at least one key
                            # with higher index.
                            break
                    else:
                        # NOTE the charm should only rotate the fernet key
                        # repository and not the credential key
                        # repository.
                        if repo == FERNET_KEY_REPOSITORY:
                            raise zaza_exceptions.KeystoneKeyRepositoryError(
                                'Keys in Fernet key repository have not been '
                                'rotated.')
                except KeyError:
                    raise zaza_exceptions.KeystoneKeyRepositoryError(
                        'Dict in leader setting "{}" does not contain key '
                        'repository "{}"'.format(KEY_KEY_REPOSITORY, repo))

            # get on-disk key repository from all units
            on_disk = {}
            units = zaza.model.get_units(self.application_name)
            for unit in units:
                on_disk[unit.entity_id] = {}
                for repo in [CREDENTIAL_KEY_REPOSITORY, FERNET_KEY_REPOSITORY]:
                    on_disk[unit.entity_id][repo] = {}
                    result = zaza.model.run_on_unit(
                        unit.entity_id, 'sudo ls -1 {}'.format(repo))
                    for key_name in result.get('Stdout').split():
                        result = zaza.model.run_on_unit(
                            unit.entity_id,
                            'sudo cat {}/{}'.format(repo, key_name))
                        on_disk[unit.entity_id][repo][key_name] = result.get(
                            'Stdout')
            # sort keys so we can compare it to leader storage repositories
            on_disk = json.loads(
                json.dumps(on_disk, sort_keys=True),
                object_pairs_hook=collections.OrderedDict)
            logging.info('on_disk: "{}"'.format(pprint.pformat(on_disk)))

            for unit in units:
                unit_repo = on_disk[unit.entity_id]
                lead_repo = key_repository
                if unit_repo != lead_repo:
                    raise zaza_exceptions.KeystoneKeyRepositoryError(
                        'expect: "{}" actual({}): "{}"'
                        .format(pprint.pformat(lead_repo), unit.entity_id,
                                pprint.pformat(unit_repo)))
                logging.info('"{}" == "{}"'
                             .format(pprint.pformat(unit_repo),
                                     pprint.pformat(lead_repo)))
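
The comparison above works because both sides are round-tripped through JSON with sort_keys=True and parsed back with an OrderedDict hook, which puts nested keys into a canonical order before the order-sensitive OrderedDict comparison and keeps the logged output readable. A minimal sketch of that normalization pattern, using illustrative data only:

import collections
import json

# Illustrative data only; in the test the dicts come from leader storage
# and from reading the key files on each unit.
leader_side = {'/etc/keystone/fernet-keys/': {'1': 'AAA', '0': 'BBB'}}
unit_side = {'/etc/keystone/fernet-keys/': {'0': 'BBB', '1': 'AAA'}}


def normalize(data):
    """Round-trip through JSON so nested keys end up in a canonical order."""
    return json.loads(json.dumps(data, sort_keys=True),
                      object_pairs_hook=collections.OrderedDict)


assert normalize(leader_side) == normalize(unit_side)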
Code example #2
class NovaCloudController(test_utils.OpenStackBaseTest):
    """Run nova-cloud-controller specific tests."""

    XENIAL_MITAKA = openstack_utils.get_os_release('xenial_mitaka')
    XENIAL_OCATA = openstack_utils.get_os_release('xenial_ocata')
    XENIAL_QUEENS = openstack_utils.get_os_release('xenial_queens')
    BIONIC_QUEENS = openstack_utils.get_os_release('bionic_queens')
    BIONIC_ROCKY = openstack_utils.get_os_release('bionic_rocky')
    BIONIC_TRAIN = openstack_utils.get_os_release('bionic_train')

    @classmethod
    def setUpClass(cls):
        """Run class setup for running nova-cloud-controller tests."""
        super(NovaCloudController, cls).setUpClass()
        cls.current_release = openstack_utils.get_os_release()

    @property
    def services(self):
        """Return a list of services for the selected OpenStack release."""
        services = ['nova-scheduler', 'nova-conductor']
        if self.current_release <= self.BIONIC_QUEENS:
            services.append('nova-api-os-compute')
        if self.current_release <= self.XENIAL_MITAKA:
            services.append('nova-cert')
        if self.current_release >= self.XENIAL_OCATA:
            services.append('apache2')
        return services

    def test_104_compute_api_functionality(self):
        """Verify basic compute API functionality."""
        logging.info('Instantiating nova client...')
        keystone_session = openstack_utils.get_overcloud_keystone_session()
        nova = openstack_utils.get_nova_session_client(keystone_session)

        logging.info('Checking api functionality...')

        actual_service_names = [
            service.to_dict()['binary'] for service in nova.services.list()
        ]
        for expected_service_name in ('nova-scheduler', 'nova-conductor',
                                      'nova-compute'):
            assert (expected_service_name in actual_service_names)

        # Thanks to setup.create_flavors we should have a few flavors already:
        assert (len(nova.flavors.list()) > 0)

        # Just checking it's not raising and returning an iterable:
        assert (len(nova.servers.list()) >= 0)

    def test_106_compute_catalog_endpoints(self):
        """Verify that the compute endpoints are present in the catalog."""
        overcloud_auth = openstack_utils.get_overcloud_auth()
        keystone_client = openstack_utils.get_keystone_client(overcloud_auth)
        actual_endpoints = keystone_client.service_catalog.get_endpoints()

        logging.info('Checking compute endpoints...')

        if self.current_release < self.XENIAL_QUEENS:
            actual_compute_endpoints = actual_endpoints['compute'][0]
            for expected_url in ('internalURL', 'adminURL', 'publicURL'):
                assert (expected_url in actual_compute_endpoints)
        else:
            actual_compute_interfaces = [
                endpoint['interface']
                for endpoint in actual_endpoints['compute']
            ]
            for expected_interface in ('internal', 'admin', 'public'):
                assert (expected_interface in actual_compute_interfaces)

    def test_220_nova_metadata_propagate(self):
        """Verify that the vendor-data settings are propagated.

        Change vendor-data-url and assert that change propagates to the correct
        file and that services are restarted as a result
        """
        if self.current_release < self.BIONIC_ROCKY:
            logging.info("Feature didn't exist before Rocky. Nothing to test")
            return

        # Expected default and alternate values
        current_value = zaza.model.get_application_config(
            'nova-cloud-controller')['vendor-data-url']['value']
        new_value = 'http://some-other.url/vdata'

        set_default = {'vendor-data-url': current_value}
        set_alternate = {'vendor-data-url': new_value}
        default_entry = {
            'api': {
                'vendordata_dynamic_targets': [current_value]
            }
        }
        alternate_entry = {'api': {'vendordata_dynamic_targets': [new_value]}}

        # Config file affected by juju set config change
        conf_file = '/etc/nova/nova.conf'

        # Make config change, check for service restarts
        logging.info('Setting config on nova-cloud-controller to {}'.format(
            set_alternate))
        self.restart_on_changed(conf_file, set_default, set_alternate,
                                default_entry, alternate_entry, self.services)

    def test_302_api_rate_limiting_is_enabled(self):
        """Check that API rate limiting is enabled."""
        logging.info('Checking api-paste config file data...')
        zaza.model.block_until_oslo_config_entries_match(
            'nova-cloud-controller', '/etc/nova/api-paste.ini', {
                'filter:legacy_ratelimit': {
                    'limits': ["( POST, '*', .*, 9999, MINUTE );"]
                }
            })

    def test_310_pci_alias_config(self):
        """Verify that the pci alias data is rendered properly.

        Change pci-alias and assert that change propagates to the correct
        file and that services are restarted as a result
        """
        logging.info('Checking pci aliases in nova config...')

        # Expected default and alternate values
        current_value = zaza.model.get_application_config(
            'nova-cloud-controller')['pci-alias']
        try:
            current_value = current_value['value']
        except KeyError:
            current_value = None
        new_value = '[{}, {}]'.format(
            json.dumps(
                {
                    'name': 'IntelNIC',
                    'capability_type': 'pci',
                    'product_id': '1111',
                    'vendor_id': '8086',
                    'device_type': 'type-PF'
                },
                sort_keys=True),
            json.dumps(
                {
                    'name': ' Cirrus Logic ',
                    'capability_type': 'pci',
                    'product_id': '0ff2',
                    'vendor_id': '10de',
                    'device_type': 'type-PCI'
                },
                sort_keys=True))

        set_default = {'pci-alias': current_value}
        set_alternate = {'pci-alias': new_value}

        expected_conf_section = 'DEFAULT'
        expected_conf_key = 'pci_alias'
        if self.current_release >= self.XENIAL_OCATA:
            expected_conf_section = 'pci'
            expected_conf_key = 'alias'

        default_entry = {expected_conf_section: {}}
        alternate_entry = {
            expected_conf_section: {
                expected_conf_key:
                [('{"capability_type": "pci", "device_type": "type-PF", '
                  '"name": "IntelNIC", "product_id": "1111", '
                  '"vendor_id": "8086"}'),
                 ('{"capability_type": "pci", "device_type": "type-PCI", '
                  '"name": " Cirrus Logic ", "product_id": "0ff2", '
                  '"vendor_id": "10de"}')]
            }
        }

        # Config file affected by juju set config change
        conf_file = '/etc/nova/nova.conf'

        # Make config change, check for service restarts
        logging.info('Setting config on nova-cloud-controller to {}'.format(
            set_alternate))
        self.restart_on_changed(conf_file, set_default, set_alternate,
                                default_entry, alternate_entry, self.services)

    def test_900_restart_on_config_change(self):
        """Checking restart happens on config change.

        Change debug mode and assert that change propagates to the correct
        file and that services are restarted as a result
        """
        # Config file affected by juju set config change
        conf_file = '/etc/nova/nova.conf'

        # Make config change, check for service restarts
        logging.info('Changing debug config on nova-cloud-controller')
        self.restart_on_changed_debug_oslo_config_file(conf_file,
                                                       self.services)

    def test_901_pause_resume(self):
        """Run pause and resume tests.

        Pause the services and check they are stopped, then resume and check
        they are started.
        """
        with self.pause_resume(self.services):
            logging.info("Testing pause resume")

    def test_902_quota_settings(self):
        """Verify that the quota settings are propagated.

        Change quota-instances and assert that change propagates to the correct
        file and that services are restarted as a result
        """
        # Expected default and alternate values
        current_value = zaza.model.get_application_config(
            'nova-cloud-controller')['quota-instances']
        try:
            current_value = current_value['value']
        except KeyError:
            current_value = 0
        new_value = '20'

        set_default = {'quota-instances': current_value}
        set_alternate = {'quota-instances': new_value}

        expected_conf_section = 'DEFAULT'
        expected_conf_key = 'quota_instances'
        if self.current_release >= self.XENIAL_OCATA:
            expected_conf_section = 'quota'
            expected_conf_key = 'instances'

        default_entry = {expected_conf_section: {}}
        alternate_entry = {
            expected_conf_section: {
                expected_conf_key: [new_value]
            }
        }

        # Config file affected by juju set config change
        conf_file = '/etc/nova/nova.conf'

        # Make config change, check for service restarts
        logging.info('Setting config on nova-cloud-controller to {}'.format(
            set_alternate))
        self.restart_on_changed(conf_file, set_default, set_alternate,
                                default_entry, alternate_entry, self.services)

    def test_903_enable_quota_count_usage_from_placement(self):
        """Verify that quota-count-usage-from-placement is propagated.

        Change quota-count-usage-from-placement and assert that nova
        configuration file is updated and the services are restarted.
        This parameter is not supported for releases older than Train. In
        that case assert that the nova configuration file is not updated.
        """
        # Expected default and alternate values
        current_value = zaza.model.get_application_config(
            'nova-cloud-controller')['quota-count-usage-from-placement']
        try:
            current_value = current_value['value']
        except KeyError:
            current_value = False

        new_value = not current_value
        new_value_str = str(new_value).title()
        current_value_str = str(current_value).title()

        set_default = {'quota-count-usage-from-placement': current_value}
        set_alternate = {'quota-count-usage-from-placement': new_value}

        expected_conf_section = 'quota'
        expected_conf_key = 'count_usage_from_placement'

        # When quota-count-usage-from-placement is False, the quota
        # section in the nova conf file is empty
        if current_value:
            default_entry = {
                expected_conf_section: {
                    expected_conf_key: [current_value_str]
                }
            }
            alternate_entry = {expected_conf_section: {}}
        else:
            default_entry = {expected_conf_section: {}}
            alternate_entry = {
                expected_conf_section: {
                    expected_conf_key: [new_value_str]
                }
            }

        # Config file affected by juju set config change
        conf_file = '/etc/nova/nova.conf'

        if self.current_release < self.BIONIC_TRAIN:
            # Configuration is not supported in releases older than Train
            default_entry = {expected_conf_section: {}}
            alternate_entry = {expected_conf_section: {}}
            services = {}
        else:
            services = self.services

        # Make config change, check for service restarts
        logging.info('Setting config on nova-cloud-controller to {}'.format(
            set_alternate))
        self.restart_on_changed(conf_file, set_default, set_alternate,
                                default_entry, alternate_entry, services)
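
Several of the tests above read charm options via zaza.model.get_application_config() and wrap the ['value'] lookup in try/except KeyError. A hedged sketch of the shape that guard defends against; the dict layout shown here is an assumption for illustration, not verbatim Juju output:

# Assumed shape of a single option as returned by get_application_config();
# the 'value' key may be missing when the option has never been set.
config_entry = {
    'description': 'Number of instances allowed per project.',
    'source': 'default',
    'type': 'int',
}

try:
    current_value = config_entry['value']
except KeyError:
    current_value = 0  # same fallback as test_902_quota_settings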
Code example #3
    def test_500_ceph_alternatives_cleanup(self):
        """Check ceph alternatives removed when ceph-mon relation is broken."""
        # Skip this test if release is less than xenial_ocata as in that case
        # cinder HAS a relation with ceph directly and this test would fail
        current_release = openstack_utils.get_os_release()
        xenial_ocata = openstack_utils.get_os_release('xenial_ocata')
        if current_release < xenial_ocata:
            logging.info("Skipping test as release < xenial-ocata")
            return

        units = zaza.model.get_units("cinder-ceph", model_name=self.model_name)

        # check each unit prior to breaking relation
        for unit in units:
            dir_list = directory_listing(unit.name, "/etc/ceph")
            if 'ceph.conf' in dir_list:
                logging.debug(
                    "/etc/ceph/ceph.conf exists BEFORE relation-broken")
            else:
                raise zaza_exceptions.CephGenericError(
                    "unit: {} - /etc/ceph/ceph.conf does not exist "
                    "BEFORE relation-broken".format(unit.name))

        # remove the relation so that /etc/ceph/ceph.conf is removed
        logging.info("Removing ceph-mon:client <-> cinder-ceph:ceph relation")
        zaza.model.remove_relation("ceph-mon", "ceph-mon:client",
                                   "cinder-ceph:ceph")
        # zaza.model.wait_for_agent_status()
        logging.info("Wait till relation is removed...")
        ceph_mon_units = zaza.model.get_units("ceph-mon",
                                              model_name=self.model_name)
        conditions = [
            invert_condition(
                does_relation_exist(u.name, "ceph-mon", "cinder-ceph", "ceph",
                                    self.model_name)) for u in ceph_mon_units
        ]
        zaza.model.block_until(*conditions)

        logging.info("Checking each unit after breaking relation...")
        for unit in units:
            dir_list = directory_listing(unit.name, "/etc/ceph")
            if 'ceph.conf' not in dir_list:
                logging.debug(
                    "/etc/ceph/ceph.conf removed AFTER relation-broken")
            else:
                raise zaza_exceptions.CephGenericError(
                    "unit: {} - /etc/ceph/ceph.conf still exists "
                    "AFTER relation-broken".format(unit.name))

        # Restore cinder-ceph and ceph-mon relation to keep tests idempotent
        logging.info("Restoring ceph-mon:client <-> cinder-ceph:ceph relation")
        zaza.model.add_relation("ceph-mon", "ceph-mon:client",
                                "cinder-ceph:ceph")
        conditions = [
            does_relation_exist(u.name, "ceph-mon", "cinder-ceph", "ceph",
                                self.model_name) for u in ceph_mon_units
        ]
        logging.info("Wait till model is idle ...")
        zaza.model.block_until(*conditions)
        zaza.model.block_until_all_units_idle()
        logging.info("... Done.")
Code example #4
    def test_end_user_access_and_token(self):
        """Verify regular end-user access resources and validate token data.

        In effect this also validates user creation, presence of standard
        roles (`_member_`, `Member`), effect of policy and configuration
        of `token-provider`.
        """
        def _validate_token_data(openrc):
            if self.tls_rid:
                openrc['OS_CACERT'] = openstack_utils.get_cacert()
                openrc['OS_AUTH_URL'] = (
                    openrc['OS_AUTH_URL'].replace('http', 'https'))
            logging.info('keystone IP {}'.format(ip))
            keystone_session = openstack_utils.get_keystone_session(
                openrc)
            keystone_client = openstack_utils.get_keystone_session_client(
                keystone_session)
            token = keystone_session.get_token()
            if (openstack_utils.get_os_release() <
                    openstack_utils.get_os_release('xenial_ocata')):
                if len(token) != 32:
                    raise zaza_exceptions.KeystoneWrongTokenProvider(
                        'We expected a UUID token and got this: "{}"'
                        .format(token))
            else:
                if len(token) < 180:
                    raise zaza_exceptions.KeystoneWrongTokenProvider(
                        'We expected a Fernet token and got this: "{}"'
                        .format(token))
            logging.info('token: "{}"'.format(pprint.pformat(token)))

            if (openstack_utils.get_os_release() <
                    openstack_utils.get_os_release('trusty_mitaka')):
                logging.info('skip: tokens.get_token_data() not allowed prior '
                             'to trusty_mitaka')
                return
            # get_token_data call also gets the service catalog
            token_data = keystone_client.tokens.get_token_data(token)
            if token_data.get('token', {}).get('catalog', None) is None:
                raise zaza_exceptions.KeystoneAuthorizationStrict(
                    # NOTE(fnordahl) the above call will probably throw a
                    # http.Forbidden exception, but just in case
                    'Regular end user not allowed to retrieve the service '
                    'catalog. ("{}")'.format(pprint.pformat(token_data)))
            logging.info('token_data: "{}"'.format(pprint.pformat(token_data)))

        if (openstack_utils.get_os_release() <
                openstack_utils.get_os_release('xenial_queens')):
            openrc = {
                'API_VERSION': 2,
                'OS_USERNAME': DEMO_USER,
                'OS_PASSWORD': DEMO_PASSWORD,
                'OS_TENANT_NAME': DEMO_TENANT,
            }
            for ip in self.keystone_ips:
                openrc.update(
                    {'OS_AUTH_URL': 'http://{}:5000/v2.0'.format(ip)})
                _validate_token_data(openrc)

        if (openstack_utils.get_os_release() >=
                openstack_utils.get_os_release('trusty_mitaka')):
            openrc = {
                'API_VERSION': 3,
                'OS_REGION_NAME': 'RegionOne',
                'OS_USER_DOMAIN_NAME': DEMO_DOMAIN,
                'OS_USERNAME': DEMO_USER,
                'OS_PASSWORD': DEMO_PASSWORD,
                'OS_PROJECT_DOMAIN_NAME': DEMO_DOMAIN,
                'OS_PROJECT_NAME': DEMO_PROJECT,
            }
            with self.v3_keystone_preferred():
                for ip in self.keystone_ips:
                    openrc.update(
                        {'OS_AUTH_URL': 'http://{}:5000/v3'.format(ip)})
                    _validate_token_data(openrc)
Code example #5
    def test_blocked_when_non_pristine_disk_appears(self):
        """Test blocked state with non-pristine disk.

        Validate that charm goes into blocked state when it is presented with
        new block devices that have foreign data on them.
        Instances used in UOSCI has a flavour with ephemeral storage in
        addition to the bootable instance storage.  The ephemeral storage
        device is partitioned, formatted and mounted early in the boot process
        by cloud-init.
        As long as the device is mounted the charm will not attempt to use it.
        If we unmount it and trigger the config-changed hook the block device
        will appear as a new and previously untouched device for the charm.
        One of the first steps of device eligibility checks should be to make
        sure we are seeing a pristine and empty device before doing any
        further processing.
        As the ephemeral device will have data on it we can use it to validate
        that these checks work as intended.
        """
        current_release = zaza_openstack.get_os_release()
        focal_ussuri = zaza_openstack.get_os_release('focal_ussuri')
        if current_release >= focal_ussuri:
            # NOTE(ajkavanagh) - focal (on ServerStack) is broken for /dev/vdb
            # and so this test can't pass: LP#1842751 discusses the issue, but
            # basically the snapd daemon along with lxcfs results in /dev/vdb
            # being mounted in the lxcfs process namespace.  If the charm
            # 'tries' to umount it, it can (as root), but the mount is still
            # 'held' by lxcfs and thus nothing else can be done with it.  This
            # is only a problem in serverstack with images with a default
            # /dev/vdb ephemeral
            logging.warn("Skipping pristine disk test for focal and higher")
            return
        logging.info('Checking behaviour when non-pristine disks appear...')
        logging.info('Configuring ephemeral-unmount...')
        alternate_conf = {
            'ephemeral-unmount': '/mnt',
            'osd-devices': '/dev/vdb'
        }
        juju_service = 'ceph-osd'
        zaza_model.set_application_config(juju_service, alternate_conf)
        ceph_osd_states = {
            'ceph-osd': {
                'workload-status': 'blocked',
                'workload-status-message': 'Non-pristine'
            }
        }
        zaza_model.wait_for_application_states(states=ceph_osd_states)
        logging.info('Units now in blocked state, running zap-disk action...')
        unit_names = ['ceph-osd/0', 'ceph-osd/1', 'ceph-osd/2']
        for unit_name in unit_names:
            zap_disk_params = {
                'devices': '/dev/vdb',
                'i-really-mean-it': True,
            }
            action_obj = zaza_model.run_action(
                unit_name=unit_name,
                action_name='zap-disk',
                action_params=zap_disk_params
            )
            logging.debug('Result of action: {}'.format(action_obj))

        logging.info('Running add-disk action...')
        for unit_name in unit_names:
            add_disk_params = {
                'osd-devices': '/dev/vdb',
            }
            action_obj = zaza_model.run_action(
                unit_name=unit_name,
                action_name='add-disk',
                action_params=add_disk_params
            )
            logging.debug('Result of action: {}'.format(action_obj))

        logging.info('Wait for idle/ready status...')
        zaza_model.wait_for_application_states()

        logging.info('OK')

        set_default = {
            'ephemeral-unmount': '',
            'osd-devices': '/dev/vdb',
        }

        current_release = zaza_openstack.get_os_release()
        bionic_train = zaza_openstack.get_os_release('bionic_train')
        if current_release < bionic_train:
            set_default['osd-devices'] = '/dev/vdb /srv/ceph'

        logging.info('Restoring to default configuration...')
        zaza_model.set_application_config(juju_service, set_default)

        zaza_model.wait_for_application_states()
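
The zap-disk/add-disk loops above only log the returned action object. If you also want to assert on the outcome, something along these lines may work; the attribute names on the action object are assumptions about the libjuju Action class, not taken from this test:

# Attribute names ('status', 'data') are assumed; adjust to your libjuju
# version if they differ.
action_obj = zaza_model.run_action(
    unit_name='ceph-osd/0',
    action_name='zap-disk',
    action_params={'devices': '/dev/vdb', 'i-really-mean-it': True},
)
assert action_obj.status == 'completed', action_obj.data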
Code example #6
def _login(dashboard_ip, domain, username, password):
    """Login to the website to get a session.

    :param dashboard_ip: The IP address of the dashboard to log in to.
    :type dashboard_ip: str
    :param domain: the domain to log in to
    :type domain: str
    :param username: the username to log in as
    :type username: str
    :param password: the password to use to log in
    :type password: str
    :returns: tuple of (client, response) where response is the page after
              logging in.
    :rtype: (requests.sessions.Session, requests.models.Response)
    :raises: FailedAuth if the authorisation doesn't work
    """
    auth_url = 'http://{}/horizon/auth/login/'.format(dashboard_ip)

    # start session, get csrftoken
    client = requests.session()
    client.get(auth_url)

    if 'csrftoken' in client.cookies:
        csrftoken = client.cookies['csrftoken']
    else:
        raise Exception("Missing csrftoken")

    # build and send post request
    overcloud_auth = openstack_utils.get_overcloud_auth()

    if overcloud_auth['OS_AUTH_URL'].endswith("v2.0"):
        api_version = 2
    else:
        api_version = 3
    keystone_client = openstack_utils.get_keystone_client(overcloud_auth)
    catalog = keystone_client.service_catalog.get_endpoints()
    logging.info(catalog)
    if api_version == 2:
        region = catalog['identity'][0]['publicURL']
    else:
        region = [
            i['url'] for i in catalog['identity'] if i['interface'] == 'public'
        ][0]

    auth = {
        'domain': domain,
        'username': username,
        'password': password,
        'csrfmiddlewaretoken': csrftoken,
        'next': '/horizon/',
        'region': region,
    }

    # In the minimal test deployment /horizon/project/ is unauthorized,
    # this does not occur in a full deployment and is probably due to
    # services/information missing that horizon wants to display data
    # for.
    # Redirect to /horizon/identity/ instead.
    if (openstack_utils.get_os_release() >=
            openstack_utils.get_os_release('xenial_queens')):
        auth['next'] = '/horizon/identity/'

    if (openstack_utils.get_os_release() >=
            openstack_utils.get_os_release('bionic_stein')):
        auth['region'] = 'default'

    if api_version == 2:
        del auth['domain']

    logging.info('POST data: "{}"'.format(auth))
    response = client.post(auth_url, data=auth, headers={'Referer': auth_url})

    # NOTE(ajkavanagh) there used to be a trusty/icehouse test in the
    # amulet test, but as the zaza tests only test from trusty/mitaka
    # onwards, the test has been dropped
    if (openstack_utils.get_os_release() >=
            openstack_utils.get_os_release('bionic_stein')):
        expect = "Sign Out"
        # the updated dashboard seems to require region to be 'default' in
        # this test configuration
        region = 'default'
    else:
        expect = 'Projects - OpenStack Dashboard'

    if expect not in response.text:
        msg = 'FAILURE code={} text="{}"'.format(response, response.text)
        logging.info("Something went wrong: {}".format(msg))
        raise FailedAuth(msg)
    logging.info("Logged in okay")
    return client, response
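
A possible call site for _login; the IP address is a placeholder, the credentials come from the overcloud auth helper rather than being hard-coded, and the domain mirrors the value used in code example #12:

# Placeholder dashboard address for illustration only.
dashboard_ip = '10.5.0.10'
overcloud_auth = openstack_utils.get_overcloud_auth()

client, response = _login(
    dashboard_ip,
    domain='admin_domain',
    username=overcloud_auth['OS_USERNAME'],
    password=overcloud_auth['OS_PASSWORD'],
)
logging.info('Dashboard responded with HTTP {}'.format(response.status_code))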
Code example #7
File: tests.py  Project: wolsen/zaza-openstack-tests
    def test_400_gnocchi_metrics(self):
        """Verify that ceilometer-agent publishes metrics to gnocchi."""
        current_os_release = openstack_utils.get_os_release()
        openstack_pike_or_older = (
            current_os_release <=
            openstack_utils.get_os_release('xenial_pike'))
        if openstack_pike_or_older:
            # Both the charm and Ceilometer itself had different behaviors in
            # terms of which metrics were published and how fast, which would
            # lead to a combinatorial explosion if we had to maintain test
            # expectations for these old releases.
            logging.info('OpenStack Pike or older, skipping')
            return

        # ceilometer-agent-compute reports metrics for each existing VM, so at
        # least one VM is needed:
        self.RESOURCE_PREFIX = 'zaza-ceilometer-agent'
        self.launch_guest('ubuntu', instance_key=glance_setup.LTS_IMAGE_NAME)

        logging.info('Instantiating gnocchi client...')
        overcloud_auth = openstack_utils.get_overcloud_auth()
        keystone = openstack_utils.get_keystone_client(overcloud_auth)
        gnocchi_ep = keystone.service_catalog.url_for(service_type='metric',
                                                      interface='publicURL')
        gnocchi = gnocchi_client.Client(
            session=openstack_utils.get_overcloud_keystone_session(),
            adapter_options={
                'endpoint_override': gnocchi_ep,
            })

        expected_metric_names = self.__get_expected_metric_names(
            current_os_release)

        min_timeout_seconds = 500
        polling_interval_seconds = (
            openstack_utils.get_application_config_option(
                self.application_name, 'polling-interval'))
        timeout_seconds = max(10 * polling_interval_seconds,
                              min_timeout_seconds)
        logging.info('Giving ceilometer-agent {}s to publish all metrics to '
                     'gnocchi...'.format(timeout_seconds))

        max_time = time.time() + timeout_seconds
        while time.time() < max_time:
            found_metric_names = {
                metric['name']
                for metric in gnocchi.metric.list()
            }
            missing_metric_names = expected_metric_names - found_metric_names
            if len(missing_metric_names) == 0:
                logging.info('All expected metrics found.')
                break
            time.sleep(polling_interval_seconds)

        unexpected_found_metric_names = (found_metric_names -
                                         expected_metric_names)
        if len(unexpected_found_metric_names) > 0:
            self.fail('Unexpected metrics '
                      'published: ' + ', '.join(unexpected_found_metric_names))

        if len(missing_metric_names) > 0:
            self.fail('These metrics should have been published but '
                      "weren't: " + ', '.join(missing_metric_names))
Code example #8
    @classmethod
    def setUpClass(cls):
        """Run class setup for running Ceilometer tests."""
        super(CeilometerTest, cls).setUpClass()
        cls.current_release = openstack_utils.get_os_release()
Code example #9
    def test_end_user_domain_admin_access(self):
        """Verify that end-user domain admin does not have elevated privileges.

        In addition to validating that the `policy.json` is written and the
        service is restarted on config-changed, the test validates that our
        `policy.json` is correct.

        Catch regressions like LP: #1651989
        """
        if (openstack_utils.get_os_release() <
                openstack_utils.get_os_release('xenial_ocata')):
            logging.info('skipping test < xenial_ocata')
            return
        with self.config_change(
            {'preferred-api-version': self.default_api_version},
            {'preferred-api-version': '3'},
                application_name="keystone"):
            for ip in self.keystone_ips:
                openrc = {
                    'API_VERSION': 3,
                    'OS_USERNAME': DEMO_ADMIN_USER,
                    'OS_PASSWORD': DEMO_ADMIN_USER_PASSWORD,
                    'OS_AUTH_URL': 'http://{}:5000/v3'.format(ip),
                    'OS_USER_DOMAIN_NAME': DEMO_DOMAIN,
                    'OS_DOMAIN_NAME': DEMO_DOMAIN,
                }
                if self.tls_rid:
                    openrc['OS_CACERT'] = openstack_utils.KEYSTONE_LOCAL_CACERT
                    openrc['OS_AUTH_URL'] = (openrc['OS_AUTH_URL'].replace(
                        'http', 'https'))
                logging.info('keystone IP {}'.format(ip))
                keystone_session = openstack_utils.get_keystone_session(
                    openrc, scope='DOMAIN')
                keystone_client = openstack_utils.get_keystone_session_client(
                    keystone_session)
                try:
                    # expect failure
                    keystone_client.domains.list()
                except keystoneauth1.exceptions.http.Unauthorized as e:
                    # This is to handle LP bug 1834287. We handle this error
                    # separately because it's a case of the client not being
                    # authenticated whereas the test is about checking if the
                    # client is authenticated but not authorized. We catch it
                    # so that we can log it properly and then re-raise it to
                    # indicate an underlying error that this test is unable
                    # to handle.
                    #
                    # Note that without catching this, the test will fail
                    # anyway but why it failed is not immediately obvious.
                    # This puts the reason front and center for the sake of
                    # efficiency
                    logging.error('Client is not authenticated. Test cannot '
                                  'continue...ERROR ({})'.format(e))
                    raise e
                except keystoneauth1.exceptions.http.Forbidden as e:
                    logging.debug('Retrieve domain list as end-user domain '
                                  'admin NOT allowed...OK ({})'.format(e))
                    pass
                else:
                    raise zaza_exceptions.KeystoneAuthorizationPermissive(
                        'Retrieve domain list as end-user domain admin '
                        'allowed when it should not be.')
        logging.info('OK')
Code example #10
    def test_003_test_override_is_observed(self):
        """Test that the override is observed by the underlying service."""
        if (openstack_utils.get_os_release() <
                openstack_utils.get_os_release('groovy_victoria')):
            raise unittest.SkipTest(
                "Test skipped until Bug #1880959 is fix released")
        if self._test_name is None:
            logging.info("Doing policyd override for {}".format(
                self._service_name))
        else:
            logging.info(self._test_name)
        # note that policyd override only works with xenial-queens and later,
        # so keystone is already v3

        # Allow the overridden class to set up the environment before the
        # policyd test is performed.
        self.setup_for_attempt_operation(self.keystone_ips[0])

        # verify that the operation works before performing the policyd
        # override.
        zaza_model.block_until_wl_status_info_starts_with(
            self.application_name, "PO:", negate_match=True)
        zaza_model.block_until_all_units_idle()
        logging.info("First verify that operation works prior to override")
        try:
            self.get_client_and_attempt_operation(self.keystone_ips[0])
        except Exception as e:
            self.cleanup_for_attempt_operation(self.keystone_ips[0])
            raise zaza_exceptions.PolicydError(
                'Service action failed and should have passed. "{}"'.format(
                    str(e)))

        # now do the policyd override.
        logging.info("Doing policyd override with: {}".format(self._rule))
        self._set_policy_with(self._rule)
        zaza_model.block_until_wl_status_info_starts_with(
            self.application_name, "PO:")
        zaza_model.block_until_all_units_idle()

        # now make sure the operation fails
        logging.info("Now verify that operation doesn't work with override")
        try:
            self.get_client_and_attempt_operation(self.keystone_ips[0])
            raise zaza_exceptions.PolicydError(
                "Service action passed and should have failed.")
        except PolicydOperationFailedException:
            pass
        except zaza_exceptions.PolicydError as e:
            logging.info("{}".format(str(e)))
            raise
        except Exception as e:
            logging.info("exception was: {}".format(e.__class__.__name__))
            import traceback
            logging.info(traceback.format_exc())
            self.cleanup_for_attempt_operation(self.keystone_ips[0])
            raise zaza_exceptions.PolicydError(
                'Service action failed in an unexpected way: {}'.format(
                    str(e)))

        # clean out the policy and wait
        self._set_config(False)
        # check that the status no longer has "PO:" on it.
        # we have to do it twice due to async races and because some info lines
        # erase the PO: bit prior to actually getting back to idle.  The double
        # check verifies that the charms have started, the idle waits until it
        # is finished, and then the final check really makes sure they got
        # switched off.
        zaza_model.block_until_wl_status_info_starts_with(
            self.application_name, "PO:", negate_match=True)
        zaza_model.block_until_all_units_idle()
        zaza_model.block_until_wl_status_info_starts_with(
            self.application_name, "PO:", negate_match=True)

        # Finally make sure it works again!
        logging.info("Finally verify that operation works after removing the "
                     "override.")
        try:
            self.get_client_and_attempt_operation(self.keystone_ips[0])
        except Exception as e:
            raise zaza_exceptions.PolicydError(
                'Service action failed and should have passed after removing '
                'policy override: "{}"'.format(str(e)))
        finally:
            self.cleanup_for_attempt_operation(self.keystone_ips[0])
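
The test above expects the blocked operation to surface as PolicydOperationFailedException, and it delegates setup_for_attempt_operation, get_client_and_attempt_operation and cleanup_for_attempt_operation to subclasses. A minimal sketch of the exception only; this is an assumption, the real class lives in the policyd test helpers:

class PolicydOperationFailedException(Exception):
    """Raised by get_client_and_attempt_operation() when the policyd
    override blocks the attempted API call, which is the expected outcome
    while the override is active."""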
Code example #11
def add_demo_user():
    """Add a demo user to the current deployment."""
    def _v2():
        keystone_session = openstack_utils.get_overcloud_keystone_session()
        keystone_client = openstack_utils.get_keystone_session_client(
            keystone_session, client_api_version=2)
        tenant = keystone_client.tenants.create(tenant_name=DEMO_TENANT,
                                                description='Demo Tenant',
                                                enabled=True)
        keystone_client.users.create(name=DEMO_USER,
                                     password=DEMO_PASSWORD,
                                     tenant_id=tenant.id)

    def _v3():
        keystone_session = openstack_utils.get_overcloud_keystone_session()
        keystone_client = openstack_utils.get_keystone_session_client(
            keystone_session)
        domain = keystone_client.domains.create(DEMO_DOMAIN,
                                                description='Demo Domain',
                                                enabled=True)
        project = keystone_client.projects.create(DEMO_PROJECT,
                                                  domain,
                                                  description='Demo Project',
                                                  enabled=True)
        demo_user = keystone_client.users.create(DEMO_USER,
                                                 domain=domain,
                                                 project=project,
                                                 password=DEMO_PASSWORD,
                                                 email='*****@*****.**',
                                                 description='Demo User',
                                                 enabled=True)
        member_role = keystone_client.roles.find(name='Member')
        keystone_client.roles.grant(member_role,
                                    user=demo_user,
                                    project_domain=domain,
                                    project=project)
        demo_admin_user = keystone_client.users.create(
            DEMO_ADMIN_USER,
            domain=domain,
            project=project,
            password=DEMO_ADMIN_USER_PASSWORD,
            email='*****@*****.**',
            description='Demo Admin User',
            enabled=True)
        admin_role = keystone_client.roles.find(name='Admin')
        keystone_client.roles.grant(admin_role,
                                    user=demo_admin_user,
                                    domain=domain)
        keystone_client.roles.grant(member_role,
                                    user=demo_admin_user,
                                    project_domain=domain,
                                    project=project)
        keystone_client.roles.grant(admin_role,
                                    user=demo_admin_user,
                                    project_domain=domain,
                                    project=project)

    if (openstack_utils.get_os_release() <
            openstack_utils.get_os_release('trusty_mitaka')):
        # create only V2 user
        _v2()
        return

    if (openstack_utils.get_os_release() >=
            openstack_utils.get_os_release('trusty_mitaka')
            and openstack_utils.get_os_release() <
            openstack_utils.get_os_release('xenial_queens')):
        # create V2 and V3 user
        _v2()

        _singleton = BaseKeystoneTest()
        _singleton.setUpClass()
        # Explicitly set application name in case setup is called by a charm
        # under test other than keystone.
        with _singleton.config_change(
            {'preferred-api-version': _singleton.default_api_version},
            {'preferred-api-version': 3},
                application_name="keystone"):
            _v3()
    else:
        # create only V3 user
        _v3()
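
add_demo_user and the keystone tests above share module-level constants such as DEMO_USER and DEMO_DOMAIN. The values below are illustrative placeholders only; the real setup module defines its own:

# Illustrative placeholders; the actual values live in the setup module.
DEMO_DOMAIN = 'demoDomain'
DEMO_PROJECT = 'demoProject'
DEMO_TENANT = 'demoTenant'
DEMO_USER = 'demo'
DEMO_PASSWORD = 'pass'
DEMO_ADMIN_USER = 'demoAdmin'
DEMO_ADMIN_USER_PASSWORD = 'pass'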
Code example #12
    def test_401_authenticate(self):
        """Validate that authentication succeeds for client log in.

        Ported from amulet tests.
        """
        logging.info('Checking authentication through dashboard...')

        unit_name = zaza_model.get_lead_unit_name('openstack-dashboard')
        keystone_unit = zaza_model.get_lead_unit_name('keystone')
        dashboard_relation = openstack_juju.get_relation_from_unit(
            keystone_unit, unit_name, 'identity-service')
        dashboard_ip = dashboard_relation['private-address']
        logging.debug("... dashboard_ip is:{}".format(dashboard_ip))

        url = 'http://{}/horizon/auth/login/'.format(dashboard_ip)

        overcloud_auth = openstack_utils.get_overcloud_auth()
        if overcloud_auth['OS_AUTH_URL'].endswith("v2.0"):
            api_version = 2
        else:
            api_version = 3
        keystone_client = openstack_utils.get_keystone_client(overcloud_auth)
        catalog = keystone_client.service_catalog.get_endpoints()
        logging.info(catalog)
        if api_version == 2:
            region = catalog['identity'][0]['publicURL']
        else:
            region = [
                i['url'] for i in catalog['identity']
                if i['interface'] == 'public'
            ][0]

        # NOTE(ajkavanagh) there used to be a trusty/icehouse test in the
        # amulet test, but as the zaza tests only test from trusty/mitaka
        # onwards, the test has been dropped
        if (openstack_utils.get_os_release() >=
                openstack_utils.get_os_release('bionic_stein')):
            expect = "Sign Out"
            # the updated dashboard seems to require region to be 'default' in
            # this test configuration
            region = 'default'
        else:
            expect = 'Projects - OpenStack Dashboard'

        # NOTE(thedac) Similar to the connection test above we get occasional
        # intermittent authentication failures. Wrap it in a retry loop.
        @tenacity.retry(wait=tenacity.wait_exponential(multiplier=1,
                                                       min=5,
                                                       max=10),
                        retry=tenacity.retry_unless_exception_type(
                            self.AuthExceptions),
                        reraise=True)
        def _do_auth_check(expect):
            # start session, get csrftoken
            client = requests.session()
            client.get(url)

            if 'csrftoken' in client.cookies:
                csrftoken = client.cookies['csrftoken']
            else:
                raise Exception("Missing csrftoken")

            # build and send post request
            auth = {
                'domain': 'admin_domain',
                'username': '******',
                'password': overcloud_auth['OS_PASSWORD'],
                'csrfmiddlewaretoken': csrftoken,
                'next': '/horizon/',
                'region': region,
            }

            # In the minimal test deployment /horizon/project/ is unauthorized,
            # this does not occur in a full deployment and is probably due to
            # services/information missing that horizon wants to display data
            # for.
            # Redirect to /horizon/identity/ instead.
            if (openstack_utils.get_os_release() >=
                    openstack_utils.get_os_release('xenial_queens')):
                auth['next'] = '/horizon/identity/'

            if (openstack_utils.get_os_release() >=
                    openstack_utils.get_os_release('bionic_stein')):
                auth['region'] = 'default'

            if api_version == 2:
                del auth['domain']

            logging.info('POST data: "{}"'.format(auth))
            response = client.post(url, data=auth, headers={'Referer': url})

            if expect not in response.text:
                msg = 'FAILURE code={} text="{}"'.format(
                    response, response.text)
                # NOTE(thedac) amulet.raise_status exits on exception.
                # Raise a custom exception.
                logging.info("Yeah, wen't wrong: {}".format(msg))
                raise self.FailedAuth(msg)
            raise self.PassedAuth()

        try:
            _do_auth_check(expect)
        except self.FailedAuth as e:
            assert False, str(e)
        except self.PassedAuth:
            pass
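
The retry wrapper above references self.AuthExceptions, self.FailedAuth and self.PassedAuth on the test class. A hedged sketch of how those attributes could be declared; the names come from the test, the bodies are assumptions:

class FailedAuth(Exception):
    """The dashboard login did not return the expected page."""


class PassedAuth(Exception):
    """The dashboard login succeeded; raised to stop the tenacity retries."""


# Tuple handed to tenacity.retry_unless_exception_type() above.
AuthExceptions = (FailedAuth, PassedAuth)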
Code example #13
File: tests.py  Project: sahid/zaza-openstack-tests
    def test_410_cinder_vol_create_backup_delete_restore_pool_inspect(self):
        """Create, backup, delete, restore a ceph-backed cinder volume.

        Create, backup, delete, restore a ceph-backed cinder volume, and
        inspect ceph cinder pool object count as the volume is created
        and deleted.
        """
        unit_name = zaza.model.get_lead_unit_name('ceph-mon')
        obj_count_samples = []
        pool_size_samples = []
        pools = ceph_utils.get_ceph_pools(unit_name)
        expected_pool = 'cinder-ceph'
        cinder_ceph_pool = pools[expected_pool]

        # Check ceph cinder pool object count, disk space usage and pool name
        logging.info('Checking ceph cinder pool original samples...')
        pool_name, obj_count, kb_used = ceph_utils.get_ceph_pool_sample(
            unit_name, cinder_ceph_pool)

        obj_count_samples.append(obj_count)
        pool_size_samples.append(kb_used)

        self.assertEqual(pool_name, expected_pool)

        # Create ceph-backed cinder volume
        cinder_vol = self.cinder_client.volumes.create(
            name='{}-410-vol'.format(self.RESOURCE_PREFIX), size=1)
        openstack_utils.resource_reaches_status(self.cinder_client.volumes,
                                                cinder_vol.id,
                                                wait_iteration_max_time=180,
                                                stop_after_attempt=15,
                                                expected_status='available',
                                                msg='Volume status wait')

        # Backup the volume
        vol_backup = self.cinder_client.backups.create(
            cinder_vol.id,
            name='{}-410-backup-vol'.format(self.RESOURCE_PREFIX))
        openstack_utils.resource_reaches_status(self.cinder_client.backups,
                                                vol_backup.id,
                                                wait_iteration_max_time=180,
                                                stop_after_attempt=15,
                                                expected_status='available',
                                                msg='Volume status wait')
        # Delete the volume
        openstack_utils.delete_volume(self.cinder_client, cinder_vol.id)
        # Restore the volume
        self.cinder_client.restores.restore(vol_backup.id)
        openstack_utils.resource_reaches_status(self.cinder_client.backups,
                                                vol_backup.id,
                                                wait_iteration_max_time=180,
                                                stop_after_attempt=15,
                                                expected_status='available',
                                                msg='Backup status wait')
        # Delete the backup
        openstack_utils.delete_volume_backup(self.cinder_client, vol_backup.id)
        openstack_utils.resource_removed(self.cinder_client.backups,
                                         vol_backup.id,
                                         wait_iteration_max_time=180,
                                         stop_after_attempt=15,
                                         msg="Backup volume")

        # Re-check ceph cinder pool object count and disk usage
        logging.info('Checking ceph cinder pool samples '
                     'after volume create...')
        pool_name, obj_count, kb_used = ceph_utils.get_ceph_pool_sample(
            unit_name, cinder_ceph_pool, self.model_name)

        obj_count_samples.append(obj_count)
        pool_size_samples.append(kb_used)

        name = '{}-410-vol'.format(self.RESOURCE_PREFIX)
        vols = self.cinder_client.volumes.list()
        try:
            cinder_vols = [v for v in vols if v.name == name]
        except AttributeError:
            cinder_vols = [v for v in vols if v.display_name == name]
        if not cinder_vols:
            # NOTE(hopem): it appears that at some point cinder-backup stopped
            # restoring volume metadata properly so revert to default name if
            # original is not found
            name = "restore_backup_{}".format(vol_backup.id)
            try:
                cinder_vols = [v for v in vols if v.name == name]
            except AttributeError:
                cinder_vols = [v for v in vols if v.display_name == name]

        self.assertTrue(cinder_vols)

        cinder_vol = cinder_vols[0]

        # Delete restored cinder volume
        openstack_utils.delete_volume(self.cinder_client, cinder_vol.id)
        openstack_utils.resource_removed(self.cinder_client.volumes,
                                         cinder_vol.id,
                                         wait_iteration_max_time=180,
                                         stop_after_attempt=15,
                                         msg="Volume")

        # Final check, ceph cinder pool object count and disk usage
        logging.info('Checking ceph cinder pool after volume delete...')
        pool_name, obj_count, kb_used = ceph_utils.get_ceph_pool_sample(
            unit_name, cinder_ceph_pool, self.model_name)

        obj_count_samples.append(obj_count)
        pool_size_samples.append(kb_used)

        # Validate ceph cinder pool object count samples over time
        original, created, deleted = range(3)
        self.assertFalse(
            obj_count_samples[created] <= obj_count_samples[original])
        self.assertFalse(
            obj_count_samples[deleted] >= obj_count_samples[created])

        # Luminous (pike) ceph seems more efficient at disk usage so we cannot
        # guarantee the ordering of kb_used
        if (openstack_utils.get_os_release() <
                openstack_utils.get_os_release('xenial_mitaka')):
            self.assertFalse(
                pool_size_samples[created] <= pool_size_samples[original])
            self.assertFalse(
                pool_size_samples[deleted] >= pool_size_samples[created])
Code example #14
    @classmethod
    def setUpClass(cls):
        """Run class setup for running nova-cloud-controller tests."""
        super(NovaCloudController, cls).setUpClass()
        cls.current_release = openstack_utils.get_os_release()
Code example #15
File: tests.py  Project: wolsen/zaza-openstack-tests
    def __get_expected_metric_names(self, current_os_release):
        expected_metric_names = {
            'compute.instance.booting.time',
            'disk.ephemeral.size',
            'disk.root.size',
            'image.download',
            'image.serve',
            'image.size',
            'memory',
            'vcpus',
        }

        all_polsters_are_enabled = (
            openstack_utils.get_application_config_option(
                self.application_name, 'enable-all-pollsters'))

        if all_polsters_are_enabled:
            expected_metric_names |= {
                'disk.device.allocation',
                'disk.device.capacity',
                'disk.device.read.latency',
                'disk.device.usage',
                'disk.device.write.latency',
                'memory.resident',
                'memory.swap.in',
                'memory.swap.out',
                'network.incoming.packets.drop',
                'network.incoming.packets.error',
                'network.outgoing.packets.drop',
                'network.outgoing.packets.error',
            }

        openstack_queens_or_older = (
            current_os_release <=
            openstack_utils.get_os_release('bionic_queens'))
        openstack_rocky_or_older = (
            current_os_release <=
            openstack_utils.get_os_release('bionic_rocky'))
        openstack_victoria_or_older = (
            current_os_release <=
            openstack_utils.get_os_release('groovy_victoria'))

        if openstack_victoria_or_older:
            expected_metric_names |= {
                'cpu',
                'disk.device.read.bytes',
                'disk.device.read.requests',
                'disk.device.write.bytes',
                'disk.device.write.requests',
                'memory.usage',
                'network.incoming.bytes',
                'network.incoming.packets',
                'network.outgoing.bytes',
                'network.outgoing.packets',
            }

        if openstack_rocky_or_older:
            expected_metric_names |= {
                'cpu.delta',
                'cpu_util',
                'disk.device.read.bytes.rate',
                'disk.device.read.requests.rate',
                'disk.device.write.bytes.rate',
                'disk.device.write.requests.rate',
                'network.incoming.bytes.rate',
                'network.incoming.packets.rate',
                'network.outgoing.bytes.rate',
                'network.outgoing.packets.rate',
            }
            if all_pollsters_are_enabled:
                expected_metric_names |= {
                    'disk.allocation',
                    'disk.capacity',
                    'disk.read.bytes',
                    'disk.read.bytes.rate',
                    'disk.read.requests',
                    'disk.read.requests.rate',
                    'disk.usage',
                    'disk.write.bytes',
                    'disk.write.bytes.rate',
                    'disk.write.requests',
                    'disk.write.requests.rate',
                }

        if openstack_queens_or_older:
            expected_metric_names |= {
                'cpu_l3_cache',
                'disk.allocation',
                'disk.capacity',
                'disk.device.allocation',
                'disk.device.capacity',
                'disk.device.iops',
                'disk.device.latency',
                'disk.device.read.latency',
                'disk.device.usage',
                'disk.device.write.latency',
                'disk.iops',
                'disk.latency',
                'disk.read.bytes',
                'disk.read.bytes.rate',
                'disk.read.requests',
                'disk.read.requests.rate',
                'disk.usage',
                'disk.write.bytes',
                'disk.write.bytes.rate',
                'disk.write.requests',
                'disk.write.requests.rate',
                'memory.bandwidth.local',
                'memory.bandwidth.total',
                'memory.resident',
                'memory.swap.in',
                'memory.swap.out',
                'network.incoming.packets.drop',
                'network.incoming.packets.error',
                'network.outgoing.packets.drop',
                'network.outgoing.packets.error',
                'perf.cache.misses',
                'perf.cache.references',
                'perf.cpu.cycles',
                'perf.instructions',
            }

        return expected_metric_names
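
A minimal sketch of how this helper might be used from a test method in the same class (the method name is hypothetical; the real caller in the project may instead compare the set against metric names reported by the deployed telemetry service):

    def test_expected_metric_names(self):
        """Sketch only: resolve the expected metric set for this release."""
        expected = self.__get_expected_metric_names(
            openstack_utils.get_os_release())
        # Base metrics such as image.size are expected on every release.
        self.assertIn('image.size', expected)
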
Code example #16
0
class CeilometerTest(test_utils.OpenStackBaseTest):
    """Encapsulate Ceilometer tests."""

    CONF_FILE = '/etc/ceilometer/ceilometer.conf'

    XENIAL_PIKE = openstack_utils.get_os_release('xenial_pike')
    XENIAL_OCATA = openstack_utils.get_os_release('xenial_ocata')
    XENIAL_NEWTON = openstack_utils.get_os_release('xenial_newton')
    XENIAL_MITAKA = openstack_utils.get_os_release('xenial_mitaka')
    TRUSTY_MITAKA = openstack_utils.get_os_release('trusty_mitaka')
    TRUSTY_LIBERTY = openstack_utils.get_os_release('trusty_liberty')

    @classmethod
    def setUpClass(cls):
        """Run class setup for running Ceilometer tests."""
        super(CeilometerTest, cls).setUpClass()
        cls.current_release = openstack_utils.get_os_release()

    @property
    def services(self):
        """Return a list of services for Openstack Release."""
        services = []

        if self.application_name == 'ceilometer-agent':
            if self.current_release <= CeilometerTest.XENIAL_MITAKA:
                services.append('ceilometer-polling')
            else:
                services.append('ceilometer-polling: AgentManager worker(0)')
            return services

        # Note: disabling ceilometer-polling and ceilometer-agent-central due
        # to bug 1846390: https://bugs.launchpad.net/bugs/1846390
        if self.current_release >= CeilometerTest.XENIAL_PIKE:
            # services.append('ceilometer-polling: AgentManager worker(0)')
            services.append('ceilometer-agent-notification: '
                            'NotificationService worker(0)')
        elif self.current_release >= CeilometerTest.XENIAL_OCATA:
            services.append('ceilometer-collector: CollectorService worker(0)')
            # services.append('ceilometer-polling: AgentManager worker(0)')
            services.append('ceilometer-agent-notification: '
                            'NotificationService worker(0)')
            services.append('apache2')
        elif self.current_release >= CeilometerTest.XENIAL_NEWTON:
            services.append('ceilometer-collector - CollectorService(0)')
            # services.append('ceilometer-polling - AgentManager(0)')
            services.append('ceilometer-agent-notification - '
                            'NotificationService(0)')
            services.append('ceilometer-api')
        else:
            services.append('ceilometer-collector')
            services.append('ceilometer-api')
            services.append('ceilometer-agent-notification')

            if self.current_release < CeilometerTest.TRUSTY_MITAKA:
                services.append('ceilometer-alarm-notifier')
                services.append('ceilometer-alarm-evaluator')

            # if self.current_release >= CeilometerTest.TRUSTY_LIBERTY:
            # Liberty and later
            # services.append('ceilometer-polling')
            # else:
            # Juno and earlier
            # services.append('ceilometer-agent-central')

        return services

    @property
    def restartable_services(self):
        """Return a list of services that are known to be restartable.

        For this OpenStack release these services can be stopped and started
        with no issues.
        """
        # Due to Bug #1861321 ceilometer-collector does not reliably
        # restart.
        _services = copy.deepcopy(self.services)
        if self.current_release <= CeilometerTest.TRUSTY_MITAKA:
            try:
                _services.remove('ceilometer-collector')
            except ValueError:
                pass
        return _services

    def test_400_api_connection(self):
        """Simple api calls to check service is up and responding."""
        if self.current_release >= CeilometerTest.XENIAL_PIKE:
            logging.info('Skipping API checks as ceilometer api has been '
                         'removed')
            return

        logging.info('Instantiating ceilometer client...')
        ceil = ceilo_client.Client(
            session=openstack_utils.get_overcloud_keystone_session())

        logging.info('Checking api functionality...')
        assert (ceil.samples.list() == [])
        assert (ceil.meters.list() == [])

    def test_900_restart_on_config_change(self):
        """Checking restart happens on config change."""
        config_name = 'debug'

        if self.application_name == 'ceilometer-agent':
            config_name = 'use-internal-endpoints'

        # Expected default and alternate values
        current_value = openstack_utils.get_application_config_option(
            self.application_name, config_name)
        assert type(current_value) == bool
        new_value = not current_value

        # Convert bool to str
        current_value = str(current_value)
        new_value = str(new_value)

        set_default = {config_name: current_value}
        set_alternate = {config_name: new_value}

        default_entry = {'DEFAULT': {'debug': [current_value]}}
        alternate_entry = {'DEFAULT': {'debug': [new_value]}}

        if self.application_name == 'ceilometer-agent':
            default_entry = None
            alternate_entry = {
                'service_credentials': {
                    'interface': ['internalURL']
                }
            }

        logging.info('changing config: {}'.format(set_alternate))
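        # restart_on_changed (contract inferred from its use here): apply the
        # alternate config, verify the rendered config file gains the
        # alternate entries and the listed services restart, then restore the
        # default config.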
        self.restart_on_changed(CeilometerTest.CONF_FILE, set_default,
                                set_alternate, default_entry, alternate_entry,
                                self.restartable_services)

    def test_901_pause_resume(self):
        """Run pause and resume tests.

        Pause service and check services are stopped then resume and check
        they are started.
        """
        with self.pause_resume(self.restartable_services):
            logging.info("Testing pause and resume")
Code example #17
0
    @classmethod
    def setUpClass(cls):
        """Run class setup for OVN migration tests."""
        super(OVSOVNMigrationTest, cls).setUpClass()
        cls.current_release = openstack_utils.get_os_release(
            openstack_utils.get_current_os_release_pair())
Code example #18
0
    def test_ceph_encryption(self):
        """Test Ceph encryption.

        Verify that the new disk is added with encryption by checking for
        Ceph's encryption keys directory.
        """
        current_release = zaza_openstack.get_os_release()
        trusty_mitaka = zaza_openstack.get_os_release('trusty_mitaka')
        if current_release >= trusty_mitaka:
            logging.warn("Skipping encryption test for Mitaka and higher")
            return
        unit_name = 'ceph-osd/0'
        set_default = {
            'osd-encrypt': 'False',
            'osd-devices': '/dev/vdb /srv/ceph',
        }
        set_alternate = {
            'osd-encrypt': 'True',
            'osd-devices': '/dev/vdb /srv/ceph /srv/ceph_encrypted',
        }
        juju_service = 'ceph-osd'
        logging.info('Making config change on {}...'.format(juju_service))
        mtime = zaza_model.get_unit_time(unit_name)

        file_mtime = None

        folder_name = '/etc/ceph/dmcrypt-keys/'
        with self.config_change(set_default, set_alternate,
                                application_name=juju_service):
            with tempfile.TemporaryDirectory() as tempdir:
                # Creating a temp dir to copy keys
                temp_folder = '/tmp/dmcrypt-keys'
                cmd = 'mkdir {}'.format(temp_folder)
                ret = zaza_model.run_on_unit(unit_name, cmd)
                logging.debug('Ret for cmd {} is {}'.format(cmd, ret))
                # Copy keys from /etc to /tmp
                cmd = 'sudo cp {}* {}'.format(folder_name, temp_folder)
                ret = zaza_model.run_on_unit(unit_name, cmd)
                logging.debug('Ret for cmd {} is {}'.format(cmd, ret))
                # Changing permissions to be able to SCP the files
                cmd = 'sudo chown -R ubuntu:ubuntu {}'.format(temp_folder)
                ret = zaza_model.run_on_unit(unit_name, cmd)
                logging.debug('Ret for cmd {} is {}'.format(cmd, ret))
                # SCP to retrieve all files in folder
                # -p: preserve timestamps
                source = '/tmp/dmcrypt-keys/*'
                zaza_model.scp_from_unit(unit_name=unit_name,
                                         source=source,
                                         destination=tempdir,
                                         scp_opts='-p')
                for elt in listdir(tempdir):
                    file_path = '/'.join([tempdir, elt])
                    if path.isfile(file_path):
                        file_mtime = path.getmtime(file_path)
                        if file_mtime:
                            break

        if not file_mtime:
            logging.warn('Could not determine mtime, assuming '
                         'folder does not exist')
            raise FileNotFoundError('folder does not exist')

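        # A key file with an mtime newer than the unit time captured before
        # the config change implies the dmcrypt keys were created for the
        # newly added encrypted OSD.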
        if file_mtime >= mtime:
            logging.info('Folder mtime is newer than provided mtime '
                         '(%s >= %s) on %s (OK)' % (file_mtime,
                                                    mtime, unit_name))
        else:
            logging.warn('Folder mtime is older than provided mtime '
                         '(%s < %s) on %s' % (file_mtime,
                                              mtime, unit_name))
            raise Exception('Folder mtime is older than provided mtime')
Code example #19
0
    def test_410_heat_stack_create_delete(self):
        """Create stack, confirm nova compute resource, delete stack."""
        # Verify new image name
        images_list = list(self.glance_client.images.list())
        self.assertEqual(images_list[0].name, IMAGE_NAME,
                         "glance image create failed or unexpected")

        # Create a heat stack from a heat template, verify its status
        logging.info('Creating heat stack...')
        t_name = 'hot_hello_world.yaml'
        if (openstack_utils.get_os_release() <
                openstack_utils.get_os_release('xenial_queens')):
            os_release = 'icehouse'
        else:
            os_release = 'queens'

        # Get location of template files in charm-heat
        bundle_path = charm_lifecycle_utils.BUNDLE_DIR
        if bundle_path[-1:] == "/":
            bundle_path = bundle_path[0:-1]

        file_rel_path = os.path.join(os.path.dirname(bundle_path),
                                     TEMPLATES_PATH, os_release, t_name)
        file_abs_path = os.path.abspath(file_rel_path)
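        # Build a file:// URL so the template can be fetched from local disk.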
        t_url = urlparse.urlparse(file_abs_path, scheme='file').geturl()
        logging.info('template url: {}'.format(t_url))

        r_req = self.heat_client.http_client
        t_files, template = template_utils.get_template_contents(t_url, r_req)
        env_files, env = template_utils.process_environment_and_files(
            env_path=None)

        fields = {
            'stack_name': STACK_NAME,
            'timeout_mins': '15',
            'disable_rollback': False,
            'parameters': {
                'admin_pass': '******',
                'key_name': nova_utils.KEYPAIR_NAME,
                'image': IMAGE_NAME
            },
            'template': template,
            'files': dict(list(t_files.items()) + list(env_files.items())),
            'environment': env
        }

        # Create the stack
        try:
            stack = self.heat_client.stacks.create(**fields)
            logging.info('Stack data: {}'.format(stack))
            stack_id = stack['stack']['id']
            logging.info('Creating new stack, ID: {}'.format(stack_id))
        except Exception as e:
            # Generally, an api or cloud config error if this is hit.
            msg = 'Failed to create heat stack: {}'.format(e)
            self.fail(msg)

        # Confirm stack reaches COMPLETE status.
        # /!\ Heat stacks reach a COMPLETE status even when nova cannot
        # find resources (a valid hypervisor) to fit the instance, in
        # which case the heat stack self-deletes!  Confirm anyway...
        openstack_utils.resource_reaches_status(self.heat_client.stacks,
                                                stack_id,
                                                expected_status="COMPLETE",
                                                msg="Stack status wait")
        # List stack
        stacks = list(self.heat_client.stacks.list())
        logging.info('All stacks: {}'.format(stacks))

        # Get stack information
        try:
            stack = self.heat_client.stacks.get(STACK_NAME)
        except Exception as e:
            # Generally, a resource availability issue if this is hit.
            msg = 'Failed to get heat stack: {}'.format(e)
            self.fail(msg)

        # Confirm stack name.
        logging.info('Expected, actual stack name: {}, '
                     '{}'.format(STACK_NAME, stack.stack_name))
        self.assertEqual(
            stack.stack_name, STACK_NAME, 'Stack name mismatch, '
            '{} != {}'.format(STACK_NAME, stack.stack_name))

        # Confirm existence of a heat-generated nova compute resource
        logging.info('Confirming heat stack resource status...')
        resource = self.heat_client.resources.get(STACK_NAME, RESOURCE_TYPE)
        server_id = resource.physical_resource_id
        self.assertTrue(server_id, "Stack failed to spawn a compute resource.")

        # Confirm nova instance reaches ACTIVE status
        openstack_utils.resource_reaches_status(self.nova_client.servers,
                                                server_id,
                                                expected_status="ACTIVE",
                                                msg="nova instance")
        logging.info('Nova instance reached ACTIVE status')

        # Delete stack
        logging.info('Deleting heat stack...')
        openstack_utils.delete_resource(self.heat_client.stacks,
                                        STACK_NAME,
                                        msg="heat stack")
Code example #20
0
class CeilometerTest(test_utils.OpenStackBaseTest):
    """Encapsulate Ceilometer tests."""

    CONF_FILE = '/etc/ceilometer/ceilometer.conf'

    XENIAL_PIKE = openstack_utils.get_os_release('xenial_pike')
    XENIAL_OCATA = openstack_utils.get_os_release('xenial_ocata')
    XENIAL_NEWTON = openstack_utils.get_os_release('xenial_newton')
    TRUSTY_MITAKA = openstack_utils.get_os_release('trusty_mitaka')
    TRUSTY_LIBERTY = openstack_utils.get_os_release('trusty_liberty')

    @classmethod
    def setUpClass(cls):
        """Run class setup for running Ceilometer tests."""
        super(CeilometerTest, cls).setUpClass()

    @property
    def services(self):
        """Return a list services for Openstack Release."""
        current_release = openstack_utils.get_os_release()
        services = []

        if current_release >= CeilometerTest.XENIAL_PIKE:
            services.append('ceilometer-polling: AgentManager worker(0)')
            services.append('ceilometer-agent-notification: '
                            'NotificationService worker(0)')
        elif current_release >= CeilometerTest.XENIAL_OCATA:
            services.append('ceilometer-collector: CollectorService worker(0)')
            services.append('ceilometer-polling: AgentManager worker(0)')
            services.append('ceilometer-agent-notification: '
                            'NotificationService worker(0)')
            services.append('apache2')
        elif current_release >= CeilometerTest.XENIAL_NEWTON:
            services.append('ceilometer-collector - CollectorService(0)')
            services.append('ceilometer-polling - AgentManager(0)')
            services.append('ceilometer-agent-notification - '
                            'NotificationService(0)')
            services.append('ceilometer-api')
        else:
            services.append('ceilometer-collector')
            services.append('ceilometer-api')
            services.append('ceilometer-agent-notification')

            if current_release < CeilometerTest.TRUSTY_MITAKA:
                services.append('ceilometer-alarm-notifier')
                services.append('ceilometer-alarm-evaluator')

            if current_release >= CeilometerTest.TRUSTY_LIBERTY:
                # Liberty and later
                services.append('ceilometer-polling')
            else:
                # Juno and earlier
                services.append('ceilometer-agent-central')

        return services

    # NOTE(beisner): need to add more functional tests

    def test_900_restart_on_config_change(self):
        """Checking restart happens on config change."""
        # Expected default and alternate values
        current_value = openstack_utils.get_application_config_option(
            'ceilometer', 'debug')
        assert type(current_value) == bool
        new_value = not current_value

        # Convert bool to str
        current_value = str(current_value)
        new_value = str(new_value)

        set_default = {'debug': current_value}
        set_alternate = {'debug': new_value}
        default_entry = {'DEFAULT': {'debug': [current_value]}}
        alternate_entry = {'DEFAULT': {'debug': [new_value]}}

        logging.info('changing config: {}'.format(set_alternate))
        self.restart_on_changed(CeilometerTest.CONF_FILE, set_default,
                                set_alternate, default_entry, alternate_entry,
                                self.services)

    def test_901_pause_resume(self):
        """Run pause and resume tests.

        Pause service and check services are stopped then resume and check
        they are started.
        """
        with self.pause_resume(['ceilometer']):
            logging.info("Testing pause and resume")
Code example #21
0
File: tests.py Project: sahid/zaza-openstack-tests
    def test_blocked_when_non_pristine_disk_appears(self):
        """Test blocked state with non-pristine disk.

        Validate that charm goes into blocked state when it is presented with
        new block devices that have foreign data on them.
        Instances used in UOSCI have a flavour with ephemeral storage in
        addition to the bootable instance storage.  The ephemeral storage
        device is partitioned, formatted and mounted early in the boot process
        by cloud-init.
        As long as the device is mounted the charm will not attempt to use it.
        If we unmount it and trigger the config-changed hook the block device
        will appear as a new and previously untouched device for the charm.
        One of the first steps of device eligibility checks should be to make
        sure we are seeing a pristine and empty device before doing any
        further processing.
        As the ephemeral device will have data on it we can use it to validate
        that these checks work as intended.
        """
        logging.info('Checking behaviour when non-pristine disks appear...')
        logging.info('Configuring ephemeral-unmount...')
        alternate_conf = {
            'ephemeral-unmount': '/mnt',
            'osd-devices': '/dev/vdb'
        }
        juju_service = 'ceph-osd'
        zaza_model.set_application_config(juju_service, alternate_conf)
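        # Expect every ceph-osd unit to report a blocked workload status with
        # a 'Non-pristine' message before we proceed to zap the disks.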
        ceph_osd_states = {
            'ceph-osd': {
                'workload-status': 'blocked',
                'workload-status-message': 'Non-pristine'
            }
        }
        zaza_model.wait_for_application_states(states=ceph_osd_states)
        logging.info('Units now in blocked state, running zap-disk action...')
        unit_names = ['ceph-osd/0', 'ceph-osd/1', 'ceph-osd/2']
        for unit_name in unit_names:
            zap_disk_params = {
                'devices': '/dev/vdb',
                'i-really-mean-it': True,
            }
            action_obj = zaza_model.run_action(
                unit_name=unit_name,
                action_name='zap-disk',
                action_params=zap_disk_params
            )
            logging.debug('Result of action: {}'.format(action_obj))

        logging.info('Running add-disk action...')
        for unit_name in unit_names:
            add_disk_params = {
                'osd-devices': '/dev/vdb',
            }
            action_obj = zaza_model.run_action(
                unit_name=unit_name,
                action_name='add-disk',
                action_params=add_disk_params
            )
            logging.debug('Result of action: {}'.format(action_obj))

        logging.info('Wait for idle/ready status...')
        zaza_model.wait_for_application_states()

        logging.info('OK')

        set_default = {
            'ephemeral-unmount': '',
            'osd-devices': '/dev/vdb',
        }

        current_release = zaza_openstack.get_os_release()
        bionic_train = zaza_openstack.get_os_release('bionic_train')
        if current_release < bionic_train:
            set_default['osd-devices'] = '/dev/vdb /srv/ceph'

        logging.info('Restoring to default configuration...')
        zaza_model.set_application_config(juju_service, set_default)

        zaza_model.wait_for_application_states()
Code example #22
0
class NovaCommonTests(test_utils.OpenStackBaseTest):
    """nova-compute and nova-cloud-controller common tests."""

    XENIAL_MITAKA = openstack_utils.get_os_release('xenial_mitaka')
    XENIAL_OCATA = openstack_utils.get_os_release('xenial_ocata')
    XENIAL_QUEENS = openstack_utils.get_os_release('xenial_queens')
    BIONIC_QUEENS = openstack_utils.get_os_release('bionic_queens')
    BIONIC_ROCKY = openstack_utils.get_os_release('bionic_rocky')
    BIONIC_TRAIN = openstack_utils.get_os_release('bionic_train')

    @classmethod
    def setUpClass(cls):
        """Run class setup for running nova-cloud-controller tests."""
        super(NovaCommonTests, cls).setUpClass()
        cls.current_release = openstack_utils.get_os_release()

    def _test_pci_alias_config(self, app_name, service_list):
        logging.info('Checking pci aliases in nova config...')

        # Expected default and alternate values
        current_value = zaza.model.get_application_config(
            app_name)['pci-alias']
        try:
            current_value = current_value['value']
        except KeyError:
            current_value = None
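        # sort_keys=True keeps each alias JSON in deterministic key order,
        # matching the strings expected in alternate_entry below.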
        new_value = '[{}, {}]'.format(
            json.dumps(
                {
                    'name': 'IntelNIC',
                    'capability_type': 'pci',
                    'product_id': '1111',
                    'vendor_id': '8086',
                    'device_type': 'type-PF'
                },
                sort_keys=True),
            json.dumps(
                {
                    'name': ' Cirrus Logic ',
                    'capability_type': 'pci',
                    'product_id': '0ff2',
                    'vendor_id': '10de',
                    'device_type': 'type-PCI'
                },
                sort_keys=True))

        set_default = {'pci-alias': current_value}
        set_alternate = {'pci-alias': new_value}

        expected_conf_section = 'pci'
        expected_conf_key = 'alias'

        default_entry = {expected_conf_section: {}}
        alternate_entry = {
            expected_conf_section: {
                expected_conf_key:
                [('{"capability_type": "pci", "device_type": "type-PF", '
                  '"name": "IntelNIC", "product_id": "1111", '
                  '"vendor_id": "8086"}'),
                 ('{"capability_type": "pci", "device_type": "type-PCI", '
                  '"name": " Cirrus Logic ", "product_id": "0ff2", '
                  '"vendor_id": "10de"}')]
            }
        }

        # Config file affected by juju set config change
        conf_file = '/etc/nova/nova.conf'

        # Make config change, check for service restarts
        logging.info('Setting config on {} to {}'.format(
            app_name, set_alternate))
        self.restart_on_changed(conf_file, set_default, set_alternate,
                                default_entry, alternate_entry, service_list)
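
A possible caller for the helper above, shown only as an illustration (the test method name and the service list are assumptions; real tests in the suite choose services per application and OpenStack release):

    def test_pci_alias_config(self):
        """Sketch only: exercise the shared PCI alias helper."""
        # Hypothetical service list for nova-cloud-controller.
        self._test_pci_alias_config(
            'nova-cloud-controller',
            ['nova-conductor', 'nova-scheduler'])
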
Code example #23
0
    @classmethod
    def setUpClass(cls):
        """Perform class one-time initialization."""
        super(BlueStoreCompressionCharmOperation, cls).setUpClass()
        cls.current_release = zaza_openstack.get_os_release()
        cls.bionic_rocky = zaza_openstack.get_os_release('bionic_rocky')