Example No. 1
    def test_102_services(self):
        """Verify the expected services are running on the service units."""

        services = {
            self.mysql_sentry: ['mysql'],
            self.rabbitmq_sentry: ['rabbitmq-server'],
            self.nova_sentry: ['nova-compute'],
            self.keystone_sentry: ['keystone'],
            self.glance_sentry: ['glance-registry',
                                 'glance-api'],
            self.cinder_sentry: ['cinder-api',
                                 'cinder-scheduler',
                                 'cinder-volume'],
            self.ceph_osd_sentry: ['ceph-osd-all'],
        }

        if self._get_openstack_release() < self.vivid_kilo:
            # For upstart systems only.  Ceph services under systemd
            # are checked by process name instead.
            ceph_services = [
                'ceph-mon-all',
                'ceph-mon id=`hostname`'
            ]
            services[self.ceph0_sentry] = ceph_services
            services[self.ceph1_sentry] = ceph_services
            services[self.ceph2_sentry] = ceph_services

        ret = u.validate_services_by_name(services)
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)
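For context, `u.validate_services_by_name` is a charm-helpers AmuletUtils helper. A minimal sketch of the kind of check it performs, assuming only the sentry `run()` method used in Example No. 35 below (the function name and details here are illustrative, not the real implementation):

def validate_services_by_name_sketch(services):
    """Return an error string if any expected service is not running,
    None otherwise.  Illustrative sketch only."""
    for sentry_unit, service_names in services.items():
        for service_name in service_names:
            # 'service <name> status' exits non-zero if not running
            output, code = sentry_unit.run(
                'service {} status'.format(service_name))
            if code != 0:
                return 'Service {} not running: {}'.format(
                    service_name, output)
    return None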
Example No. 2
    def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None):
        """Check a single juju rmq unit for ssl and port in the config file."""
        host = sentry_unit.info['public-address']
        unit_name = sentry_unit.info['unit_name']

        conf_file = '/etc/rabbitmq/rabbitmq.config'
        conf_contents = str(self.file_contents_safe(sentry_unit,
                                                    conf_file, max_wait=16))
        # Checks
        conf_ssl = 'ssl' in conf_contents
        conf_port = str(port) in conf_contents

        # Port explicitly checked in config
        if port and conf_port and conf_ssl:
            self.log.debug('SSL is enabled  @{}:{} '
                           '({})'.format(host, port, unit_name))
            return True
        elif port and not conf_port and conf_ssl:
            self.log.debug('SSL is enabled @{} but not on port {} '
                           '({})'.format(host, port, unit_name))
            return False
        # Port not checked (useful when checking that ssl is disabled)
        elif not port and conf_ssl:
            self.log.debug('SSL is enabled  @{}:{} '
                           '({})'.format(host, port, unit_name))
            return True
        elif not conf_ssl:
            self.log.debug('SSL not enabled @{}:{} '
                           '({})'.format(host, port, unit_name))
            return False
        else:
            msg = ('Unknown condition when checking SSL status @{}:{} '
                   '({})'.format(host, port, unit_name))
            amulet.raise_status(amulet.FAIL, msg)
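Example No. 5 below drives this per-unit check across a whole deployment via `validate_rmq_ssl_enabled_units`. A plausible sketch of that helper, assuming the usual convention of returning None on success and an error string on failure:

    def validate_rmq_ssl_enabled_units(self, sentry_units, port=None):
        """Check that ssl is enabled on each rmq unit; None if so, an
        error string otherwise.  Sketch of the helper used in Example
        No. 5."""
        for sentry_unit in sentry_units:
            if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port):
                return ('SSL not enabled on unit '
                        '{}'.format(sentry_unit.info['unit_name']))
        return None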
Example No. 3
    def test_300_ceph_config(self):
        """Verify the data in the ceph config file."""
        u.log.debug('Checking ceph config file data...')
        unit = self.ceph0_sentry
        conf = '/etc/ceph/ceph.conf'
        expected = {
            'global': {
                'keyring': '/etc/ceph/$cluster.$name.keyring',
                'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc',
                'log to syslog': 'false',
                'err to syslog': 'false',
                'clog to syslog': 'false',
                'mon cluster log to syslog': 'false',
                'auth cluster required': 'none',
                'auth service required': 'none',
                'auth client required': 'none'
            },
            'mon': {
                'keyring': '/var/lib/ceph/mon/$cluster-$id/keyring'
            },
            'mds': {
                'keyring': '/var/lib/ceph/mds/$cluster-$id/keyring'
            },
        }

        for section, pairs in expected.iteritems():
            ret = u.validate_config_data(unit, conf, section, pairs)
            if ret:
                message = "ceph config error: {}".format(ret)
                amulet.raise_status(amulet.FAIL, msg=message)
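For a rough idea of what `u.validate_config_data` checks: fetch the file from the unit and compare INI values. A sketch assuming amulet's sentry `file_contents()` helper and Python 2, matching the `iteritems()` usage above; the real helper also supports validator callables:

def validate_config_data_sketch(sentry_unit, config_file, section,
                                expected_pairs):
    """Return an error string on the first mismatch, None otherwise.
    Illustrative sketch only."""
    import ConfigParser
    import StringIO
    contents = sentry_unit.file_contents(config_file)
    parser = ConfigParser.RawConfigParser()
    parser.readfp(StringIO.StringIO(contents))
    for key, expected in expected_pairs.iteritems():
        actual = parser.get(section, key)
        if actual != expected:
            return '[{}] {}: expected {}, got {}'.format(
                section, key, expected, actual)
    return None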
Example No. 4
    def test_204_nova_cc_identity_service_relation(self):
        """Verify the nova-cc to keystone identity-service relation data"""
        u.log.debug('Checking n-c-c:keystone identity relation data...')
        unit = self.nova_cc_sentry
        relation = ['identity-service', 'keystone:identity-service']
        expected = {
            'nova_internal_url': u.valid_url,
            'nova_public_url': u.valid_url,
            'nova_service': 'nova',
            'private-address': u.valid_ip,
            'nova_region': 'RegionOne',
            'nova_admin_url': u.valid_url,
        }
        if self._get_openstack_release() < self.trusty_kilo:
            expected['s3_admin_url'] = u.valid_url
            expected['s3_internal_url'] = u.valid_url
            expected['s3_public_url'] = u.valid_url
            expected['s3_region'] = 'RegionOne'
            expected['s3_service'] = 's3'
            expected['ec2_admin_url'] = u.valid_url
            expected['ec2_internal_url'] = u.valid_url
            expected['ec2_public_url'] = u.valid_url
            expected['ec2_region'] = 'RegionOne'
            expected['ec2_service'] = 'ec2'

        ret = u.validate_relation_data(unit, relation, expected)
        if ret:
            message = u.relation_error('nova-cc identity-service', ret)
            amulet.raise_status(amulet.FAIL, msg=message)
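Note that `expected` mixes literal values with validator callables such as `u.valid_url` and `u.valid_ip`. A hedged sketch of how a checker along the lines of `u.validate_relation_data` can treat the two cases:

def validate_relation_data_sketch(sentry_unit, relation, expected):
    """Return an error string on the first mismatch, None otherwise.
    Illustrative sketch only."""
    actual = sentry_unit.relation(relation[0], relation[1])
    for key, want in expected.items():
        if key not in actual:
            return 'missing relation key: {}'.format(key)
        if callable(want):
            # Validators like u.valid_ip / u.valid_url / u.not_null
            if not want(actual[key]):
                return '{} failed validation: {}'.format(key, actual[key])
        elif actual[key] != want:
            return '{}: expected {}, got {}'.format(key, want, actual[key])
    return None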
Example No. 5
    def configure_rmq_ssl_on(self, sentry_units, deployment,
                             port=None, max_wait=60):
        """Turn ssl charm config option on, with optional non-default
        ssl port specification.  Confirm that it is enabled on every
        unit.

        :param sentry_units: list of sentry units
        :param deployment: amulet deployment object pointer
        :param port: amqp port, use defaults if None
        :param max_wait: maximum time to wait in seconds to confirm
        :returns: None if successful.  Raise on error.
        """
        self.log.debug('Setting ssl charm config option:  on')

        # Enable RMQ SSL
        config = {'ssl': 'on'}
        if port:
            config['ssl_port'] = port

        deployment.d.configure('rabbitmq-server', config)

        # Wait for unit status
        self.rmq_wait_for_cluster(deployment)

        # Confirm
        tries = 0
        ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
        while ret and tries < (max_wait / 4):
            time.sleep(4)
            self.log.debug('Attempt {}: {}'.format(tries, ret))
            ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
            tries += 1

        if ret:
            amulet.raise_status(amulet.FAIL, ret)
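Examples No. 5 and No. 18 share the same poll-and-retry shape. If this pattern keeps recurring, it could be factored into a small helper; a sketch, not part of the original utils:

    def _retry_until_clear(self, check, max_wait=60, interval=4):
        """Re-run check() until it returns None (success) or max_wait
        seconds elapse; return the last failure, if any.  Sketch only."""
        tries = 0
        ret = check()
        while ret and tries < (max_wait / interval):
            time.sleep(interval)
            self.log.debug('Attempt {}: {}'.format(tries, ret))
            ret = check()
            tries += 1
        return ret

The confirm step above would then reduce to a single call, e.g. `ret = self._retry_until_clear(lambda: self.validate_rmq_ssl_enabled_units(sentry_units, port=port), max_wait=max_wait)`.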
Example No. 6
    def _add_services(self, this_service, other_services):
        """Add services.

           Add services to the deployment where this_service is the local charm
           that we're testing and other_services are the other services that
           are being used in the local amulet tests.
           """
        if this_service['name'] != os.path.basename(os.getcwd()):
            s = this_service['name']
            msg = "The charm's root directory name needs to be {}".format(s)
            amulet.raise_status(amulet.FAIL, msg=msg)

        if 'units' not in this_service:
            this_service['units'] = 1

        self.d.add(this_service['name'], units=this_service['units'],
                   constraints=this_service.get('constraints'))

        for svc in other_services:
            if 'location' in svc:
                branch_location = svc['location']
            elif self.series:
                branch_location = 'cs:{}/{}'.format(self.series, svc['name'])
            else:
                branch_location = None

            if 'units' not in svc:
                svc['units'] = 1

            self.d.add(svc['name'], charm=branch_location, units=svc['units'],
                       constraints=svc.get('constraints'))
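For illustration, a caller might invoke `_add_services` like this; the charm names, unit counts, and constraints below are hypothetical:

    this_service = {'name': 'keystone', 'units': 1}
    other_services = [
        {'name': 'percona-cluster', 'constraints': {'mem': '3072M'}},
        {'name': 'rabbitmq-server'},
        {'name': 'glance', 'units': 2},
    ]
    self._add_services(this_service, other_services)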
Example No. 7
    def test_301_cinder_logging_config(self):
        """Verify the data in the cinder logging conf file."""
        u.log.debug('Checking cinder logging config file data...')
        unit = self.cinder_sentry
        conf = '/etc/cinder/logging.conf'

        expected = {
            'loggers': {
                'keys': 'root, cinder'
            },
            'logger_cinder': {
                'level': 'INFO',
                'handlers': 'stderr',
                'qualname': 'cinder'
            },
            'logger_root': {
                'level': 'WARNING',
                'handlers': 'null'
            }
        }

        for section, pairs in expected.iteritems():
            ret = u.validate_config_data(unit, conf, section, pairs)
            if ret:
                message = "cinder logging config error: {}".format(ret)
                amulet.raise_status(amulet.FAIL, msg=message)
Example No. 8
    def test_102_service_catalog(self):
        """Verify that the service catalog endpoint data is valid."""
        u.log.debug('Checking keystone service catalog...')

        endpoint_vol = {'adminURL': u.valid_url,
                        'region': 'RegionOne',
                        'id': u.not_null,
                        'publicURL': u.valid_url,
                        'internalURL': u.valid_url}
        endpoint_id = {'adminURL': u.valid_url,
                       'region': 'RegionOne',
                       'id': u.not_null,
                       'publicURL': u.valid_url,
                       'internalURL': u.valid_url}

        if self._get_openstack_release() >= self.trusty_kilo:
            expected = {'compute': [endpoint_vol], 'identity': [endpoint_id]}
        else:
            expected = {'s3': [endpoint_vol], 'compute': [endpoint_vol],
                        'ec2': [endpoint_vol], 'identity': [endpoint_id]}
        actual = self.keystone_demo.service_catalog.get_endpoints()

        ret = u.validate_svc_catalog_endpoint_data(expected, actual)
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)
Example No. 9
    def test_100_services(self):
        """Verify the expected services are running on the corresponding
           service units."""
        u.log.debug('Checking system services on units...')

        services = {
            self.rabbitmq_sentry: ['rabbitmq-server'],
            self.nova_compute_sentry: ['nova-compute',
                                       'nova-network',
                                       'nova-api'],
            self.nova_cc_sentry: ['nova-conductor'],
            self.keystone_sentry: ['keystone'],
            self.glance_sentry: ['glance-registry',
                                 'glance-api']
        }

        if self._get_openstack_release() >= self.trusty_liberty:
            services[self.keystone_sentry] = ['apache2']

        if self._get_openstack_release_string() >= 'ocata':
            services[self.nova_compute_sentry].remove('nova-network')
            services[self.nova_compute_sentry].remove('nova-api')

        ret = u.validate_services_by_name(services)
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)
Example No. 10
    def test_110_service_catalog(self):
        """Verify that the service catalog endpoint data is valid."""
        if self._get_openstack_release() >= self.xenial_pike:
            u.log.debug('Skipping catalogue checks as ceilometer no longer '
                        'registers endpoints')
            return
        u.log.debug('Checking keystone service catalog data...')
        endpoint_check = {
            'adminURL': u.valid_url,
            'id': u.not_null,
            'region': 'RegionOne',
            'publicURL': u.valid_url,
            'internalURL': u.valid_url
        }
        expected = {
            'metering': [endpoint_check],
            'identity': [endpoint_check]
        }
        actual = self.keystone.service_catalog.get_endpoints()

        ret = u.validate_svc_catalog_endpoint_data(
            expected,
            actual,
            openstack_release=self._get_openstack_release())
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)

        u.log.debug('OK')
Example No. 11
    def test_100_services(self):
        """Verify the expected services are running on the corresponding
           service units."""
        u.log.debug('Checking system services on units...')
        release = self._get_openstack_release()
        ceilometer_svcs = [
            'ceilometer-agent-central',
            'ceilometer-agent-notification',
        ]
        if release < self.xenial_pike:
            ceilometer_svcs.append('ceilometer-collector')

        if (release >= self.xenial_ocata and release < self.xenial_pike):
            ceilometer_svcs.append('apache2')

        if release < self.xenial_ocata:
            ceilometer_svcs.append('ceilometer-api')

        if release < self.trusty_mitaka:
            ceilometer_svcs.append('ceilometer-alarm-evaluator')
            ceilometer_svcs.append('ceilometer-alarm-notifier')

        service_names = {
            self.ceil_sentry: ceilometer_svcs,
        }

        ret = u.validate_services_by_name(service_names)
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)

        u.log.debug('OK')
Example No. 12
    def test_200_ceilometer_identity_relation(self):
        """Verify the ceilometer to keystone identity-service relation data"""
        if self._get_openstack_release() >= self.xenial_pike:
            u.log.debug('Skipping identity-service checks as ceilometer no '
                        'longer has this relation')
            return
        u.log.debug('Checking ceilometer to keystone identity-service '
                    'relation data...')
        unit = self.ceil_sentry
        relation = ['identity-service', 'keystone:identity-service']
        ceil_ip = unit.relation('identity-service',
                                'keystone:identity-service')['private-address']
        ceil_endpoint = "http://%s:8777" % (ceil_ip)

        expected = {
            'admin_url': ceil_endpoint,
            'internal_url': ceil_endpoint,
            'private-address': ceil_ip,
            'public_url': ceil_endpoint,
            'region': 'RegionOne',
            'requested_roles': 'ResellerAdmin',
            'service': 'ceilometer',
        }

        ret = u.validate_relation_data(unit, relation, expected)
        if ret:
            message = u.relation_error('ceilometer identity-service', ret)
            amulet.raise_status(amulet.FAIL, msg=message)

        u.log.debug('OK')
Example No. 13
    def test_112_keystone_api_endpoint(self):
        """Verify the ceilometer api endpoint data."""
        if self._get_openstack_release() >= self.xenial_pike:
            u.log.debug('Skipping catalogue checks as ceilometer no longer '
                        'registers endpoints')
            return
        u.log.debug('Checking keystone api endpoint data...')
        endpoints = self.keystone.endpoints.list()
        u.log.debug(endpoints)
        internal_port = public_port = '5000'
        admin_port = '35357'
        expected = {'id': u.not_null,
                    'region': 'RegionOne',
                    'adminurl': u.valid_url,
                    'internalurl': u.valid_url,
                    'publicurl': u.valid_url,
                    'service_id': u.not_null}

        ret = u.validate_endpoint_data(
            endpoints,
            admin_port,
            internal_port,
            public_port,
            expected,
            openstack_release=self._get_openstack_release())
        if ret:
            message = 'Keystone endpoint: {}'.format(ret)
            amulet.raise_status(amulet.FAIL, msg=message)

        u.log.debug('OK')
Example No. 14
    def test_201_keystone_ceilometer_identity_relation(self):
        """Verify the keystone to ceilometer identity-service relation data"""
        if self._get_openstack_release() >= self.xenial_pike:
            u.log.debug('Skipping identity-service checks as ceilometer no '
                        'longer has this relation')
            return
        u.log.debug('Checking keystone:ceilometer identity relation data...')
        unit = self.keystone_sentry
        relation = ['identity-service', 'ceilometer:identity-service']
        id_relation = unit.relation('identity-service',
                                    'ceilometer:identity-service')
        id_ip = id_relation['private-address']
        expected = {
            'admin_token': 'ubuntutesting',
            'auth_host': id_ip,
            'auth_port': "35357",
            'auth_protocol': 'http',
            'private-address': id_ip,
            'service_host': id_ip,
            'service_password': u.not_null,
            'service_port': "5000",
            'service_protocol': 'http',
            'service_tenant': 'services',
            'service_tenant_id': u.not_null,
            'service_username': '******',
        }
        ret = u.validate_relation_data(unit, relation, expected)
        if ret:
            message = u.relation_error('keystone identity-service', ret)
            amulet.raise_status(amulet.FAIL, msg=message)

        u.log.debug('OK')
Example No. 15
    def test_202_keystone_ceilometer_identity_notes_relation(self):
        """Verify ceilometer to keystone identity-notifications relation"""
        u.log.debug('Checking keystone:ceilometer '
                    'identity-notifications relation data...')

        # Relation data may vary depending on timing of hooks and relations.
        # May be glance- or keystone- or another endpoint-changed value, so
        # check that at least one ???-endpoint-changed value exists.
        unit = self.keystone_sentry
        relation_data = unit.relation('identity-notifications',
                                      'ceilometer:identity-notifications')

        expected = '-endpoint-changed'
        found = 0
        for key in relation_data.keys():
            if expected in key and relation_data[key]:
                found += 1
                u.log.debug('{}: {}'.format(key, relation_data[key]))

        if not found:
            message = ('keystone:ceilometer identity-notification relation '
                       'error\n expected something like: {}\n actual: '
                       '{}'.format(expected, relation_data))
            amulet.raise_status(amulet.FAIL, msg=message)

        u.log.debug('OK')
Example No. 16
    def test_207_ceilometer_ceilometer_agent_relation(self):
        """Verify the ceilometer to ceilometer-agent relation data"""
        u.log.debug('Checking ceilometer:ceilometer-agent relation data...')
        unit = self.ceil_sentry
        relation = ['ceilometer-service',
                    'ceilometer-agent:ceilometer-service']
        expected = {
            'rabbitmq_user': '******',
            'verbose': 'False',
            'rabbitmq_host': u.valid_ip,
            'use_syslog': 'False',
            'metering_secret': u.not_null,
            'rabbitmq_virtual_host': 'openstack',
            'private-address': u.valid_ip,
            'debug': 'False',
            'rabbitmq_password': u.not_null,
            'port': '8767'
        }
        if self._get_openstack_release() >= self.xenial_pike:
            expected['gnocchi_url'] = u.valid_url
            if self._get_openstack_release() >= self.xenial_queens:
                expected['port'] = '8777'
        else:
            expected['db_port'] = '27017'
            expected['db_name'] = 'ceilometer'
            expected['db_host'] = u.valid_ip
            expected['service_ports'] = "{'ceilometer_api': [8777, 8767]}"

        ret = u.validate_relation_data(unit, relation, expected)
        if ret:
            message = u.relation_error('ceilometer-service', ret)
            amulet.raise_status(amulet.FAIL, msg=message)

        u.log.debug('OK')
Example No. 17
    def get_amqp_message_by_unit(self, sentry_unit, queue="test",
                                 username="******",
                                 password="******",
                                 ssl=False, port=None):
        """Get an amqp message from a rmq juju unit.

        :param sentry_unit: sentry unit pointer
        :param queue: message queue, default to test
        :param username: amqp user name, default to testuser1
        :param password: amqp user password
        :param ssl: boolean, default to False
        :param port: amqp port, use defaults if None
        :returns: amqp message body as string.  Raise if get fails.
        """
        connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
                                               port=port,
                                               username=username,
                                               password=password)
        channel = connection.channel()
        method_frame, _, body = channel.basic_get(queue)

        if method_frame:
            self.log.debug('Retrieved message from {} queue:\n{}'.format(queue,
                                                                         body))
            channel.basic_ack(method_frame.delivery_tag)
            channel.close()
            connection.close()
            return body
        else:
            msg = 'No message retrieved.'
            amulet.raise_status(amulet.FAIL, msg)
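The basic_get/basic_ack calls above suggest a pika-style channel API, so a matching producer-side helper could look like the following sketch (the method name and its presence in the real utils are assumptions):

    def publish_amqp_message_by_unit(self, sentry_unit, message, queue,
                                     username, password,
                                     ssl=False, port=None):
        """Publish an amqp message to a rmq juju unit.  Sketch only."""
        connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
                                               port=port,
                                               username=username,
                                               password=password)
        channel = connection.channel()
        # The default exchange routes directly to the queue of that name
        channel.basic_publish(exchange='', routing_key=queue, body=message)
        channel.close()
        connection.close()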
Example No. 18
    def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60):
        """Turn ssl charm config option off, confirm that it is disabled
        on every unit.

        :param sentry_units: list of sentry units
        :param deployment: amulet deployment object pointer
        :param max_wait: maximum time to wait in seconds to confirm
        :returns: None if successful.  Raise on error.
        """
        self.log.debug('Setting ssl charm config option:  off')

        # Disable RMQ SSL
        config = {'ssl': 'off'}
        deployment.d.configure('rabbitmq-server', config)

        # Wait for unit status
        self.rmq_wait_for_cluster(deployment)

        # Confirm
        tries = 0
        ret = self.validate_rmq_ssl_disabled_units(sentry_units)
        while ret and tries < (max_wait / 4):
            time.sleep(4)
            self.log.debug('Attempt {}: {}'.format(tries, ret))
            ret = self.validate_rmq_ssl_disabled_units(sentry_units)
            tries += 1

        if ret:
            amulet.raise_status(amulet.FAIL, ret)
Example No. 19
    def test_304_glance_rbd_config(self):
        """Verify the glance config file data regarding ceph."""
        u.log.debug('Checking glance (rbd) config file data...')
        unit = self.glance_sentry
        conf = '/etc/glance/glance-api.conf'
        config = {
            'default_store': 'rbd',
            'rbd_store_ceph_conf': '/etc/ceph/ceph.conf',
            'rbd_store_user': '******',
            'rbd_store_pool': 'glance',
            'rbd_store_chunk_size': '8'
        }

        if self._get_openstack_release() >= self.trusty_kilo:
            # Kilo or later
            config['stores'] = ('glance.store.filesystem.Store,'
                                'glance.store.http.Store,'
                                'glance.store.rbd.Store')
            section = 'glance_store'
        else:
            # Juno or earlier
            section = 'DEFAULT'

        expected = {section: config}
        for section, pairs in expected.iteritems():
            ret = u.validate_config_data(unit, conf, section, pairs)
            if ret:
                message = "glance (rbd) config error: {}".format(ret)
                amulet.raise_status(amulet.FAIL, msg=message)
Example No. 20
    def test_900_glance_restart_on_config_change(self):
        """Verify that the specified services are restarted when the config
           is changed."""
        sentry = self.glance_sentry
        juju_service = 'glance'

        # Expected default and alternate values
        set_default = {'use-syslog': 'False'}
        set_alternate = {'use-syslog': 'True'}

        # Config file affected by juju set config change
        conf_file = '/etc/glance/glance-api.conf'

        # Services which are expected to restart upon config change
        services = ['glance-api', 'glance-registry']

        # Make config change, check for service restarts
        u.log.debug('Making config change on {}...'.format(juju_service))
        self.d.configure(juju_service, set_alternate)

        sleep_time = 30
        for s in services:
            u.log.debug("Checking that service restarted: {}".format(s))
            if not u.service_restarted(sentry, s,
                                       conf_file, sleep_time=sleep_time):
                self.d.configure(juju_service, set_default)
                msg = "service {} didn't restart after config change".format(s)
                amulet.raise_status(amulet.FAIL, msg=msg)
            sleep_time = 0

        self.d.configure(juju_service, set_default)
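`u.service_restarted` plausibly compares the service's process start time with the config file's mtime on the unit. A sketch of that mechanism using only the sentry `run()` method (the real charm-helpers implementation may differ):

def service_restarted_sketch(sentry_unit, service, conf_file, sleep_time=30):
    """True if the service's oldest process started after conf_file was
    last modified.  Illustrative sketch only."""
    time.sleep(sleep_time)  # allow hooks time to trigger the restart
    mtime_out, _ = sentry_unit.run('stat -c %Y {}'.format(conf_file))
    # etimes = seconds elapsed since each process started; take the oldest
    etimes_out, _ = sentry_unit.run(
        'ps -C {} -o etimes= | sort -n | tail -1'.format(service))
    now_out, _ = sentry_unit.run('date +%s')
    proc_start = int(now_out) - int(etimes_out)
    return proc_start >= int(mtime_out)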
Example No. 21
    def test_206_keystone_identity_service_relation(self):
        """Verify the keystone to nova-cc identity-service relation data"""
        u.log.debug('Checking keystone:n-c-c identity relation data...')
        unit = self.keystone_sentry
        relation = ['identity-service',
                    'nova-cloud-controller:identity-service']
        expected = {
            'service_protocol': 'http',
            'service_tenant': 'services',
            'admin_token': 'ubuntutesting',
            'service_password': u.not_null,
            'service_port': '5000',
            'auth_port': '35357',
            'auth_protocol': 'http',
            'private-address': u.valid_ip,
            'auth_host': u.valid_ip,
            'service_username': '******',
            'service_tenant_id': u.not_null,
            'service_host': u.valid_ip
        }
        if self._get_openstack_release() >= self.trusty_kilo:
            expected['service_username'] = '******'

        ret = u.validate_relation_data(unit, relation, expected)
        if ret:
            message = u.relation_error('keystone identity-service', ret)
            amulet.raise_status(amulet.FAIL, msg=message)
Example No. 22
    def validate_keystone_tenants(self, client):
        """Verify all existing tenants."""
        u.log.debug('Checking keystone tenants...')
        expected = [
            {'name': 'services',
             'enabled': True,
             'description': 'Created by Juju',
             'id': u.not_null},
            {'name': 'demoTenant',
             'enabled': True,
             'description': 'demo tenant',
             'id': u.not_null},
            {'name': 'admin',
             'enabled': True,
             'description': 'Created by Juju',
             'id': u.not_null}
        ]
        if self.keystone_api_version == 2:
            actual = client.tenants.list()
        else:
            actual = client.projects.list()

        ret = u.validate_tenant_data(expected, actual)
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)
Example No. 23
    def validate_keystone_users(self, client):
        """Verify all existing users."""
        u.log.debug('Checking keystone users...')
        base = [
            {'name': 'demoUser',
             'enabled': True,
             'id': u.not_null,
             'email': '*****@*****.**'},
            {'name': 'admin',
             'enabled': True,
             'id': u.not_null,
             'email': 'juju@localhost'},
            {'name': 'cinder_cinderv2',
             'enabled': True,
             'id': u.not_null,
             'email': u'juju@localhost'}
        ]
        expected = []
        for user_info in base:
            if self.keystone_api_version == 2:
                user_info['tenantId'] = u.not_null
            else:
                user_info['default_project_id'] = u.not_null
            expected.append(user_info)
        actual = client.users.list()
        ret = u.validate_user_data(expected, actual,
                                   api_version=self.keystone_api_version)
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)
Example No. 24
    def test_400_enable_qos(self):
        """Check qos settings set via neutron-api charm"""
        if self._get_openstack_release() >= self.trusty_mitaka:
            unit = self.n_ovs_sentry
            set_default = {'enable-qos': 'False'}
            set_alternate = {'enable-qos': 'True'}
            self.d.configure('neutron-api', set_alternate)
            self._wait_and_check(sleep=60)
            qos_plugin = 'qos'
            config = u._get_config(
                self.neutron_api_sentry, '/etc/neutron/neutron.conf')
            service_plugins = config.get(
                'DEFAULT',
                'service_plugins').split(',')
            if qos_plugin not in service_plugins:
                message = "{} not in service_plugins".format(qos_plugin)
                amulet.raise_status(amulet.FAIL, msg=message)

            config = u._get_config(
                unit,
                '/etc/neutron/plugins/ml2/openvswitch_agent.ini')
            extensions = config.get('agent', 'extensions').split(',')
            if qos_plugin not in extensions:
                message = "qos not in extensions"
                amulet.raise_status(amulet.FAIL, msg=message)

            u.log.debug('Setting QoS back to {}'.format(
                set_default['enable-qos']))
            self.d.configure('neutron-api', set_default)
            self._wait_and_check()
            u.log.debug('OK')
Example No. 25
    def test_121_keystone_demo_domain_admin_access(self):
        """Verify that end-user domain admin does not have elevated
           privileges. Catch regressions like LP#1651989"""
        if self.is_mitaka_or_newer():
            u.log.debug('Checking keystone end-user domain admin access...')
            self.set_api_version(3)
            # Authenticate as end-user domain admin and verify that we have
            # appropriate access.
            client = u.authenticate_keystone(
                self.keystone_sentries[0].info['public-address'],
                username=self.demo_domain_admin,
                password='******',
                api_version=3,
                user_domain_name=self.demo_domain,
                domain_name=self.demo_domain,
            )

            try:
                # Expect failure
                client.domains.list()
            except Exception as e:
                message = ('Retrieve domain list as end-user domain admin '
                           'NOT allowed...OK ({})'.format(e))
                u.log.debug(message)
            else:
                message = ('Retrieve domain list as end-user domain admin '
                           'allowed')
                amulet.raise_status(amulet.FAIL, msg=message)
Example No. 26
    def test_122_keystone_project_scoped_admin_access(self):
        """Verify that user admin in domain admin_domain has access to
           identity-calls guarded by rule:cloud_admin when using project
           scoped token."""
        if self.is_mitaka_or_newer():
            u.log.debug('Checking keystone project scoped admin access...')
            self.set_api_version(3)
            # Authenticate as end-user domain admin and verify that we have
            # appropriate access.
            client = u.authenticate_keystone(
                self.keystone_sentries[0].info['public-address'],
                username='******',
                password='******',
                api_version=3,
                admin_port=True,
                user_domain_name='admin_domain',
                project_domain_name='admin_domain',
                project_name='admin',
            )

            try:
                client.domains.list()
                u.log.debug('OK')
            except Exception as e:
                message = ('Retrieve domain list as admin with project scoped '
                           'token FAILED. ({})'.format(e))
                amulet.raise_status(amulet.FAIL, msg=message)
Example No. 27
    def test_900_keystone_restart_on_config_change(self):
        """Verify that the specified services are restarted when the config
           is changed."""
        sentry = self.keystone_sentry
        juju_service = 'keystone'

        # Expected default and alternate values
        set_default = {'use-syslog': 'False'}
        set_alternate = {'use-syslog': 'True'}

        # Services which are expected to restart upon config change,
        # and corresponding config files affected by the change
        services = {'keystone-all': '/etc/keystone/keystone.conf'}

        # Make config change, check for service restarts
        u.log.debug('Making config change on {}...'.format(juju_service))
        mtime = u.get_sentry_time(sentry)
        self.d.configure(juju_service, set_alternate)

        sleep_time = 30
        for s, conf_file in services.iteritems():
            u.log.debug("Checking that service restarted: {}".format(s))
            if not u.validate_service_config_changed(sentry, mtime, s,
                                                     conf_file,
                                                     sleep_time=sleep_time):

                self.d.configure(juju_service, set_default)
                msg = "service {} didn't restart after config change".format(s)
                amulet.raise_status(amulet.FAIL, msg=msg)

        self.d.configure(juju_service, set_default)

        u.log.debug('OK')
Example No. 28
    def test_200_aodh_identity_relation(self):
        """Verify the aodh to keystone identity-service relation data"""
        u.log.debug('Checking aodh to keystone identity-service '
                    'relation data...')
        unit = self.aodh_sentry
        relation = ['identity-service', 'keystone:identity-service']
        aodh_ip = unit.relation('identity-service',
                                'keystone:identity-service')['private-address']
        aodh_endpoint = "http://%s:8042" % (aodh_ip)

        expected = {
            'admin_url': aodh_endpoint,
            'internal_url': aodh_endpoint,
            'private-address': aodh_ip,
            'public_url': aodh_endpoint,
            'region': 'RegionOne',
            'service': 'aodh',
        }

        ret = u.validate_relation_data(unit, relation, expected)
        if ret:
            message = u.relation_error('aodh identity-service', ret)
            amulet.raise_status(amulet.FAIL, msg=message)

        u.log.debug('OK')
Example No. 29
    def test_201_keystone_aodh_identity_relation(self):
        """Verify the keystone to aodh identity-service relation data"""
        u.log.debug('Checking keystone:aodh identity relation data...')
        unit = self.keystone_sentry
        relation = ['identity-service', 'aodh:identity-service']
        id_relation = unit.relation('identity-service',
                                    'aodh:identity-service')
        id_ip = id_relation['private-address']
        expected = {
            'admin_token': 'ubuntutesting',
            'auth_host': id_ip,
            'auth_port': "35357",
            'auth_protocol': 'http',
            'private-address': id_ip,
            'service_host': id_ip,
            'service_password': u.not_null,
            'service_port': "5000",
            'service_protocol': 'http',
            'service_tenant': 'services',
            'service_tenant_id': u.not_null,
            'service_username': '******',
        }
        ret = u.validate_relation_data(unit, relation, expected)
        if ret:
            message = u.relation_error('keystone identity-service', ret)
            amulet.raise_status(amulet.FAIL, msg=message)

        u.log.debug('OK')
Example No. 30
    def test_200_barbican_identity_relation(self):
        """Verify the barbican to keystone identity-service relation data"""
        u.log.debug('Checking barbican to keystone identity-service '
                    'relation data...')
        unit = self.barbican_sentry
        relation = ['identity-service', 'keystone:identity-service']
        barbican_ip = unit.relation(*relation)['private-address']
        barbican_admin_endpoint = "http://%s:9312" % (barbican_ip)
        barbican_endpoint = "http://%s:9311" % (barbican_ip)

        expected = {
            'admin_url': barbican_admin_endpoint,
            'internal_url': barbican_endpoint,
            'private-address': barbican_ip,
            'public_url': barbican_endpoint,
            'region': 'RegionOne',
            'service': 'barbican',
        }

        ret = u.validate_relation_data(unit, relation, expected)
        if ret:
            message = u.relation_error('barbican identity-service', ret)
            amulet.raise_status(amulet.FAIL, msg=message)

        u.log.debug('OK')
Example No. 31
    def test_302_glance_registry_default_config(self):
        """Verify configs in glance-registry.conf"""
        u.log.debug('Checking glance registry config file...')
        unit = self.glance_sentry
        rel_my_gl = self.pxc_sentry.relation('shared-db', 'glance:shared-db')
        db_uri = "mysql+pymysql://{}:{}@{}/{}".format('glance',
                                                      rel_my_gl['password'],
                                                      rel_my_gl['db_host'],
                                                      'glance')
        conf = '/etc/glance/glance-registry.conf'

        expected = {
            'DEFAULT': {
                'use_syslog': 'False',
                'log_file': '/var/log/glance/registry.log',
                'debug': 'False',
                'verbose': 'False',
                'bind_host': '0.0.0.0',
                'bind_port': '9191'
            },
        }

        # The expected database settings are the same on Juno and earlier
        # as on Kilo and later, so no release-specific handling is needed.
        expected['database'] = {
            'idle_timeout': '3600',
            'connection': db_uri
        }

        for section, pairs in expected.iteritems():
            ret = u.validate_config_data(unit, conf, section, pairs)
            if ret:
                message = "glance registry paste config error: {}".format(ret)
                amulet.raise_status(amulet.FAIL, msg=message)
Example No. 32
def ubuntu_basic_deployment(series):
    """ Common test routines to run per-series. """

    # Initialize
    seconds = 900
    u = AmuletUtils(logging.DEBUG)
    d = amulet.Deployment(series=series)
    d.add('ubuntu')

    # Deploy services, wait for started state.  Fail or skip on timeout.
    try:
        d.setup(timeout=seconds)
        sentry_unit = d.sentry['ubuntu'][0]
    except amulet.helpers.TimeoutError:
        message = 'Deployment timed out ({}s)'.format(seconds)
        amulet.raise_status(amulet.FAIL, msg=message)

    # Confirm Ubuntu release name from the unit.
    release, ret = u.get_ubuntu_release_from_sentry(sentry_unit)
    if ret:
        # Something went wrong trying to query the unit, or it is an
        # unknown/alien release name based on distro-info validation.
        amulet.raise_status(amulet.FAIL, msg=ret)

    if release == series:
        u.log.info('Release/series check:  OK')
    else:
        msg = 'Release/series check:  FAIL ({} != {})'.format(release, series)
        u.log.error(msg)
        amulet.raise_status(amulet.FAIL, msg=msg)
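`get_ubuntu_release_from_sentry` returns a (release, error) pair, as the caller above shows. A sketch of what it plausibly does, querying lsb_release on the unit (the real helper also validates the name against distro-info):

def get_ubuntu_release_from_sentry_sketch(sentry_unit):
    """Return (release_name, error_string); error_string is None on
    success.  Illustrative sketch only."""
    release, code = sentry_unit.run('lsb_release -cs')
    if code != 0 or not release:
        return None, 'Failed to query release from unit: {}'.format(release)
    return release.strip(), None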
Example No. 33
    def test_138_service_catalog(self):
        """Verify that the service catalog endpoint data is valid."""
        u.log.debug('Checking keystone service catalog...')
        expected = {
            u'identity': [{u'id': u.not_null,
                           u'interface': u'admin',
                           u'region': u'RegionOne',
                           u'region_id': u'RegionOne',
                           u'url': u.valid_url},
                          {u'id': u.not_null,
                           u'interface': u'public',
                           u'region': u'RegionOne',
                           u'region_id': u'RegionOne',
                           u'url': u.valid_url},
                          {u'id': u.not_null,
                           u'interface': u'internal',
                           u'region': u'RegionOne',
                           u'region_id': u'RegionOne',
                           u'url': u.valid_url}],

            u'volumev2': [{u'id': u.not_null,
                           u'interface': u'admin',
                           u'region': u'RegionOne',
                           u'region_id': u'RegionOne',
                           u'url': u.valid_url},
                          {u'id': u.not_null,
                           u'interface': u'public',
                           u'region': u'RegionOne',
                           u'region_id': u'RegionOne',
                           u'url': u.valid_url},
                          {u'id': u.not_null,
                           u'interface': u'internal',
                           u'region': u'RegionOne',
                           u'region_id': u'RegionOne',
                           u'url': u.valid_url}]}

        actual = self.keystone_client.service_catalog.get_endpoints()
        ret = u.validate_v3_svc_catalog_endpoint_data(expected, actual)
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)
Example No. 34
    def test_900_restart_on_config_change(self):
        """Verify that the specified services are restarted when the
        config is changed."""

        sentry = self.openstack_dashboard_sentry
        juju_service = 'openstack-dashboard'

        # Expected default and alternate values
        set_default = {'use-syslog': 'False'}
        set_alternate = {'use-syslog': 'True'}

        # Services which are expected to restart upon config change,
        # and corresponding config files affected by the change
        services = {
            'apache2': '/etc/openstack-dashboard/local_settings.py',
            'memcached': '/etc/openstack-dashboard/local_settings.py'
        }

        # Make config change, check for service restarts
        u.log.debug('Making config change on {}...'.format(juju_service))
        mtime = u.get_sentry_time(sentry)
        self.d.configure(juju_service, set_alternate)

        sleep_time = 30
        for s, conf_file in services.iteritems():
            u.log.debug("Checking that service restarted: {}".format(s))
            if not u.validate_service_config_changed(sentry,
                                                     mtime,
                                                     s,
                                                     conf_file,
                                                     retry_count=6,
                                                     retry_sleep_time=20,
                                                     sleep_time=sleep_time):

                self.d.configure(juju_service, set_default)
                msg = "service {} didn't restart after config change".format(s)
                amulet.raise_status(amulet.FAIL, msg=msg)
            sleep_time = 0

        self.d.configure(juju_service, set_default)
Example No. 35
    def test_stop_start_carte(self):

        output, code = self.unit.run('pgrep -af org.pentaho.di.www.Carte '
                                     '| grep -v pgrep')
        print(output)
        if code != 0:
            message = 'Carte is not running!'
            amulet.raise_status(amulet.FAIL, msg=message)

        self.d.configure('pdi', {'run_carte': False})
        self.d.sentry.wait()
        output2, code2 = self.unit.run('pgrep -af org.pentaho.di.www.Carte '
                                       '| grep -v pgrep')
        print(output2)
        if code2 == 0:
            message = 'Carte is still running!'
            amulet.raise_status(amulet.FAIL, msg=message)

        self.d.configure('pdi', {'run_carte': True})
        self.d.sentry.wait()
        output3, code3 = self.unit.run('pgrep -af org.pentaho.di.www.Carte  '
                                       '|grep -v pgrep')
        print(output3)
        if code3 != 0:
            message = 'Carte is not running!'
            amulet.raise_status(amulet.FAIL, msg=message)
Example No. 36
    def test_400_create_network(self):
        """Create a network, verify that it exists, and then delete it."""
        u.log.debug('Creating neutron network...')
        self.neutron.format = 'json'
        net_name = 'ext_net'

        # Verify that the network doesn't exist
        networks = self.neutron.list_networks(name=net_name)
        net_count = len(networks['networks'])
        if net_count != 0:
            msg = "Expected zero networks, found {}".format(net_count)
            amulet.raise_status(amulet.FAIL, msg=msg)

        # Create a network and verify that it exists
        network = {'name': net_name}
        self.neutron.create_network({'network': network})

        networks = self.neutron.list_networks(name=net_name)
        u.log.debug('Networks: {}'.format(networks))
        net_len = len(networks['networks'])
        if net_len != 1:
            msg = "Expected 1 network, found {}".format(net_len)
            amulet.raise_status(amulet.FAIL, msg=msg)

        u.log.debug('Confirming new neutron network...')
        network = networks['networks'][0]
        if network['name'] != net_name:
            amulet.raise_status(amulet.FAIL, msg="network ext_net not found")

        # Cleanup
        u.log.debug('Deleting neutron network...')
        self.neutron.delete_network(network['id'])
Example No. 37
    def test_100_services(self):
        """Verify the expected services are running on the corresponding
           service units."""
        u.log.debug('Checking system services on units...')

        services = {
            self.compute_sentry:
            ['nova-compute', 'neutron-plugin-openvswitch-agent'],
            self.rabbitmq_sentry: ['rabbitmq-server'],
            self.neutron_api_sentry: ['neutron-server'],
        }

        if self._get_openstack_release() >= self.trusty_mitaka:
            services[self.compute_sentry] = [
                'nova-compute', 'neutron-openvswitch-agent'
            ]

        ret = u.validate_services_by_name(services)
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)

        u.log.debug('OK')
Example No. 38
    def test_410_ceph_cinder_vol_create(self):
        """Create and confirm a ceph-backed cinder volume, and inspect
        ceph cinder pool object count as the volume is created
        and deleted."""
        sentry_unit = self.ceph0_sentry
        obj_count_samples = []
        pool_size_samples = []
        pools = u.get_ceph_pools(self.ceph0_sentry)
        cinder_pool = pools['cinder-ceph']

        # Check ceph cinder pool object count, disk space usage and pool name
        u.log.debug('Checking ceph cinder pool original samples...')
        pool_name, obj_count, kb_used = u.get_ceph_pool_sample(
            sentry_unit, cinder_pool)
        obj_count_samples.append(obj_count)
        pool_size_samples.append(kb_used)

        expected = 'cinder-ceph'
        if pool_name != expected:
            msg = ('Ceph pool {} unexpected name (actual, expected): '
                   '{}. {}'.format(cinder_pool, pool_name, expected))
            amulet.raise_status(amulet.FAIL, msg=msg)

        # Create ceph-backed cinder volume
        cinder_vol = u.create_cinder_volume(self.cinder)

        # Re-check ceph cinder pool object count and disk usage
        time.sleep(10)
        u.log.debug('Checking ceph cinder pool samples after volume create...')
        pool_name, obj_count, kb_used = u.get_ceph_pool_sample(
            sentry_unit, cinder_pool)
        obj_count_samples.append(obj_count)
        pool_size_samples.append(kb_used)

        # Delete ceph-backed cinder volume
        u.delete_resource(self.cinder.volumes, cinder_vol, msg="cinder volume")

        # Final check, ceph cinder pool object count and disk usage
        time.sleep(10)
        u.log.debug('Checking ceph cinder pool after volume delete...')
        pool_name, obj_count, kb_used = u.get_ceph_pool_sample(
            sentry_unit, cinder_pool)
        obj_count_samples.append(obj_count)
        pool_size_samples.append(kb_used)

        # Validate ceph cinder pool object count samples over time
        ret = u.validate_ceph_pool_samples(obj_count_samples,
                                           "cinder pool object count")
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)

        # Luminous (pike) ceph seems more efficient at disk usage so we
        # cannot guarantee the ordering of kb_used
        if self._get_openstack_release() < self.xenial_pike:
            # Validate ceph cinder pool disk space usage samples over time
            ret = u.validate_ceph_pool_samples(pool_size_samples,
                                               "cinder pool disk usage")
            if ret:
                amulet.raise_status(amulet.FAIL, msg=ret)
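The three samples gathered above (original, after create, after delete) are presumably expected to rise and then fall again. A sketch of a check in the spirit of `validate_ceph_pool_samples`:

def validate_ceph_pool_samples_sketch(samples, sample_type):
    """Return an error string unless samples increase after the create
    and decrease after the delete.  Illustrative sketch only."""
    original, after_create, after_delete = samples
    if not (after_create > original and after_delete < after_create):
        return '{} samples out of expected order: {}'.format(
            sample_type, samples)
    return None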
Example No. 39
    def test_412_ceph_glance_image_create_delete(self):
        """Create and confirm a ceph-backed glance image, and inspect
        ceph glance pool object count as the image is created
        and deleted."""
        sentry_unit = self.ceph0_sentry
        obj_count_samples = []
        pool_size_samples = []
        pools = u.get_ceph_pools(self.ceph0_sentry)
        glance_pool = pools['glance']

        # Check ceph glance pool object count, disk space usage and pool name
        u.log.debug('Checking ceph glance pool original samples...')
        pool_name, obj_count, kb_used = u.get_ceph_pool_sample(
            sentry_unit, glance_pool)
        obj_count_samples.append(obj_count)
        pool_size_samples.append(kb_used)

        expected = 'glance'
        if pool_name != expected:
            msg = ('Ceph glance pool {} unexpected name (actual, '
                   'expected): {}. {}'.format(glance_pool, pool_name,
                                              expected))
            amulet.raise_status(amulet.FAIL, msg=msg)

        # Create ceph-backed glance image
        glance_img = u.create_cirros_image(self.glance, "cirros-image-1")

        # Re-check ceph glance pool object count and disk usage
        time.sleep(10)
        u.log.debug('Checking ceph glance pool samples after image create...')
        pool_name, obj_count, kb_used = u.get_ceph_pool_sample(
            sentry_unit, glance_pool)
        obj_count_samples.append(obj_count)
        pool_size_samples.append(kb_used)

        # Delete ceph-backed glance image
        u.delete_resource(self.glance.images,
                          glance_img.id,
                          msg="glance image")

        # Final check, ceph glance pool object count and disk usage
        time.sleep(10)
        u.log.debug('Checking ceph glance pool samples after image delete...')
        pool_name, obj_count, kb_used = u.get_ceph_pool_sample(
            sentry_unit, glance_pool)
        obj_count_samples.append(obj_count)
        pool_size_samples.append(kb_used)

        # Validate ceph glance pool object count samples over time
        ret = u.validate_ceph_pool_samples(obj_count_samples,
                                           "glance pool object count")
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)

        # Validate ceph glance pool disk space usage samples over time
        ret = u.validate_ceph_pool_samples(pool_size_samples,
                                           "glance pool disk usage")
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)
Example No. 40
    def test_104_openstack_compute_api_endpoint(self):
        """Verify the openstack compute api (osapi) endpoint data."""
        u.log.debug('Checking compute endpoint data...')

        endpoints = self.keystone.endpoints.list()
        admin_port = internal_port = public_port = '8774'
        expected = {
            'id': u.not_null,
            'region': 'RegionOne',
            'adminurl': u.valid_url,
            'internalurl': u.valid_url,
            'publicurl': u.valid_url,
            'service_id': u.not_null
        }

        ret = u.validate_endpoint_data(endpoints, admin_port, internal_port,
                                       public_port, expected)
        if ret:
            message = 'osapi endpoint: {}'.format(ret)
            amulet.raise_status(amulet.FAIL, msg=message)

        u.log.debug('Ok')
Example No. 41
    def test_203_cinder_keystone_identity_service_relation(self):
        """Verify the cinder identity-service relation data"""
        u.log.debug('Checking cinder to keystone id relation data...')
        unit = self.cinder_sentry
        relation = ['identity-service', 'keystone:identity-service']
        expected = {
            'cinder_service': 'cinder',
            'cinder_region': 'RegionOne',
            'cinder_public_url': u.valid_url,
            'cinder_internal_url': u.valid_url,
            'cinder_admin_url': u.valid_url,
            'cinderv2_service': 'cinderv2',
            'cinderv2_region': 'RegionOne',
            'cinderv2_public_url': u.valid_url,
            'cinderv2_internal_url': u.valid_url,
            'cinderv2_admin_url': u.valid_url,
            'private-address': u.valid_ip,
        }
        ret = u.validate_relation_data(unit, relation, expected)
        if ret:
            message = u.relation_error('cinder identity-service', ret)
            amulet.raise_status(amulet.FAIL, msg=message)
Example No. 42
    def test_205_nova_cc_network_service_relation(self):
        """Verify the nova-cc to neutron-gateway quantum-network-service
           relation data"""
        u.log.debug('Checking nova-cc:neutron-gateway net svc '
                    'relation data...')
        unit = self.nova_cc_sentry
        relation = [
            'quantum-network-service',
            'neutron-gateway:quantum-network-service'
        ]
        expected = {
            'service_protocol': 'http',
            'service_tenant': 'services',
            'quantum_url': u.valid_url,
            'quantum_port': '9696',
            'service_port': '5000',
            'region': 'RegionOne',
            'service_password': u.not_null,
            'quantum_host': u.valid_ip,
            'auth_port': '35357',
            'auth_protocol': 'http',
            'private-address': u.valid_ip,
            'keystone_host': u.valid_ip,
            'quantum_plugin': 'ovs',
            'auth_host': u.valid_ip,
            'service_tenant_name': 'services'
        }

        if self._get_openstack_release() >= self.trusty_kilo:
            # Kilo or later
            expected['service_username'] = '******'
        else:
            # Juno or earlier
            expected['service_username'] = '******'

        ret = u.validate_relation_data(unit, relation, expected)
        if ret:
            message = u.relation_error('nova-cc network-service', ret)
            amulet.raise_status(amulet.FAIL, msg=message)
Example No. 43
    def test_202_cinderdatera_cinder_backend_relation(self):
        u.log.debug('Checking cinder-datera:storage-backend to '
                    'cinder:storage-backend relation data...')
        unit = self.cinder_datera_sentry
        relation = ['storage-backend', 'cinder:storage-backend']

        sub = {
            "cinder": {
                "/etc/cinder/cinder.conf": {
                    "sections": {
                        "cinder-datera": [
                            ["san_ip", "172.19.1.222"],
                            ["san_login", "admin"],
                            ["san_password", "password"],
                            ["volume_backend_name", "cinder-datera"],
                            [
                                "volume_driver",
                                "cinder.volume.drivers.datera."
                                "datera_iscsi.DateraDriver"
                            ],
                            ["use_multipath_for_image_xfer", "true"],
                        ]
                    }
                }
            }
        }

        expected = {
            'subordinate_configuration': json.dumps(sub),
            'private-address': u.valid_ip,
            'backend_name': 'cinder-datera',
            'egress-subnets': u.not_null,
            'ingress-address': u.valid_ip,
        }

        ret = u.validate_relation_data(unit, relation, expected)
        if ret:
            msg = u.relation_error('cinder cinder-datera storage-backend', ret)
            amulet.raise_status(amulet.FAIL, msg=msg)
Exemplo n.º 44
    def test_102_keystone_tenants(self):
        """Verify all existing tenants."""
        u.log.debug('Checking keystone tenants...')
        expected = [
            {'name': 'services',
             'enabled': True,
             'description': 'Created by Juju',
             'id': u.not_null},
            {'name': 'demoTenant',
             'enabled': True,
             'description': 'demo tenant',
             'id': u.not_null},
            {'name': 'admin',
             'enabled': True,
             'description': 'Created by Juju',
             'id': u.not_null}
        ]
        actual = self.keystone.tenants.list()

        ret = u.validate_tenant_data(expected, actual)
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)
Example no. 45
    def test_104_keystone_service_catalog(self):
        """Verify that the service catalog endpoint data is valid."""
        u.log.debug('Checking keystone service catalog...')
        endpoint_id = {
            'adminURL': u.valid_url,
            'region': 'RegionOne',
            'publicURL': u.valid_url,
            'internalURL': u.valid_url,
            'id': u.not_null
        }

        expected = {
            'image': [endpoint_id],
            'object-store': [endpoint_id],
            'identity': [endpoint_id],
            's3': [endpoint_id]
        }
        actual = self.keystone_demo.service_catalog.get_endpoints()

        ret = u.validate_svc_catalog_endpoint_data(expected, actual)
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)
Example no. 46
    def test_102_services(self):
        """Verify that the expected services are running on the
           corresponding service units."""
        services = {
            self.rmq0_sentry: ['rabbitmq-server'],
            self.rmq1_sentry: ['rabbitmq-server'],
            self.rmq2_sentry: ['rabbitmq-server'],
            self.cinder_sentry: ['cinder-scheduler',
                                 'cinder-volume'],
        }

        _release = self._get_openstack_release_string()
        if CompareOpenStackReleases(_release) >= 'ocata':
            services[self.cinder_sentry].append('apache2')
        else:
            services[self.cinder_sentry].append('cinder-api')

        ret = u.validate_services_by_name(services)
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)

        u.log.info('OK\n')
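CompareOpenStackReleases orders release codenames by their position in the known release sequence, so >= 'ocata' means Ocata or any later release, not an alphabetical comparison. A minimal sketch assuming equivalent semantics; the release list here is abbreviated:

_RELEASES = ['mitaka', 'newton', 'ocata', 'pike', 'queens', 'rocky']

def release_ge(release, minimum):
    """True if `release` is `minimum` or later in the sequence."""
    return _RELEASES.index(release) >= _RELEASES.index(minimum)

assert release_ge('pike', 'ocata')        # cinder-api served by apache2
assert not release_ge('newton', 'ocata')  # standalone cinder-api service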
Example no. 47
    def test_404_connection(self):
        """Verify the apache status module gets disabled when
        hardening apache."""

        u.log.debug('Checking apache mod_status gets disabled.')
        unit = self.openstack_dashboard_sentry
        dashboard_relation = unit.relation('identity-service',
                                           'keystone:identity-service')
        dashboard_ip = dashboard_relation['private-address']

        u.log.debug('Enabling hardening for apache...')
        self.d.configure('openstack-dashboard', {'harden': 'apache'})
        time.sleep(5)  # wait for hook to run
        self.d.sentry.wait()  # wait for hook to finish

        try:
            urllib2.urlopen('http://%s/server-status' % (dashboard_ip))
        except urllib2.HTTPError as e:
            if e.code == 404:
                return
        msg = "Apache mod_status check failed."
        amulet.raise_status(amulet.FAIL, msg=msg)
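The probe above uses the Python 2 urllib2 module. A sketch of the same check under Python 3; the host argument is a placeholder:

from urllib.error import HTTPError
from urllib.request import urlopen

def mod_status_disabled(host):
    """Return True if /server-status answers 404 (mod_status disabled)."""
    try:
        urlopen('http://{}/server-status'.format(host))
    except HTTPError as e:
        return e.code == 404
    return False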
Example no. 48
    def test_202_rmq_nrpe_ext_master_relation(self):
        """Verify rabbitmq-server:nrpe nrpe-external-master relation data"""
        u.log.debug('Checking rmq:nrpe external master relation data...')
        unit = self.rmq0_sentry
        relation = ['nrpe-external-master',
                    'nrpe:nrpe-external-master']

        mon_sub = ('monitors:\n  remote:\n    nrpe:\n      rabbitmq: '
                   '{command: check_rabbitmq}\n      rabbitmq_queue: '
                   '{command: check_rabbitmq_queue}\n')

        expected = {
            'private-address': u.valid_ip,
            'monitors': mon_sub
        }

        ret = u.validate_relation_data(unit, relation, expected)
        if ret:
            msg = u.relation_error('amqp nrpe', ret)
            amulet.raise_status(amulet.FAIL, msg=msg)

        u.log.info('OK\n')
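The monitors value is YAML, so it could also be checked structurally rather than byte-for-byte, which would tolerate harmless formatting differences. A hedged alternative sketch, assuming PyYAML is available:

import yaml

mon_sub = ('monitors:\n  remote:\n    nrpe:\n      rabbitmq: '
           '{command: check_rabbitmq}\n      rabbitmq_queue: '
           '{command: check_rabbitmq_queue}\n')

checks = yaml.safe_load(mon_sub)['monitors']['remote']['nrpe']
assert checks['rabbitmq'] == {'command': 'check_rabbitmq'}
assert checks['rabbitmq_queue'] == {'command': 'check_rabbitmq_queue'}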
Example no. 49
    def test_202_cinderbackup_cinder_backend_relation(self):
        """Verify the cinder-backup:backup-backend to
           cinder:backup-backend relation data."""
        u.log.debug('Checking cinder-backup:backup-backend to '
                    'cinder:backup-backend relation data...')
        unit = self.cinder_backup_sentry
        relation = ['backup-backend', 'cinder:backup-backend']

        sub = ('{"cinder": {"/etc/cinder/cinder.conf": {"sections": '
               '{"DEFAULT": ['
               '["backup_driver", "cinder.backup.drivers.ceph"], '
               '["backup_ceph_conf", '
               '"/var/lib/charm/cinder-backup/ceph.conf"], '
               '["backup_ceph_pool", "cinder-backup"], '
               '["backup_ceph_user", "cinder-backup"]]}}}}')
        expected = {
            'subordinate_configuration': sub,
            'private-address': u.valid_ip,
            'backend_name': 'cinder-backup'
        }
        ret = u.validate_relation_data(unit, relation, expected)
        if ret:
            msg = u.relation_error('cinder cinder-backup backup-backend', ret)
            amulet.raise_status(amulet.FAIL, msg=msg)
Example no. 50
    def test_110_service_catalog(self):
        """Expect certain endpoints and endpoint data to be
        present in the Keystone service catalog"""
        u.log.debug('Checking service catalog endpoint data...')
        ep_validate = {
            'adminURL': u.valid_url,
            'region': 'RegionOne',
            'publicURL': u.valid_url,
            'internalURL': u.valid_url,
            'id': u.not_null
        }
        expected = {
            'compute': [ep_validate],
            'orchestration': [ep_validate],
            'image': [ep_validate],
            'identity': [ep_validate]
        }

        actual = self.keystone.service_catalog.get_endpoints()
        ret = u.validate_svc_catalog_endpoint_data(expected, actual)
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)
Example no. 51
    def test_307_neutron_metering_agent_config(self):
        """Verify the data in the metering agent config file.  This is only
           available since havana."""
        u.log.debug('Checking neutron gateway metering agent '
                    'config file data...')
        unit = self.neutron_gateway_sentry
        conf = '/etc/neutron/metering_agent.ini'
        expected = {
            'driver': 'neutron.services.metering.drivers.iptables.'
            'iptables_driver.IptablesMeteringDriver',
            'measure_interval': '30',
            'report_interval': '300',
            'interface_driver': 'neutron.agent.linux.interface.'
            'OVSInterfaceDriver',
            'use_namespaces': 'True'
        }
        section = 'DEFAULT'

        ret = u.validate_config_data(unit, conf, section, expected)
        if ret:
            message = "metering agent config error: {}".format(ret)
            amulet.raise_status(amulet.FAIL, msg=message)
Example no. 52
    def _extend_cinder_volume(self, vol_id, new_size=2):
        """Extend an existing cinder volume size.

        :param vol_id: existing cinder volume to extend
        :param new_size: new size in gigabytes
        :returns: None if successful; Failure message otherwise
        """
        # Extend existing volume size. Note: the size is read back
        # immediately after the extend request, before the (asynchronous)
        # resize completes, so it captures the original size.
        try:
            self.cinder.volumes.extend(vol_id, new_size)
            vol_size_org = self.cinder.volumes.get(vol_id).size
        except Exception as e:
            msg = 'Failed to extend volume: {}'.format(e)
            amulet.raise_status(amulet.FAIL, msg=msg)

        # Confirm that the volume reaches available status.
        ret = u.resource_reaches_status(self.cinder.volumes,
                                        vol_id,
                                        expected_stat="available",
                                        msg="Volume status wait")
        if not ret:
            msg = ('Cinder volume failed to reach expected state '
                   'while extending.')
            return msg

        # Validate volume size and status
        u.log.debug('Validating volume attributes...')
        vol_size_ext = self.cinder.volumes.get(vol_id).size
        vol_stat = self.cinder.volumes.get(vol_id).status
        msg_attr = ('Volume attributes - orig size:{} extended size:{} '
                    'stat:{}'.format(vol_size_org, vol_size_ext, vol_stat))

        if vol_size_ext > vol_size_org and vol_stat == 'available':
            u.log.debug(msg_attr)
        else:
            msg = ('Volume validation failed, {}'.format(msg_attr))
            return msg

        return None
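A hypothetical caller for the helper above; the test name, volume name and sizes are illustrative, and a v2 cinderclient create() signature is assumed:

    def test_4xx_cinder_volume_extend(self):
        """Create a volume, then extend it via _extend_cinder_volume."""
        vol = self.cinder.volumes.create(size=1, name='demo-vol')
        ret = u.resource_reaches_status(self.cinder.volumes, vol.id,
                                        expected_stat='available',
                                        msg='Volume create wait')
        if not ret:
            amulet.raise_status(amulet.FAIL,
                                msg='Volume never became available')
        msg = self._extend_cinder_volume(vol.id, new_size=2)
        if msg:
            amulet.raise_status(amulet.FAIL, msg=msg)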
Example no. 53
    def test_303_neutron_fwaas_driver_config(self):
        """Verify the data in the fwaas driver config file.  This is only
           available since havana."""
        u.log.debug('Checking neutron gateway fwaas config file data...')
        unit = self.neutron_gateway_sentry
        conf = '/etc/neutron/fwaas_driver.ini'
        expected = {'enabled': 'True'}
        section = 'fwaas'

        if self._get_openstack_release() >= self.trusty_kilo:
            # Kilo or later
            expected['driver'] = ('neutron_fwaas.services.firewall.drivers.'
                                  'linux.iptables_fwaas.IptablesFwaasDriver')
        else:
            # Juno or earlier
            expected['driver'] = ('neutron.services.firewall.drivers.linux.'
                                  'iptables_fwaas.IptablesFwaasDriver')

        ret = u.validate_config_data(unit, conf, section, expected)
        if ret:
            message = "fwaas driver config error: {}".format(ret)
            amulet.raise_status(amulet.FAIL, msg=message)
Example no. 54
    def test_400_ceph_check_osd_pools(self):
        """Check osd pools on all ceph units, expect them to be
        identical, and expect specific pools to be present."""
        u.log.debug('Checking pools on ceph units...')

        expected_pools = self.get_ceph_expected_pools()
        results = []
        sentries = [
            self.ceph_osd_sentry,
            self.ceph0_sentry,
            self.ceph1_sentry,
            self.ceph2_sentry
        ]

        # Check for presence of expected pools on each unit
        u.log.debug('Expected pools: {}'.format(expected_pools))
        for sentry_unit in sentries:
            pools = u.get_ceph_pools(sentry_unit)
            results.append(pools)

            for expected_pool in expected_pools:
                if expected_pool not in pools:
                    msg = ('{} does not have pool: '
                           '{}'.format(sentry_unit.info['unit_name'],
                                       expected_pool))
                    amulet.raise_status(amulet.FAIL, msg=msg)
            u.log.debug('{} has (at least) the expected '
                        'pools.'.format(sentry_unit.info['unit_name']))

        # Check that all units returned the same pool name:id data
        ret = u.validate_list_of_identical_dicts(results)
        if ret:
            u.log.debug('Pool list results: {}'.format(results))
            msg = ('{}; Pool list results are not identical on all '
                   'ceph units.'.format(ret))
            amulet.raise_status(amulet.FAIL, msg=msg)
        else:
            u.log.debug('Pool list on all ceph units produced the '
                        'same results (OK).')
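A minimal sketch of the identical-results check, assuming semantics like u.validate_list_of_identical_dicts: None on success, a description of the first mismatch otherwise:

def validate_identical(dicts):
    """Return None if all dicts are equal, else a mismatch message."""
    first = dicts[0]
    for i, d in enumerate(dicts[1:], start=1):
        if d != first:
            return 'entry {} differs from entry 0'.format(i)
    return None

assert validate_identical([{'rbd': 1}, {'rbd': 1}]) is None
assert validate_identical([{'rbd': 1}, {'rbd': 2}]) is not None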
Example no. 55
    def test_206_neutron_api_neutron_ovs_plugin_api_relation(self):
        """Verify neutron-api to neutron-openvswitch neutron-plugin-api"""
        u.log.debug('Checking neutron-api:neutron-ovs plugin-api '
                    'relation data...')
        unit = self.neutron_api_sentry
        relation = [
            'neutron-plugin-api', 'neutron-openvswitch:neutron-plugin-api'
        ]

        u.log.debug(unit.relation(relation[0], relation[1]))
        expected = {
            'auth_host': u.valid_ip,
            'auth_port': '35357',
            'auth_protocol': 'http',
            'enable-dvr': 'False',
            'enable-l3ha': 'False',
            'l2-population': 'True',
            'neutron-security-groups': 'False',
            'overlay-network-type': 'gre',
            'private-address': u.valid_ip,
            'region': 'RegionOne',
            'service_host': u.valid_ip,
            'service_password': u.not_null,
            'service_port': '5000',
            'service_protocol': 'http',
            'service_tenant': 'services',
            'service_username': '******',
        }

        if self._get_openstack_release() >= self.trusty_mitaka:
            expected.update({
                'dns-domain': 'openstack.example.',
            })

        ret = u.validate_relation_data(unit, relation, expected)
        if ret:
            message = u.relation_error(
                'neutron-api neutron-ovs neutron-plugin-api', ret)
            amulet.raise_status(amulet.FAIL, msg=message)
Example no. 56
    def test_110_service_catalog(self):
        """Verify that the service catalog endpoint data is valid."""
        u.log.debug('Checking keystone service catalog data...')
        endpoint_check = {
            'adminURL': u.valid_url,
            'id': u.not_null,
            'region': 'RegionOne',
            'publicURL': u.valid_url,
            'internalURL': u.valid_url
        }
        expected = {
            'metering': [endpoint_check],
            'identity': [endpoint_check],
            'alarming': [endpoint_check],
        }
        actual = self.keystone.service_catalog.get_endpoints()

        ret = u.validate_svc_catalog_endpoint_data(expected, actual)
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)

        u.log.debug('OK')
Example no. 57
    def test_110_users(self):
        """Verify expected users."""
        u.log.debug('Checking keystone users...')
        expected = [{
            'name': 'cinder_cinderv2',
            'enabled': True,
            'tenantId': u.not_null,
            'id': u.not_null,
            'email': 'juju@localhost'
        }, {
            'name': 'admin',
            'enabled': True,
            'tenantId': u.not_null,
            'id': u.not_null,
            'email': 'juju@localhost'
        }]
        if self._get_openstack_release() > self.xenial_ocata:
            expected[0]['name'] = 'cinderv2_cinderv3'
        actual = self.keystone.users.list()
        ret = u.validate_user_data(expected, actual)
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)
Example no. 58
    def test_114_aodh_api_endpoint(self):
        """Verify the aodh api endpoint data."""
        u.log.debug('Checking aodh api endpoint data...')
        endpoints = self.keystone.endpoints.list()
        u.log.debug(endpoints)
        admin_port = internal_port = public_port = '8042'
        expected = {
            'id': u.not_null,
            'region': 'RegionOne',
            'adminurl': u.valid_url,
            'internalurl': u.valid_url,
            'publicurl': u.valid_url,
            'service_id': u.not_null
        }

        ret = u.validate_endpoint_data(endpoints, admin_port, internal_port,
                                       public_port, expected)
        if ret:
            message = 'Aodh endpoint: {}'.format(ret)
            amulet.raise_status(amulet.FAIL, msg=message)

        u.log.debug('OK')
Example no. 59
    def test_108_s3_api_endpoint(self):
        """Verify the S3 api endpoint data."""
        if self._get_openstack_release() >= self.trusty_kilo:
            return

        u.log.debug('Checking s3 endpoint data...')
        endpoints = self.keystone.endpoints.list()
        admin_port = internal_port = public_port = '3333'
        expected = {
            'id': u.not_null,
            'region': 'RegionOne',
            'adminurl': u.valid_url,
            'internalurl': u.valid_url,
            'publicurl': u.valid_url,
            'service_id': u.not_null
        }

        ret = u.validate_endpoint_data(endpoints, admin_port, internal_port,
                                       public_port, expected)
        if ret:
            message = 'S3 endpoint: {}'.format(ret)
            amulet.raise_status(amulet.FAIL, msg=message)
Example no. 60
    def test_202_cinderceph_cinder_backend_relation(self):
        """Verify the cinder-ceph:storage-backend to
           cinder:storage-backend relation data."""
        u.log.debug('Checking cinder-ceph:storage-backend to '
                    'cinder:storage-backend relation data...')
        unit = self.cinder_ceph_sentry
        relation = ['storage-backend', 'cinder:storage-backend']
        backend_uuid, _ = unit.run('leader-get secret-uuid')

        sub_dict = {
            "cinder": {
                "/etc/cinder/cinder.conf": {
                    "sections": {
                        "cinder-ceph": [
                            ["volume_backend_name", "cinder-ceph"],
                            [
                                "volume_driver",
                                "cinder.volume.drivers.rbd.RBDDriver"
                            ],
                            ["rbd_pool", "cinder-ceph"],
                            ["rbd_user", "cinder-ceph"],
                            ["rbd_secret_uuid", backend_uuid],
                            [
                                'rbd_ceph_conf',
                                '/var/lib/charm/cinder-ceph/ceph.conf'
                            ],
                        ]
                    }
                }
            }
        }

        expected = {
            'subordinate_configuration': json.dumps(sub_dict),
            'private-address': u.valid_ip,
            'backend_name': 'cinder-ceph'
        }
        ret = u.validate_relation_data(unit, relation, expected)
        if ret:
            msg = u.relation_error('cinder cinder-ceph storage-backend', ret)
            amulet.raise_status(amulet.FAIL, msg=msg)
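Because subordinate_configuration is produced with json.dumps, the string comparison above is sensitive to key ordering. A hedged alternative, usable inside the test above (unit and sub_dict come from that test; json is already imported), is to parse both sides and compare structures:

        rel = unit.relation('storage-backend', 'cinder:storage-backend')
        actual = json.loads(rel['subordinate_configuration'])
        if actual != sub_dict:
            amulet.raise_status(amulet.FAIL,
                                msg='subordinate_configuration mismatch')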