def create_fake_external_network(self):
        """Create an external ("provider") network plus a /24 subnet.

        The subnet reserves 10.255.255.100-200 as its allocation pool.

        :returns: the network dict as reported by the Neutron client.
        """
        network = self.os_clients.network.create_network({
            "network": {
                "name": utils.rand_name("ext-net-"),
                "router:external": True,
                "provider:network_type": "local",
            }
        })['network']

        self.os_clients.network.create_subnet({
            "subnet": {
                "name": utils.rand_name("ext-subnet-"),
                "network_id": network["id"],
                "ip_version": 4,
                "cidr": "10.255.255.0/24",
                "allocation_pools": [
                    {"start": "10.255.255.100", "end": "10.255.255.200"},
                ],
            }
        })
        return network
    def test_glance_notifications(self, os_clients, es_client):
        """Check that Glance notifications are present in Elasticsearch

        Scenario:
            1. Create, update and delete image actions using Glance v2
            2. Check that Glance notifications are present in current
               Elasticsearch index

        Duration 15m
        """
        glance_event_types = [
            "image.create", "image.prepare", "image.upload", "image.activate",
            "image.update", "image.delete"
        ]

        image_name = utils.rand_name("image-")
        client = os_clients.image
        image = client.images.create(name=image_name,
                                     container_format="bare",
                                     disk_format="raw")
        client.images.upload(image.id, "dummy_data")
        wait_for_resource_status(client.images, image.id, "active")

        prop = utils.rand_name("prop")
        value_prop = utils.rand_name("value")
        properties = '{0}: {1}'.format(prop, value_prop)
        image = client.images.update(image.id, group_props=properties)
        # any() already yields a bool; the former "is True" was redundant.
        assert any(image[key] == properties for key in image)

        client.images.delete(image.id)
        # Compare against image ids, not image objects: the previous
        # membership test ("image.id not in client.images.list()") matched
        # the id string against whole image objects, so it was always true
        # and the wait returned immediately.
        utils.wait(
            lambda: image.id not in [i["id"] for i in client.images.list()])

        es_client.check_notifications(glance_event_types,
                                      query_filter="Logger:glance",
                                      size=500)
# Example 3
    def create_stack(self, template, disable_rollback=True, parameters=None,
                     wait_active=True):
        """Create a Heat stack from *template* and return its details.

        :param template: stack template body.
        :param disable_rollback: forwarded to the Heat create call.
        :param parameters: optional dict of stack parameters.
        :param wait_active: when True, block until CREATE_COMPLETE.
        """
        heat = self.os_clients.orchestration
        stack_id = heat.stacks.create(
            stack_name=utils.rand_name('stack-'),
            template=template,
            parameters=parameters or {},
            disable_rollback=disable_rollback
        )['stack']['id']

        # self.addCleanup(self.delete_stack, stack_id)

        # heat client doesn't return stack details after creation
        # so need to request them
        stack = heat.stacks.get(stack_id)
        if wait_active:
            utils.wait(
                lambda: (heat.stacks.get(stack_id).stack_status ==
                         "CREATE_COMPLETE"),
                interval=10,
                timeout=180,
            )
        return stack
# Example 4
 def create_sec_group(self, rulesets=None):
     """Create a security group, opening SSH and ICMP by default.

     :param rulesets: optional list of rule kwargs; when omitted,
         tcp/22 (ssh) and icmp (ping) are allowed from 0.0.0.0/0.
     """
     default_rules = [
         {
             # ssh
             'ip_protocol': 'tcp',
             'from_port': 22,
             'to_port': 22,
             'cidr': '0.0.0.0/0',
         },
         {
             # ping
             'ip_protocol': 'icmp',
             'from_port': -1,
             'to_port': -1,
             'cidr': '0.0.0.0/0',
         },
     ]
     if rulesets is None:
         rulesets = default_rules
     name = utils.rand_name("secgroup-")
     secgroup = self.os_clients.compute.security_groups.create(
         name, name + " description")
     for rule in rulesets:
         self.os_clients.compute.security_group_rules.create(
             secgroup.id, **rule)
     return secgroup
 def create_basic_server(self,
                         image=None,
                         flavor=None,
                         net=None,
                         availability_zone=None,
                         sec_groups=(),
                         wait_timeout=3 * 60):
     """Boot a server, defaulting to cirros image / micro flavor /
     internal network.

     :param sec_groups: optional security group names to attach.
     :param wait_timeout: seconds to wait for ACTIVE; falsy skips the wait.
     """
     compute = self.os_clients.compute
     boot_net = net or self.get_internal_network()
     extra = {'security_groups': sec_groups} if sec_groups else {}
     server = compute.servers.create(
         utils.rand_name("server-"),
         image or self.get_cirros_image(),
         flavor or self.get_micro_flavor(),
         nics=[{"net-id": boot_net["id"]}],
         availability_zone=availability_zone,
         **extra)
     if wait_timeout:
         utils.wait(
             lambda: compute.servers.get(server).status == "ACTIVE",
             timeout=wait_timeout,
             timeout_msg=("Create server {!r} failed by timeout. "
                          "Please, take a look at OpenStack logs".format(
                              server.id)))
     return server
# Example 6
    def test_glance_metrics(self, destructive, prometheus_api, os_clients):
        """Check Glance image count and size metrics in Prometheus.

        Creates a public image, verifies openstack_glance_images and
        openstack_glance_images_size against the API listing, then deletes
        the image and waits for it to disappear.
        """
        image_name = utils.rand_name("image-")
        client = os_clients.image
        image = client.images.create(
            name=image_name,
            container_format="bare",
            disk_format="raw",
            visibility="public")
        client.images.upload(image.id, "dummy_data")
        wait_for_resource_status(client.images, image.id, "active")
        destructive.append(lambda: client.images.delete(image.id))

        # Renamed from "filter" to avoid shadowing the builtin; list the
        # images once and derive both metrics from the same snapshot
        # (the original made two API calls that could race with each other).
        image_filter = {"visibility": "public"}
        public_images = list(client.images.list(filters=image_filter))
        images_count = len(public_images)
        images_size = sum(im["size"] for im in public_images)

        count_query = ('{__name__="openstack_glance_images",'
                       'visibility="public",status="active"}')
        err_count_msg = "Incorrect image count in metric {}".format(
            count_query)
        self.check_openstack_metrics(
            prometheus_api, count_query, images_count, err_count_msg)

        size_query = ('{__name__="openstack_glance_images_size",'
                      'visibility="public", status="active"}')
        error_size_msg = "Incorrect image size in metric {}".format(size_query)
        self.check_openstack_metrics(
            prometheus_api, size_query, images_size, error_size_msg)

        client.images.delete(image.id)
        utils.wait(
            lambda: (image.id not in [i["id"] for i in client.images.list()])
        )
# Example 7
    def test_cinder_metrics(self, destructive, prometheus_api, os_clients):
        """Check Cinder volume count and size metrics in Prometheus.

        Creates a 1 GB volume, verifies openstack_cinder_volumes and
        openstack_cinder_volumes_size against the API listing, then deletes
        the volume and waits for it to disappear.
        """
        volume_name = utils.rand_name("volume-")
        expected_volume_status = settings.VOLUME_STATUS
        client = os_clients.volume
        volume = client.volumes.create(size=1, name=volume_name)
        wait_for_resource_status(client.volumes, volume.id,
                                 expected_volume_status)
        # Bug fix: the cleanup lambda called client.volume.delete (no such
        # attribute) instead of client.volumes.delete, so destructive
        # cleanup raised AttributeError instead of deleting the volume.
        destructive.append(lambda: client.volumes.delete(volume))

        # Renamed from "filter" to avoid shadowing the builtin; list the
        # volumes once and derive both metrics from the same snapshot.
        volume_filter = {'status': expected_volume_status, 'all_tenants': 1}
        matched_volumes = list(client.volumes.list(search_opts=volume_filter))
        volumes_count = len(matched_volumes)
        # Cinder reports sizes in GB; the metric is in bytes.
        volumes_size = sum(vol.size for vol in matched_volumes) * 10**9

        count_query = ('{{__name__="openstack_cinder_volumes",'
                       'status="{0}"}}'.format(expected_volume_status))
        err_count_msg = "Incorrect volume count in metric {}".format(
            count_query)
        self.check_openstack_metrics(
            prometheus_api, count_query, volumes_count, err_count_msg)

        size_query = ('{{__name__="openstack_cinder_volumes_size",'
                      'status="{0}"}}'.format(expected_volume_status))
        error_size_msg = "Incorrect volume size in metric {}".format(
            size_query)
        self.check_openstack_metrics(
            prometheus_api, size_query, volumes_size, error_size_msg)

        client.volumes.delete(volume)
        utils.wait(
            lambda: (volume.id not in [v.id for v in client.volumes.list()])
        )
# Example 8
 def create_network(self, tenant_id):
     """Create a tenant network with a random name.

     :param tenant_id: owner tenant for the new network.
     :returns: the network dict as reported by the Neutron client.
     """
     body = {
         'network': {
             'name': utils.rand_name("net-"),
             'tenant_id': tenant_id,
         }
     }
     return self.os_clients.network.create_network(body)['network']
# Example 9
    def test_keystone_notifications_toolchain(self):
        """Check that Keystone notifications are present in Elasticsearch

        Scenario:
            1. Create user and authenticate with it to Horizon
            2. Check that Keystone notifications are present in current
               Elasticsearch index

        Duration 15m
        """
        keystone_event_types = [
            "identity.role.created", "identity.role.deleted",
            "identity.user.created", "identity.user.deleted",
            "identity.project.created", "identity.project.deleted",
            "identity.authenticate"
        ]

        keystone = self.os_clients.auth
        tenant = keystone.tenants.create(utils.rand_name("tenant-"))

        password = "******"
        user = keystone.users.create(
            utils.rand_name("user-"), password, "*****@*****.**", tenant.id)

        role = keystone.roles.create(utils.rand_name("role-"))

        # Authenticating as the new user emits identity.authenticate.
        auth = keystone.tokens.authenticate(username=user.name,
                                            password=password,
                                            tenant_id=tenant.id,
                                            tenant_name=tenant.name)
        assert auth

        # Deleting the resources emits the matching *.deleted events.
        keystone.roles.delete(role)
        keystone.users.delete(user)
        keystone.tenants.delete(tenant)

        self.es_kibana_api.check_notifications(keystone_event_types,
                                               index_type="notification",
                                               query_filter="Logger:keystone",
                                               size=500)
# Example 10
 def create_router(self, ext_net, tenant_id):
     """Create a router whose gateway points at *ext_net*.

     :param ext_net: external network dict (must contain 'id').
     :param tenant_id: owner tenant for the new router.
     :returns: the router dict as reported by the Neutron client.
     """
     body = {
         'router': {
             'name': utils.rand_name('router-'),
             'external_gateway_info': {'network_id': ext_net['id']},
             'tenant_id': tenant_id,
         }
     }
     return self.os_clients.network.create_router(body)['router']
 def create_subnet(self, net, tenant_id, cidr=None):
     """Create an IPv4 subnet on *net*.

     :param net: network dict (must contain 'id').
     :param tenant_id: owner tenant for the new subnet.
     :param cidr: optional CIDR; defaults to 10.1.7.0/24.
     :returns: the subnet dict as reported by the Neutron client.
     """
     body = {
         'subnet': {
             "name": utils.rand_name("subnet-"),
             'network_id': net['id'],
             'ip_version': 4,
             'cidr': cidr if cidr else '10.1.7.0/24',
             'tenant_id': tenant_id,
         }
     }
     return self.os_clients.network.create_subnet(body)['subnet']
    def test_heat_notifications(self, os_clients, os_actions, es_client):
        """Check that Heat notifications are present in Elasticsearch

        Scenario:
            1. Run Heat platform actions
            2. Check that Heat notifications are present in current
               Elasticsearch index

        Duration 25m
        """
        # Only create/delete events are checked; the suspend/resume/check/
        # rollback events remain commented out together with the actions
        # below that would trigger them.
        heat_event_types = [
            # "orchestration.stack.check.start",
            # "orchestration.stack.check.end",
            "orchestration.stack.create.start",
            "orchestration.stack.create.end",
            "orchestration.stack.delete.start",
            "orchestration.stack.delete.end",
            # "orchestration.stack.resume.start",
            # "orchestration.stack.resume.end",
            # "orchestration.stack.rollback.start",
            # "orchestration.stack.rollback.end",
            # "orchestration.stack.suspend.start",
            # "orchestration.stack.suspend.end"
        ]

        # A throwaway flavor for the stack's server resource.
        name = utils.rand_name("heat-flavor-")
        flavor = os_actions.create_flavor(name)

        filepath = utils.get_fixture("heat_create_neutron_stack_template.yaml",
                                     parent_dirs=("heat", ))
        with open(filepath) as template_file:
            template = template_file.read()

        parameters = {
            'InstanceType': flavor.name,
            'ImageId': os_actions.get_cirros_image().id,
            'network': os_actions.get_internal_network()["id"],
        }

        # First pass: a stack that should create successfully.
        stack = os_actions.create_stack(template, parameters=parameters)

        # os_clients.orchestration.actions.suspend(stack.id)
        # utils.wait(
        #     (lambda:
        #      os_clients.orchestration.stacks.get(
        #          stack.id).stack_status == "SUSPEND_COMPLETE"),
        #     interval=10,
        #     timeout=180,
        # )

        # Locate the Nova server resource created by the stack.
        resources = os_clients.orchestration.resources.list(stack.id)
        resource_server = [
            res for res in resources if res.resource_type == "OS::Nova::Server"
        ][0]
        # instance = os_clients.compute.servers.get(
        #     resource_server.physical_resource_id)

        # assert instance.status == "SUSPENDED"
        #
        # os_clients.orchestration.actions.resume(stack.id)
        # utils.wait(
        #     (lambda:
        #      os_clients.orchestration.stacks.get(
        #          stack.id).stack_status == "RESUME_COMPLETE"),
        #     interval=10,
        #     timeout=180,
        # )

        instance = os_clients.compute.servers.get(
            resource_server.physical_resource_id)
        assert instance.status == "ACTIVE"

        # os_clients.orchestration.actions.check(stack.id)
        #
        # utils.wait(
        #     (lambda:
        #      os_clients.orchestration.stacks.get(
        #          stack.id).stack_status == "CHECK_COMPLETE"),
        #     interval=10,
        #     timeout=180,
        # )

        os_clients.orchestration.stacks.delete(stack.id)
        os_clients.compute.flavors.delete(flavor.id)

        # Second pass: an impossibly large flavor (1 TB RAM) so stack
        # creation fails and rollback kicks in (rollback enabled via
        # disable_rollback=False).
        name = utils.rand_name("heat-flavor-")
        extra_large_flavor = os_actions.create_flavor(name, 1048576)
        parameters['InstanceType'] = extra_large_flavor.name
        stack = os_actions.create_stack(template,
                                        disable_rollback=False,
                                        parameters=parameters,
                                        wait_active=False)
        assert stack.stack_status == "CREATE_IN_PROGRESS"

        # Wait for the failed stack to be rolled back or fully deleted.
        utils.wait(
            (lambda: os_clients.orchestration.stacks.get(stack.id).stack_status
             in ("DELETE_COMPLETE", "ROLLBACK_COMPLETE")),
            interval=10,
            timeout=360,
        )

        # After rollback no server resource should survive (either the
        # resource list is empty or its physical id has been cleared).
        resources = os_clients.orchestration.resources.list(stack.id)
        resource_servers = [
            res for res in resources if res.resource_type == "OS::Nova::Server"
        ]
        assert (not resource_servers
                or resource_servers[0].physical_resource_id == "")

        os_clients.compute.flavors.delete(extra_large_flavor.id)

        es_client.check_notifications(heat_event_types,
                                      query_filter="Logger:heat",
                                      size=500)