def test_execute_dummy_action_plan(self):
        """Execute an action plan built from a freshly created audit.

        - create an audit template and an audit
        - wait until the audit has succeeded
        - trigger the resulting action plan by setting its state to PENDING
        - wait for the plan to finish and check it SUCCEEDED
        """
        _, audit_template = self.create_audit_template()
        _, audit = self.create_audit(audit_template['uuid'])

        # Poll for up to 30s (every .5s) until the audit reports success.
        self.assertTrue(test.call_until_true(
            func=functools.partial(self.has_audit_succeeded, audit['uuid']),
            duration=30,
            sleep_for=.5
        ))
        _, action_plans = self.client.list_action_plans(
            audit_uuid=audit['uuid'])
        action_plan = action_plans['action_plans'][0]

        _, action_plan = self.client.show_action_plan(action_plan['uuid'])

        # Execute the action by changing its state to PENDING
        _, updated_ap = self.client.update_action_plan(
            action_plan['uuid'],
            patch=[{'path': '/state', 'op': 'replace', 'value': 'PENDING'}]
        )

        # Poll again until the plan has run to completion.
        self.assertTrue(test.call_until_true(
            func=functools.partial(
                self.has_action_plan_finished, action_plan['uuid']),
            duration=30,
            sleep_for=.5
        ))
        _, finished_ap = self.client.show_action_plan(action_plan['uuid'])

        # The plan may already have moved from PENDING to ONGOING by the
        # time the update response is inspected.
        self.assertIn(updated_ap['state'], ('PENDING', 'ONGOING'))
        self.assertEqual('SUCCEEDED', finished_ap['state'])
Example #2
0
    def check_flip_status(self, floating_ip, status):
        """Verifies floatingip reaches the given status

        :param dict floating_ip: floating IP dict to check status
        :param status: target status
        :raises: AssertionError if status doesn't match
        """

        # TODO(ptoohill): Find a way to utilze the proper client method

        floatingip_id = floating_ip['id']

        def refresh():
            # Re-fetch the floating IP and report whether it has reached
            # the target status yet.
            result = (self.floating_ips_client_admin.
                      show_floatingip(floatingip_id)['floatingip'])
            return status == result['status']

        # The return value of call_until_true is deliberately unused: the
        # assertEqual below re-checks the final state and yields a more
        # descriptive failure message if the status was never reached.
        test.call_until_true(refresh, 100, 1)

        floating_ip = self.floating_ips_client_admin.show_floatingip(
            floatingip_id)['floatingip']
        # NOTE(review): the `message=` kwarg is the testtools spelling
        # (unittest uses `msg=`) -- confirm the base class is testtools.
        self.assertEqual(status, floating_ip['status'],
                         message="FloatingIP: {fp} is at status: {cst}. "
                                 "failed  to reach status: {st}"
                         .format(fp=floating_ip, cst=floating_ip['status'],
                                 st=status))
        LOG.info("FloatingIP: {fp} is at status: {st}"
                 .format(fp=floating_ip, st=status))
    def _hotplug_server(self):
        """Hot-attach a NIC on ``self.new_net`` and enable it in the guest.

        Attaches a new interface to the server, waits for the new neutron
        port to appear and for the matching NIC to show up inside the
        guest (over SSH), then statically configures and brings it up.
        """
        old_floating_ip, server = self.floating_ip_tuple
        ip_address = old_floating_ip.floating_ip_address
        private_key = self.servers[server].private_key
        ssh_client = self.get_remote_client(ip_address,
                                            private_key=private_key)
        old_nic_list = self._get_server_nics(ssh_client)
        # get a port from a list of one item
        port_list = self._list_ports(device_id=server.id)
        self.assertEqual(1, len(port_list))
        old_port = port_list[0]
        self.compute_client.servers.interface_attach(server=server,
                                                     net_id=self.new_net.id,
                                                     port_id=None,
                                                     fixed_ip=None)
        # move server to the head of the cleanup list
        self.addCleanup(self.cleanup_wrapper, server)

        def check_ports():
            # Stored on self so the result does not have to be recomputed
            # after the wait loop finishes.
            self.new_port_list = [
                port for port in self._list_ports(device_id=server.id)
                if port != old_port]
            return len(self.new_port_list) == 1

        # Fail with a clear message on timeout instead of silently
        # ignoring the result and tripping on a later assertion.
        self.assertTrue(test.call_until_true(check_ports, 60, 1),
                        "No new port attached to the server in time")
        new_port = net_common.DeletablePort(client=self.network_client,
                                            **self.new_port_list[0])

        def check_new_nic():
            # The new NIC is whatever appeared since the first listing.
            new_nic_list = self._get_server_nics(ssh_client)
            self.diff_list = [n for n in new_nic_list if n not in old_nic_list]
            return len(self.diff_list) == 1

        # The guest can also take a moment to expose the new NIC.
        self.assertTrue(test.call_until_true(check_new_nic, 60, 1),
                        "Interface not visible on the guest in time")
        num, new_nic = self.diff_list[0]
        ssh_client.assign_static_ip(nic=new_nic,
                                    addr=new_port.fixed_ips[0]["ip_address"])
        ssh_client.turn_nic_on(nic=new_nic)
Example #4
0
 def assertScale(from_servers, to_servers):
     # Wait (up to `timeout`, polling every `interval`) for the stack to
     # scale to the expected server count, then assert the final count.
     # `call_until_true`, `server_count`, `timeout`, `interval` and `self`
     # are closed over from an enclosing scope not visible in this chunk.
     call_until_true(lambda: server_count() == to_servers,
                     timeout, interval)
     self.assertEqual(to_servers, self.server_count,
                      'Failed scaling from %d to %d servers. '
                      'Current server count: %s' % (
                          from_servers, to_servers,
                          self.server_count))
    def _prepare_and_test(self, address6_mode, n_subnets6=1, dualnet=False):
        """Boot two servers on the prepared network and verify v4/v6 access.

        :param address6_mode: IPv6 address assignment mode for the subnets
        :param n_subnets6: number of IPv6 subnets to create
        :param dualnet: when True the guests get a second NIC that must be
            enabled manually before checking addresses
        """
        net_list = self.prepare_network(address6_mode=address6_mode,
                                        n_subnets6=n_subnets6,
                                        dualnet=dualnet)

        sshv4_1, ips_from_api_1, sid1 = self.prepare_server(networks=net_list)
        sshv4_2, ips_from_api_2, sid2 = self.prepare_server(networks=net_list)

        def guest_has_address(ssh, addr):
            # True when the guest's own address listing contains addr.
            return addr in ssh.get_ip_list()

        # Turn on 2nd NIC for Cirros when dualnet
        if dualnet:
            self.turn_nic6_on(sshv4_1, sid1)
            self.turn_nic6_on(sshv4_2, sid2)

        # get addresses assigned to vNIC as reported by 'ip address' utility
        ips_from_ip_1 = sshv4_1.get_ip_list()
        ips_from_ip_2 = sshv4_2.get_ip_list()
        self.assertIn(ips_from_api_1['4'], ips_from_ip_1)
        self.assertIn(ips_from_api_2['4'], ips_from_ip_2)
        for i in range(n_subnets6):
            # v6 should be configured since the image supports it
            # It can take time for ipv6 automatic address to get assigned
            srv1_v6_addr_assigned = functools.partial(
                guest_has_address, sshv4_1, ips_from_api_1['6'][i])

            srv2_v6_addr_assigned = functools.partial(
                guest_has_address, sshv4_2, ips_from_api_2['6'][i])

            self.assertTrue(test.call_until_true(srv1_v6_addr_assigned,
                                                 CONF.compute.ping_timeout, 1))

            self.assertTrue(test.call_until_true(srv2_v6_addr_assigned,
                                                 CONF.compute.ping_timeout, 1))

        self._check_connectivity(sshv4_1, ips_from_api_2['4'])
        self._check_connectivity(sshv4_2, ips_from_api_1['4'])

        # Some VM (like cirros) may not have ping6 utility
        result = sshv4_1.exec_command('whereis ping6')
        # 'whereis' prints only 'ping6:\n' when the binary is absent.
        is_ping6 = result != 'ping6:\n'
        if is_ping6:
            for i in range(n_subnets6):
                self._check_connectivity(sshv4_1,
                                         ips_from_api_2['6'][i])
                self._check_connectivity(sshv4_1,
                                         self.subnets_v6[i].gateway_ip)
                self._check_connectivity(sshv4_2,
                                         ips_from_api_1['6'][i])
                self._check_connectivity(sshv4_2,
                                         self.subnets_v6[i].gateway_ip)
        else:
            LOG.warning('Ping6 is not available, skipping')
 def call_until_valid(self, func, duration, *args, **kwargs):
     """Call ``func(*args, **kwargs)`` until it returns a valid response.

     Retries once per second for up to ``duration`` seconds, because
     tenant usage doesn't become available immediately after a VM is
     created.  Returns the last successful response.

     NOTE(review): if no attempt ever succeeds, ``self.resp`` may be
     unset and the final return raises AttributeError -- confirm that is
     the intended failure mode.
     """
     def is_valid():
         try:
             self.resp = func(*args, **kwargs)
             return True
         except e.InvalidHTTPResponseBody:
             return False
     test.call_until_true(is_valid, duration, 1)
     return self.resp
    def _hotplug_server(self):
        """Hot-attach a NIC on ``self.new_net`` and enable it in the guest.

        Creates the interface through the compute interfaces API, waits
        for the new neutron port and for the NIC to appear inside the
        guest, then statically configures and brings it up over SSH.
        """
        old_floating_ip, server = self.floating_ip_tuple
        ip_address = old_floating_ip.floating_ip_address
        private_key = self._get_server_key(server)
        ssh_client = self.get_remote_client(ip_address,
                                            private_key=private_key)
        old_nic_list = self._get_server_nics(ssh_client)
        # get a port from a list of one item
        port_list = self._list_ports(device_id=server['id'])
        self.assertEqual(1, len(port_list))
        old_port = port_list[0]
        interface = self.interface_client.create_interface(
            server_id=server['id'],
            net_id=self.new_net.id)['interfaceAttachment']
        # Cleanups run LIFO: the interface is detached first, then we
        # wait for its port to actually disappear.
        self.addCleanup(self.network_client.wait_for_resource_deletion,
                        'port',
                        interface['port_id'], client=self.ports_client)
        self.addCleanup(self.delete_wrapper,
                        self.interface_client.delete_interface,
                        server['id'], interface['port_id'])

        def check_ports():
            # Stored on self so the result is reusable after the wait.
            self.new_port_list = [port for port in
                                  self._list_ports(device_id=server['id'])
                                  if port['id'] != old_port['id']]
            return len(self.new_port_list) == 1

        if not test.call_until_true(check_ports, CONF.network.build_timeout,
                                    CONF.network.build_interval):
            raise exceptions.TimeoutException(
                "No new port attached to the server in time (%s sec)! "
                "Old port: %s. Number of new ports: %d" % (
                    CONF.network.build_timeout, old_port,
                    len(self.new_port_list)))
        new_port = net_resources.DeletablePort(ports_client=self.ports_client,
                                               **self.new_port_list[0])

        def check_new_nic():
            # The new NIC is whatever appeared since the first listing.
            new_nic_list = self._get_server_nics(ssh_client)
            self.diff_list = [n for n in new_nic_list if n not in old_nic_list]
            return len(self.diff_list) == 1

        if not test.call_until_true(check_new_nic, CONF.network.build_timeout,
                                    CONF.network.build_interval):
            raise exceptions.TimeoutException("Interface not visible on the "
                                              "guest after %s sec"
                                              % CONF.network.build_timeout)

        num, new_nic = self.diff_list[0]
        ssh_client.assign_static_ip(nic=new_nic,
                                    addr=new_port.fixed_ips[0]['ip_address'])
        ssh_client.turn_nic_on(nic=new_nic)
Example #8
0
    def test_datasource_db_sync_remove(self):
        """Verify a replica removes a datasource deleted from the database.

        Starts a second (replica) server, confirms both servers see the
        fake datasource, deletes it on the primary, and checks that the
        removal propagates to the replica.
        """
        # Verify that a replica removes a datasource when a datasource
        # disappears from the database.
        CLIENT2_PORT = 4001
        client1 = self.admin_manager.congress_client
        fake_id = self.create_fake(client1)
        # Tracks whether the finally block still has to clean up the
        # fake datasource (cleared once the test itself deletes it).
        need_to_delete_fake = True
        try:
            self.start_replica(CLIENT2_PORT)

            # Verify that primary server has fake datasource
            if not test.call_until_true(
                    func=lambda: self.datasource_exists(client1, fake_id),
                    duration=60, sleep_for=1):
                raise exceptions.TimeoutException(
                    "primary should have fake, but does not")

            # Create session for second server.
            client2 = self.create_client(CLIENT2_PORT)

            # Verify that second server has fake datasource
            if not test.call_until_true(
                    func=lambda: self.datasource_exists(client2, fake_id),
                    duration=60, sleep_for=1):
                raise exceptions.TimeoutException(
                    "replica should have fake, but does not")

            # Remove fake from primary server instance.
            LOG.debug("removing fake datasource %s", str(fake_id))
            client1.delete_datasource(fake_id)
            need_to_delete_fake = False

            # Confirm that fake is gone from primary server instance.
            if not test.call_until_true(
                    func=lambda: self.datasource_missing(client1, fake_id),
                    duration=60, sleep_for=1):
                self.stop_replica(CLIENT2_PORT)
                raise exceptions.TimeoutException(
                    "primary instance still has fake")
            LOG.debug("removed fake datasource from primary instance")

            # Confirm that second service instance removes fake.
            if not test.call_until_true(
                    func=lambda: self.datasource_missing(client2, fake_id),
                    duration=60, sleep_for=1):
                raise exceptions.TimeoutException(
                    "replica should remove fake, but still has it")

        finally:
            # Always tear down the replica; delete the fake datasource
            # only if the test did not get far enough to delete it.
            self.stop_replica(CLIENT2_PORT)
            if need_to_delete_fake:
                self.admin_manager.congress_client.delete_datasource(fake_id)
    def test_execute_dummy_action_plan(self):
        """Execute an action plan based on the 'dummy' strategy

        - create an audit template with the 'dummy' strategy
        - run the audit to create an action plan
        - get the action plan
        - run the action plan
        - get results and make sure it succeeded
        """
        _, goal = self.client.show_goal("dummy")
        _, audit_template = self.create_audit_template(goal['uuid'])
        _, audit = self.create_audit(audit_template['uuid'])

        # Poll for up to 30s (every .5s) until the audit reports success.
        self.assertTrue(test.call_until_true(
            func=functools.partial(self.has_audit_succeeded, audit['uuid']),
            duration=30,
            sleep_for=.5
        ))
        _, action_plans = self.client.list_action_plans(
            audit_uuid=audit['uuid'])
        action_plan = action_plans['action_plans'][0]

        _, action_plan = self.client.show_action_plan(action_plan['uuid'])

        # Execute the action by changing its state to PENDING
        _, updated_ap = self.client.update_action_plan(
            action_plan['uuid'],
            patch=[{'path': '/state', 'op': 'replace', 'value': 'PENDING'}]
        )

        # Poll again until the plan has run to completion.
        self.assertTrue(test.call_until_true(
            func=functools.partial(
                self.has_action_plan_finished, action_plan['uuid']),
            duration=30,
            sleep_for=.5
        ))
        _, finished_ap = self.client.show_action_plan(action_plan['uuid'])
        _, action_list = self.client.list_actions(
            action_plan_uuid=finished_ap["uuid"])

        # Tally how many actions of each type the plan generated.
        action_counter = collections.Counter(
            act['action_type'] for act in action_list['actions'])

        # The plan may already have moved from PENDING to ONGOING by the
        # time the update response is inspected.
        self.assertIn(updated_ap['state'], ('PENDING', 'ONGOING'))
        self.assertEqual('SUCCEEDED', finished_ap['state'])

        # A dummy strategy generates 2 "nop" actions and 1 "sleep" action
        self.assertEqual(3, len(action_list['actions']))
        self.assertEqual(2, action_counter.get("nop"))
        self.assertEqual(1, action_counter.get("sleep"))
    def _hotplug_server(self):
        """Hot-attach a NIC on ``self.new_net`` and enable it in the guest.

        Waits for the new neutron port and for the NIC to become visible
        inside the guest, then statically configures and brings it up.
        """
        old_floating_ip, server = self.floating_ip_tuple
        ip_address = old_floating_ip.floating_ip_address
        private_key = self.servers[server].private_key
        ssh_client = self.get_remote_client(ip_address,
                                            private_key=private_key)
        old_nic_list = self._get_server_nics(ssh_client)
        # get a port from a list of one item
        port_list = self._list_ports(device_id=server.id)
        self.assertEqual(1, len(port_list))
        old_port = port_list[0]
        self.compute_client.servers.interface_attach(server=server,
                                                     net_id=self.new_net.id,
                                                     port_id=None,
                                                     fixed_ip=None)
        # move server to the head of the cleanup list
        self.addCleanup(self.delete_timeout,
                        self.compute_client.servers,
                        server.id)
        self.addCleanup(self.delete_wrapper, server)

        def check_ports():
            # Stored on self so the result is reusable after the wait.
            self.new_port_list = [port for port in
                                  self._list_ports(device_id=server.id)
                                  if port != old_port]
            return len(self.new_port_list) == 1

        if not test.call_until_true(check_ports, CONF.network.build_timeout,
                                    CONF.network.build_interval):
            raise exceptions.TimeoutException("No new port attached to the "
                                              "server in time (%s sec) !"
                                              % CONF.network.build_timeout)
        new_port = net_common.DeletablePort(client=self.network_client,
                                            **self.new_port_list[0])

        def check_new_nic():
            # The new NIC is whatever appeared since the first listing.
            new_nic_list = self._get_server_nics(ssh_client)
            self.diff_list = [n for n in new_nic_list if n not in old_nic_list]
            return len(self.diff_list) == 1

        if not test.call_until_true(check_new_nic, CONF.network.build_timeout,
                                    CONF.network.build_interval):
            raise exceptions.TimeoutException("Interface not visible on the "
                                              "guest after %s sec"
                                              % CONF.network.build_timeout)

        num, new_nic = self.diff_list[0]
        ssh_client.assign_static_ip(nic=new_nic,
                                    addr=new_port.fixed_ips[0]['ip_address'])
        ssh_client.turn_nic_on(nic=new_nic)
Example #11
0
    def _prepare_and_test(self, address6_mode, n_subnets6=1, dualnet=False):
        """Boot two servers on the prepared network and verify v4/v6 access.

        :param address6_mode: IPv6 address assignment mode for the subnets
        :param n_subnets6: number of IPv6 subnets to create
        :param dualnet: when True the guests get a second NIC that must be
            enabled manually before checking addresses

        Unlike the sibling variant, this version always pings the v6
        addresses (no check for a ping6 utility on the guest).
        """
        net_list = self.prepare_network(address6_mode=address6_mode,
                                        n_subnets6=n_subnets6,
                                        dualnet=dualnet)

        sshv4_1, ips_from_api_1, sid1 = self.prepare_server(networks=net_list)
        sshv4_2, ips_from_api_2, sid2 = self.prepare_server(networks=net_list)

        def guest_has_address(ssh, addr):
            # True when the guest's own address listing contains addr.
            return addr in ssh.get_ip_list()

        # Turn on 2nd NIC for Cirros when dualnet
        if dualnet:
            self.turn_nic6_on(sshv4_1, sid1)
            self.turn_nic6_on(sshv4_2, sid2)

        # get addresses assigned to vNIC as reported by 'ip address' utility
        ips_from_ip_1 = sshv4_1.get_ip_list()
        ips_from_ip_2 = sshv4_2.get_ip_list()
        self.assertIn(ips_from_api_1['4'], ips_from_ip_1)
        self.assertIn(ips_from_api_2['4'], ips_from_ip_2)
        for i in range(n_subnets6):
            # v6 should be configured since the image supports it
            # It can take time for ipv6 automatic address to get assigned
            srv1_v6_addr_assigned = functools.partial(
                guest_has_address, sshv4_1, ips_from_api_1['6'][i])

            srv2_v6_addr_assigned = functools.partial(
                guest_has_address, sshv4_2, ips_from_api_2['6'][i])

            self.assertTrue(test.call_until_true(srv1_v6_addr_assigned,
                            CONF.validation.ping_timeout, 1))

            self.assertTrue(test.call_until_true(srv2_v6_addr_assigned,
                            CONF.validation.ping_timeout, 1))

        self._check_connectivity(sshv4_1, ips_from_api_2['4'])
        self._check_connectivity(sshv4_2, ips_from_api_1['4'])

        # Cross-check v6 connectivity between the guests and gateways.
        for i in range(n_subnets6):
            self._check_connectivity(sshv4_1,
                                     ips_from_api_2['6'][i])
            self._check_connectivity(sshv4_1,
                                     self.subnets_v6[i].gateway_ip)
            self._check_connectivity(sshv4_2,
                                     ips_from_api_1['6'][i])
            self._check_connectivity(sshv4_2,
                                     self.subnets_v6[i].gateway_ip)
    def test_keystone_roles_table(self):
        """Verify the Congress 'roles' table converges to keystone's roles."""
        role_schema = (
            self.admin_manager.congress_client.show_datasource_table_schema(
                self.datasource_id, 'roles')['columns'])
        # Column position of the role id within each datasource row.
        role_id_col = next(i for i, c in enumerate(role_schema)
                           if c['name'] == 'id')

        def _check_data_table_keystone_roles():
            # Fetch data from keystone each time, because this test may start
            # before keystone has all the users.
            roles = self.roles_client.list_roles()['roles']
            roles_map = {}
            for role in roles:
                roles_map[role['id']] = role

            results = (
                self.admin_manager.congress_client.list_datasource_rows(
                    self.datasource_id, 'roles'))
            for row in results['results']:
                try:
                    role_row = roles_map[row['data'][role_id_col]]
                except KeyError:
                    # Congress has a role keystone doesn't know (yet).
                    return False
                # Every column of the row must match keystone's value.
                for index in range(len(role_schema)):
                    if (str(row['data'][index]) !=
                            str(role_row[role_schema[index]['name']])):
                        return False
            return True

        if not test.call_until_true(func=_check_data_table_keystone_roles,
                                    duration=100, sleep_for=4):
            raise exceptions.TimeoutException("Data did not converge in time "
                                              "or failure in server")
    def test_glancev2_tags_table(self):
        """Verify the Congress 'tags' table converges to glance's tags."""
        def _tags_table_matches_glance():
            # Re-read glance on every attempt; this test may start
            # before glance has all the images.
            tags_by_image = {
                image['id']: image['tags']
                for image in self.glancev2.list_images()['images']
            }

            rows = (
                self.admin_manager.congress_client.list_datasource_rows(
                    self.datasource_id, 'tags'))
            for entry in rows['results']:
                image_id, tag = entry['data'][0], entry['data'][1]
                known_tags = tags_by_image.get(image_id)
                # Fail when congress has an image glance doesn't know
                # about, or a tag that isn't on the image.
                if not known_tags or tag not in known_tags:
                    return False
            return True

        if not test.call_until_true(func=_tags_table_matches_glance,
                                    duration=100, sleep_for=5):
            raise exceptions.TimeoutException("Data did not converge in time "
                                              "or failure in server")
Example #14
0
    def test_ceilometer_meters_table(self):
        """Verify the Congress 'meters' table converges to ceilometer."""
        meter_schema = (
            self.admin_manager.congress_client.show_datasource_table_schema(
                self.datasource_id, 'meters')['columns'])
        # Column position of the meter id within each datasource row.
        meter_id_col = next(i for i, c in enumerate(meter_schema)
                            if c['name'] == 'meter_id')

        def _check_data_table_ceilometer_meters():
            # Fetch data from ceilometer each time, because this test may start
            # before ceilometer has all the users.
            meters = self.telemetry_client.list_meters()
            meter_map = {}
            for meter in meters:
                meter_map[meter['meter_id']] = meter

            results = (
                self.admin_manager.congress_client.list_datasource_rows(
                    self.datasource_id, 'meters'))
            for row in results['results']:
                try:
                    meter_row = meter_map[row['data'][meter_id_col]]
                except KeyError:
                    # Congress has a meter ceilometer doesn't know (yet).
                    return False
                # Every column of the row must match ceilometer's value.
                for index in range(len(meter_schema)):
                    if (str(row['data'][index]) !=
                            str(meter_row[meter_schema[index]['name']])):
                        return False
            return True

        if not test.call_until_true(func=_check_data_table_ceilometer_meters,
                                    duration=100, sleep_for=5):
            raise exceptions.TimeoutException("Data did not converge in time "
                                              "or failure in server")
Example #15
0
    def test_neutronv2_networks_table(self):
        """Verify the Congress 'networks' table converges to neutron's."""
        def _check_data():
            networks = self.neutron_client.list_networks()
            network_map = {}
            for network in networks['networks']:
                network_map[network['id']] = network

            # Ask Congress to refresh the datasource and give it a moment
            # to pull fresh data before comparing.
            client = self.admin_manager.congress_client
            client.request_refresh(self.datasource_id)
            time.sleep(1)

            network_schema = (client.show_datasource_table_schema(
                self.datasource_id, 'networks')['columns'])

            results = (client.list_datasource_rows(
                self.datasource_id, 'networks'))
            # NOTE(review): unlike sibling checks, a row whose network id
            # is unknown to neutron raises KeyError here instead of
            # returning False -- confirm that is intended.
            for row in results['results']:
                network_row = network_map[row['data'][0]]
                for index in range(len(network_schema)):
                    if (str(row['data'][index]) !=
                            str(network_row[network_schema[index]['name']])):
                        return False
            return True

        if not test.call_until_true(func=_check_data,
                                    duration=200, sleep_for=10):
            raise exceptions.TimeoutException("Data did not converge in time "
                                              "or failure in server")
Example #16
0
    def wait_for_table_active(cls, table_name, timeout=120, interval=3):
        """Poll until ``table_name`` reports ACTIVE status.

        Returns True as soon as the table becomes ACTIVE, or False if
        ``timeout`` seconds elapse first (polling every ``interval``).
        """
        def _is_active():
            resp = cls.client.describe_table(table_name)
            if "Table" not in resp:
                return None
            table = resp["Table"]
            if "TableStatus" not in table:
                return None
            return table["TableStatus"] == "ACTIVE"

        return test.call_until_true(_is_active, timeout, interval)
Example #17
0
    def test_nova_datasource_driver_flavors(self):
        """Check nova flavor data shows up in the Congress 'flavors' table."""
        _, flavors = self.flavors_client.list_flavors_with_detail()
        flavor_id_map = {}
        for flavor in flavors:
            flavor_id_map[flavor['id']] = flavor

        def _check_data_table_nova_flavors():
            # Succeeds as soon as ANY datasource row fully matches its
            # corresponding nova flavor on the listed keys.
            results = \
                self.admin_manager.congress_policy_client.list_datasource_rows(
                    'nova', 'flavors')
            keys = ['id', 'name', 'vcpus', 'ram', 'disk',
                    'OS-FLV-EXT-DATA:ephemeral', 'rxtx_factor']
            for row in results['results']:
                match = True
                flavor_row = flavor_id_map[row['data'][0]]
                for index in range(len(keys)):
                    if row['data'][index] != flavor_row[keys[index]]:
                        match = False
                        break
                if match:
                    return True
            return False

        if not test.call_until_true(func=_check_data_table_nova_flavors,
                                    duration=20, sleep_for=4):
            raise exceptions.TimeoutException("Data did not converge in time "
                                              "or failure in server")
Example #18
0
    def test_trust_subscription(self):
        """Verify a trust+ subscriber receives posted queue messages."""
        sub_queue = data_utils.rand_name('Queues-Test')
        self.addCleanup(self.client.delete_queue, sub_queue)
        # Build a trust-authenticated webhook URL that reposts
        # notifications into sub_queue.
        subscriber = 'trust+{0}/{1}/queues/{2}/messages'.format(
            self.client.base_url, self.client.uri_prefix, sub_queue)
        post_body = json.dumps(
            {'messages': [{'body': '$zaqar_message$', 'ttl': 60}]})
        post_headers = {'X-Project-ID': self.client.tenant_id,
                        'Client-ID': str(uuid.uuid4())}
        sub_body = {'ttl': 1200, 'subscriber': subscriber,
                    'options': {'post_data': post_body,
                                'post_headers': post_headers}}

        self.create_subscription(queue_name=self.queue_name, rbody=sub_body)
        message_body = self.generate_message_body()
        self.post_messages(queue_name=self.queue_name, rbody=message_body)

        # Wait up to 10s for the notification to land in sub_queue.
        if not test.call_until_true(
                lambda: self.list_messages(sub_queue)[1]['messages'], 10, 1):
            self.fail("Couldn't get messages")
        _, body = self.list_messages(sub_queue)
        expected = message_body['messages'][0]
        expected['queue_name'] = self.queue_name
        expected['Message_Type'] = 'Notification'
        for message in body['messages']:
            # There are two message in the queue. One is the confirm message,
            # the other one is the notification.
            if message['body']['Message_Type'] == 'Notification':
                self.assertEqual(expected, message['body'])
    def test_policy_basic_op(self):
        """Create a Congress rule and wait for it to derive a row.

        Joins neutron ports, security-group port bindings and security
        groups into a 'port_security_group' table, then waits until the
        row for this test's server/security-group pair shows up.
        """
        self._setup_network_and_servers()
        body = {"rule": "port_security_group(id, security_group_name) "
                        ":-neutronv2:ports(id, tenant_id, name, network_id,"
                        "mac_address, admin_state_up, status, device_id, "
                        "device_owner),"
                        "neutronv2:security_group_port_bindings(id, "
                        "security_group_id), neutronv2:security_groups("
                        "security_group_id, tenant_id1, security_group_name,"
                        "description)"}
        results = self.admin_manager.congress_client.create_policy_rule(
            'classification', body)
        rule_id = results['id']
        self.addCleanup(
            self.admin_manager.congress_client.delete_policy_rule,
            'classification', rule_id)

        # Find the ports on this server
        ports = self._list_ports(device_id=self.servers[0]['id'])

        def check_data():
            # True once a policy row pairs this server's port with its
            # security group name.  (The original used a for/else whose
            # `else` always ran since the loop never breaks.)
            results = self.admin_manager.congress_client.list_policy_rows(
                'classification', 'port_security_group')
            expected_sg_name = self.servers[0]['security_groups'][0]['name']
            for row in results['results']:
                if (row['data'][0] == ports[0]['id'] and
                        row['data'][1] == expected_sg_name):
                    return True
            return False

        if not test.call_until_true(func=check_data,
                                    duration=100, sleep_for=5):
            raise exceptions.TimeoutException("Data did not converge in time "
                                              "or failure in server")
Example #20
0
    def test_cinder_volumes_table(self):
        """Verify the Congress 'volumes' table converges to cinder's data."""
        schema = (
            self.admin_manager.congress_client.show_datasource_table_schema(
                self.datasource_id, "volumes")["columns"])
        # Column position of the volume id within each datasource row.
        id_col = next(i for i, c in enumerate(schema) if c["name"] == "id")

        def _check_data_table_cinder_volumes():
            # Fetch data from cinder each time, because this test may start
            # before cinder has all the users.
            volumes_map = {v["id"]: v for v in self.cinder.list_volumes()}

            rows = self.admin_manager.congress_client.list_datasource_rows(
                self.datasource_id, "volumes")
            for row in rows["results"]:
                volume = volumes_map.get(row["data"][id_col])
                if volume is None:
                    # Congress has a volume cinder doesn't know (yet).
                    return False
                for col, spec in enumerate(schema):
                    if str(row["data"][col]) != str(volume[spec["name"]]):
                        return False
            return True

        if not test.call_until_true(func=_check_data_table_cinder_volumes,
                                    duration=100, sleep_for=5):
            raise exceptions.TimeoutException("Data did not converge in time "
                                              "or failure in server")
Example #21
0
    def test_nova_datasource_driver_servers(self):
        """Check the booted server shows up in the Congress 'servers' table."""
        self._setup_network_and_servers()

        def _check_data_table_nova_servers():
            # Succeeds as soon as ANY datasource row fully matches the
            # first test server on the listed keys.
            results = \
                self.admin_manager.congress_policy_client.list_datasource_rows(
                    'nova', 'servers')
            keys = ['id', 'name', 'hostId', 'status', 'tenant_id',
                    'user_id', 'image', 'flavor']
            for row in results['results']:
                match = True
                for index in range(len(keys)):
                    # image/flavor are nested dicts; compare their ids.
                    if keys[index] in ['image', 'flavor']:
                        val = self.servers[0][keys[index]]['id']
                    else:
                        val = self.servers[0][keys[index]]

                    if row['data'][index] != val:
                        match = False
                        break
                if match:
                    return True
            return False

        if not test.call_until_true(func=_check_data_table_nova_servers,
                                    duration=20, sleep_for=4):
            raise exceptions.TimeoutException("Data did not converge in time "
                                              "or failure in server")
# Exemple #22 (snippet-separator artifact from extraction)
    def test_neutronv2_security_group_rules_table(self):
        """Validate congress 'security_group_rules' rows against neutron."""
        sgrs_schema = (
            self.admin_manager.congress_client.show_datasource_table_schema(
                self.datasource_id, 'security_group_rules')['columns'])

        def _check_data():
            neutron_sgs = self.neutron_client.list_security_groups()
            # Index every rule of every security group by its id.
            sgrs_map = {
                rule['id']: rule
                for group in neutron_sgs['security_groups']
                for rule in group['security_group_rules']
            }

            congress = self.admin_manager.congress_client
            congress.request_refresh(self.datasource_id)
            time.sleep(1)

            rows = congress.list_datasource_rows(
                self.datasource_id, 'security_group_rules')

            # Column 1 of each row holds the rule id; every column must
            # match the corresponding neutron field (stringified).
            for row in rows['results']:
                rule = sgrs_map[row['data'][1]]
                for pos, col in enumerate(sgrs_schema):
                    if str(row['data'][pos]) != str(rule[col['name']]):
                        return False
            return True

        if not test.call_until_true(func=_check_data,
                                    duration=200, sleep_for=10):
            raise exceptions.TimeoutException("Data did not converge in time "
                                              "or failure in server")
# Exemple #23 (snippet-separator artifact from extraction)
    def test_nova_datasource_driver_flavors(self):
        """Check that congress' 'flavors' table agrees with a nova flavor."""
        def _check_data_table_nova_flavors():
            # Fetch data from nova each time, because this test may start
            # before nova has all the users.
            flavors = self.flavors_client.list_flavors(detail=True)
            flavor_id_map = {}
            for flavor in flavors:
                flavor_id_map[flavor['id']] = flavor

            results = (
                self.admin_manager.congress_client.list_datasource_rows(
                    self.datasource_id, 'flavors'))
            # TODO(alexsyip): Not sure what the following OS-FLV-EXT-DATA:
            # prefix is for.
            keys = ['id', 'name', 'vcpus', 'ram', 'disk',
                    'OS-FLV-EXT-DATA:ephemeral', 'rxtx_factor']
            for row in results['results']:
                match = True
                try:
                    # Column 0 is assumed to be the flavor id.
                    flavor_row = flavor_id_map[row['data'][0]]
                except KeyError:
                    # Congress has a flavor nova no longer reports: the
                    # data has not converged yet.
                    return False
                for index in range(len(keys)):
                    if row['data'][index] != flavor_row[keys[index]]:
                        match = False
                        break
                if match:
                    # NOTE(review): this succeeds as soon as ONE row fully
                    # matches its nova flavor; remaining rows are not
                    # checked — confirm this is intentional.
                    return True
            return False

        if not test.call_until_true(func=_check_data_table_nova_flavors,
                                    duration=100, sleep_for=5):
            raise exceptions.TimeoutException("Data did not converge in time "
                                              "or failure in server")
# Exemple #24 (snippet-separator artifact from extraction)
    def test_neutronv2_security_group_rules_table(self):
        """Validate the congress security_group_rules table against neutron."""
        sgrs_schema = self.admin_manager.congress_client.show_datasource_table_schema(
            self.datasource_id, "security_group_rules"
        )["columns"]

        @helper.retry_on_exception
        def _check_data():
            groups = self.security_groups_client.list_security_groups()
            sgrs_map = {}  # security_group_rules indexed by rule id
            for group in groups["security_groups"]:
                sgrs_map.update(
                    (rule["id"], rule) for rule in group["security_group_rules"]
                )

            congress = self.admin_manager.congress_client
            congress.request_refresh(self.datasource_id)
            time.sleep(1)

            rows = congress.list_datasource_rows(self.datasource_id, "security_group_rules")

            # Column 1 of each row is the rule id; every column must match
            # the corresponding neutron field (congress stringifies values).
            for row in rows["results"]:
                rule = sgrs_map[row["data"][1]]
                mismatch = any(
                    str(row["data"][pos]) != str(rule[col["name"]])
                    for pos, col in enumerate(sgrs_schema)
                )
                if mismatch:
                    return False
            return True

        if not test.call_until_true(func=_check_data, duration=200, sleep_for=10):
            raise exceptions.TimeoutException("Data did not converge in time " "or failure in server")
    def test_execute_basic_action_plan(self):
        """Execute an action plan based on the BASIC strategy

        - create an audit template with the basic strategy
        - run the audit to create an action plan
        - get the action plan
        - run the action plan
        - get results and make sure it succeeded
        """
        self.addCleanup(self.rollback_compute_nodes_status)
        self._create_one_instance_per_host()
        _, goal = self.client.show_goal(self.BASIC_GOAL)
        _, audit_template = self.create_audit_template(goal['uuid'])
        _, audit = self.create_audit(audit_template['uuid'])

        # The BASIC strategy inspects real compute nodes, so allow up to
        # 10 minutes (vs. 30s for DUMMY) for the audit to complete.
        self.assertTrue(test.call_until_true(
            func=functools.partial(self.has_audit_succeeded, audit['uuid']),
            duration=600,
            sleep_for=2
        ))
        _, action_plans = self.client.list_action_plans(
            audit_uuid=audit['uuid'])
        action_plan = action_plans['action_plans'][0]

        _, action_plan = self.client.show_action_plan(action_plan['uuid'])

        # Execute the action by changing its state to PENDING
        _, updated_ap = self.client.update_action_plan(
            action_plan['uuid'],
            patch=[{'path': '/state', 'op': 'replace', 'value': 'PENDING'}]
        )

        self.assertTrue(test.call_until_true(
            func=functools.partial(
                self.has_action_plan_finished, action_plan['uuid']),
            duration=600,
            sleep_for=2
        ))
        _, finished_ap = self.client.show_action_plan(action_plan['uuid'])
        _, action_list = self.client.list_actions(
            action_plan_uuid=finished_ap["uuid"])

        # The PATCH response may race with the applier, so the state it
        # reports can already be ONGOING rather than PENDING.
        self.assertIn(updated_ap['state'], ('PENDING', 'ONGOING'))
        self.assertEqual('SUCCEEDED', finished_ap['state'])

        # Every individual action of the plan must have succeeded as well.
        for action in action_list['actions']:
            self.assertEqual('SUCCEEDED', action.get('state'))
# Exemple #26 (snippet-separator artifact from extraction)
    def _prepare_and_test(self, address6_mode, n_subnets6=1):
        """Boot two servers on a dual-stack network and verify v4/v6 paths.

        Checks that every API-assigned address (IPv4 and each IPv6 subnet)
        is actually configured on the guest NIC, then pings between the two
        guests over v4 and, when the image ships ping6, over each v6 subnet.

        :param address6_mode: IPv6 address assignment mode for the subnets
        :param n_subnets6: number of IPv6 subnets to attach
        """
        self.prepare_network(address6_mode=address6_mode,
                             n_subnets6=n_subnets6)

        sshv4_1, ips_from_api_1 = self.prepare_server()
        sshv4_2, ips_from_api_2 = self.prepare_server()

        def guest_has_address(ssh, addr):
            # True when the guest's own 'ip address' output lists addr.
            return addr in ssh.get_ip_list()

        # get addresses assigned to vNIC as reported by 'ip address' utility
        ips_from_ip_1 = sshv4_1.get_ip_list()
        ips_from_ip_2 = sshv4_2.get_ip_list()
        self.assertIn(ips_from_api_1['4'], ips_from_ip_1)
        self.assertIn(ips_from_api_2['4'], ips_from_ip_2)
        for i in range(n_subnets6):
            # v6 should be configured since the image supports it
            # It can take time for ipv6 automatic address to get assigned
            srv1_v6_addr_assigned = functools.partial(
                guest_has_address, sshv4_1, ips_from_api_1['6'][i])

            srv2_v6_addr_assigned = functools.partial(
                guest_has_address, sshv4_2, ips_from_api_2['6'][i])

            self.assertTrue(test.call_until_true(srv1_v6_addr_assigned,
                                                 CONF.compute.ping_timeout, 1))

            self.assertTrue(test.call_until_true(srv2_v6_addr_assigned,
                                                 CONF.compute.ping_timeout, 1))

        # IPv4 connectivity must work in both directions.
        result = sshv4_1.ping_host(ips_from_api_2['4'])
        self.assertIn('0% packet loss', result)
        result = sshv4_2.ping_host(ips_from_api_1['4'])
        self.assertIn('0% packet loss', result)

        # Some VM (like cirros) may not have ping6 utility
        # ('whereis ping6' prints just 'ping6:' when the binary is absent).
        result = sshv4_1.exec_command('whereis ping6')
        is_ping6 = False if result == 'ping6:\n' else True
        if is_ping6:
            for i in range(n_subnets6):
                result = sshv4_1.ping_host(ips_from_api_2['6'][i])
                self.assertIn('0% packet loss', result)
                result = sshv4_2.ping_host(ips_from_api_1['6'][i])
                self.assertIn('0% packet loss', result)
        else:
            LOG.warning('Ping6 is not available, skipping')
    def _wait_for_active(self, fw_id):
        """Block until firewall *fw_id* reports ACTIVE, else time out."""
        def _is_active():
            _, body = self.client.show_firewall(fw_id)
            return body["firewall"]["status"] == "ACTIVE"

        reached = test.call_until_true(_is_active,
                                       CONF.network.build_timeout,
                                       CONF.network.build_interval)
        if not reached:
            m = "Timed out waiting for firewall %s to become ACTIVE." % fw_id
            raise exceptions.TimeoutException(m)
    def _run_and_wait(self, key, data, content_type='application/json',
                      headers=None):
        """Send one log message and wait until it is searchable.

        :param key: unique marker to search for in elasticsearch
        :param data: log payload to send
        :return: the elasticsearch search response for *key*
        """
        def _message_indexed():
            return self.logs_search_client.count_search_messages(key) > 0

        # The marker must not exist before we send anything.
        self.assertEqual(0, self.logs_search_client.count_search_messages(key),
                         'Find log message in elasticsearch: {0}'.format(key))

        headers = base._get_headers(headers, content_type)
        data = base._get_data(data, content_type)

        response, _ = self.logs_client.send_single_log(data, headers)
        self.assertEqual(204, response.status)

        # Poll until the message shows up, then require exactly one hit.
        test.call_until_true(_message_indexed, _RETRY_COUNT, _RETRY_WAIT)
        response = self.logs_search_client.search_messages(key)
        self.assertEqual(1, len(response))

        return response
    def _wait_until_ready(self, fw_id):
        """Wait for firewall *fw_id* to reach ACTIVE or CREATED state."""
        target_states = ("ACTIVE", "CREATED")

        def _reached_target():
            _, body = self.client.show_firewall(fw_id)
            return body["firewall"]["status"] in target_states

        reached = test.call_until_true(_reached_target,
                                       CONF.network.build_timeout,
                                       CONF.network.build_interval)
        if not reached:
            m = "Timed out waiting for firewall %s to reach %s state(s)" % (fw_id, target_states)
            raise exceptions.TimeoutException(m)
    def _wait_for_active(self, fw_id):
        """Poll firewall *fw_id* until its status becomes ACTIVE."""
        def _active():
            resp, body = self.client.show_firewall(fw_id)
            # Each poll also verifies the API call itself returned 200.
            self.assertEqual('200', resp['status'])
            return body['firewall']['status'] == 'ACTIVE'

        reached = test.call_until_true(_active, CONF.network.build_timeout,
                                       CONF.network.build_interval)
        if not reached:
            m = 'Timed out waiting for firewall %s to become ACTIVE.' % fw_id
            raise exceptions.TimeoutException(m)
    def _wait_until_ready(self, fw_id):
        """Wait for firewall *fw_id* to settle in ACTIVE or CREATED."""
        target_states = ('ACTIVE', 'CREATED')

        def _reached():
            body = self.client.show_firewall(fw_id)
            return body['firewall']['status'] in target_states

        reached = test.call_until_true(_reached, CONF.network.build_timeout,
                                       CONF.network.build_interval)
        if not reached:
            m = ("Timed out waiting for firewall %s to reach %s state(s)" %
                 (fw_id, target_states))
            raise exceptions.TimeoutException(m)
# Exemple #32 (snippet-separator artifact from extraction)
    def _wait_for_volume_available_on_the_system(self, ip_address,
                                                 private_key):
        """Wait until the attached volume device shows up in the guest.

        Polls the guest's partition table over SSH until
        CONF.compute.volume_device_name appears.

        :param ip_address: address of the guest to SSH into
        :param private_key: SSH private key for the guest
        :raises exceptions.TimeoutException: if the device never appears
            within CONF.compute.build_timeout
        """
        ssh = self.get_remote_client(ip_address, private_key=private_key)

        def _device_present():
            part = ssh.get_partitions()
            # Lazy %-args: the message is rendered only when DEBUG logging
            # is actually enabled (was eagerly formatted with '%').
            LOG.debug("Partitions:%s", part)
            return CONF.compute.volume_device_name in part

        if not test.call_until_true(_device_present,
                                    CONF.compute.build_timeout,
                                    CONF.compute.build_interval):
            # Raise an instance with a message (was a bare class raise,
            # which produced an exception with no context).
            raise exceptions.TimeoutException(
                "Volume device %s never appeared on instance at %s" %
                (CONF.compute.volume_device_name, ip_address))
# Exemple #33 (snippet-separator artifact from extraction)
    def resource_cleanup(cls):
        """Ensure that all created objects get destroyed."""
        try:
            action_plans_to_be_deleted = set()
            # Phase 1: Make sure all objects are in an idle state
            # (deleting an audit or action plan mid-transition would fail).
            for audit_uuid in cls.created_audits:
                test.call_until_true(
                    func=functools.partial(
                        cls.is_audit_idle, audit_uuid),
                    duration=30,
                    sleep_for=.5
                )

            for audit_uuid in cls.created_action_plans_audit_uuids:
                _, action_plans = cls.client.list_action_plans(
                    audit_uuid=audit_uuid)
                action_plans_to_be_deleted.update(
                    ap['uuid'] for ap in action_plans['action_plans'])

                for action_plan in action_plans['action_plans']:
                    test.call_until_true(
                        func=functools.partial(
                            cls.is_action_plan_idle, action_plan['uuid']),
                        duration=30,
                        sleep_for=.5
                    )

            # Phase 2: Delete them all
            for action_plan_uuid in action_plans_to_be_deleted:
                cls.delete_action_plan(action_plan_uuid)

            # Iterate over copies: delete_audit/delete_audit_template
            # presumably mutate these tracking collections — TODO confirm.
            for audit_uuid in cls.created_audits.copy():
                cls.delete_audit(audit_uuid)

            for audit_template_uuid in cls.created_audit_templates.copy():
                cls.delete_audit_template(audit_template_uuid)

        finally:
            # Always run the base-class cleanup even if deletion failed.
            super(BaseInfraOptimTest, cls).resource_cleanup()
# Exemple #34 (snippet-separator artifact from extraction)
    def _ping_ip_address(self, ip_address):
        """Return True once *ip_address* answers a single ping.

        Retries for up to 20 seconds with 1 second between attempts.

        :param ip_address: address to ping
        :return: True if a ping succeeded within the window, else False
        """
        cmd = ['ping', '-c1', '-w1', ip_address]

        def ping():
            proc = subprocess.Popen(cmd,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
            proc.wait()
            # Explicit boolean instead of the previous implicit None
            # fall-through on failure.
            return proc.returncode == 0

        # TODO(mnewby) Allow configuration of execution and sleep duration.
        return test.call_until_true(ping, 20, 1)
# Exemple #35 (snippet-separator artifact from extraction)
    def resource_setup(cls):
        """Create a DUMMY-goal audit and stash its first action plan."""
        super(TestShowListActionPlan, cls).resource_setup()
        _, cls.goal = cls.client.show_goal("DUMMY")
        _, cls.audit_template = cls.create_audit_template(cls.goal['uuid'])
        _, cls.audit = cls.create_audit(cls.audit_template['uuid'])

        # Wait for the audit to succeed before touching its action plans.
        audit_done = functools.partial(cls.has_audit_succeeded,
                                       cls.audit['uuid'])
        assert test.call_until_true(func=audit_done, duration=30, sleep_for=.5)

        _, action_plans = cls.client.list_action_plans(
            audit_uuid=cls.audit['uuid'])
        cls.action_plan = action_plans['action_plans'][0]
# Exemple #36 (snippet-separator artifact from extraction)
    def test_neutronv2_routers_tables(self):
        """Validate the congress 'routers' and 'external_gateway_infos'
        tables against what neutron reports.
        """
        router_schema = (
            self.admin_manager.congress_client.show_datasource_table_schema(
                self.datasource_id, 'routers')['columns'])

        ext_gw_info_schema = (
            self.admin_manager.congress_client.show_datasource_table_schema(
                self.datasource_id, 'external_gateway_infos')['columns'])

        @helper.retry_on_exception
        def _check_data():
            routers_from_neutron = self.routers_client.list_routers()
            router_map = {}
            for router in routers_from_neutron['routers']:
                router_map[router['id']] = router

            client = self.admin_manager.congress_client
            # Ask congress to re-pull from neutron, then give it a moment.
            client.request_refresh(self.datasource_id)
            time.sleep(1)

            routers = (client.list_datasource_rows(self.datasource_id,
                                                   'routers'))

            ext_gw_info = (client.list_datasource_rows(
                self.datasource_id, 'external_gateway_infos'))

            # Validate routers table (column 0 is the router id; congress
            # stringifies values, hence the str() on both sides).
            for row in routers['results']:
                router_row = router_map[row['data'][0]]
                for index in range(len(router_schema)):
                    if (str(row['data'][index]) != str(
                            router_row[router_schema[index]['name']])):
                        return False

            # validate external_gateway_infos
            for row in ext_gw_info['results']:
                router_ext_gw_info = (
                    router_map[row['data'][0]]['external_gateway_info'])
                # populate router_id (neutron nests gateway info without it)
                router_ext_gw_info['router_id'] = row['data'][0]
                for index in range(len(ext_gw_info_schema)):
                    val = router_ext_gw_info[ext_gw_info_schema[index]['name']]
                    if (str(row['data'][index]) != str(val)):
                        return False
            return True

        if not test.call_until_true(
                func=_check_data, duration=200, sleep_for=10):
            raise exceptions.TimeoutException("Data did not converge in time "
                                              "or failure in server")
# Exemple #37 (snippet-separator artifact from extraction)
    def test_cinder_volumes_table(self):
        """Check that the congress 'volumes' table converges on the same
        set of volume IDs as cinder reports (per-column checking is
        disabled; see the FIXME below).
        """
        volume_schema = (
            self.admin_manager.congress_client.show_datasource_table_schema(
                self.datasource_id, 'volumes')['columns'])
        volume_id_col = next(i for i, c in enumerate(volume_schema)
                             if c['name'] == 'id')

        def _check_data_table_cinder_volumes():
            # Fetch data from cinder each time, because this test may start
            # before cinder has all the users.
            volumes = self.cinder.list_volumes()['volumes']
            LOG.debug('cinder volume list: %s', volumes)
            volumes_map = {}
            for volume in volumes:
                volumes_map[volume['id']] = volume

            results = (self.admin_manager.congress_client.list_datasource_rows(
                self.datasource_id, 'volumes'))
            LOG.debug('congress cinder volumes table: %s', results)
            # check that congress and cinder return the same volume IDs
            rows_volume_id_set = set()
            for row in results['results']:
                rows_volume_id_set.add(row['data'][volume_id_col])
            if rows_volume_id_set != frozenset(volumes_map.keys()):
                LOG.debug('volumes IDs mismatch')
                return False
            # FIXME(ekcs): the following code is broken because 'user_id'
            # and 'description' fields do not appear in results provided by
            # [tempest].os.volumes_client.list_volumes().
            # Detailed checking disabled for now. Re-enable when fixed.
            # It appears the code was written for v1 volumes client but never
            # worked. The problem was not evident because the list of volumes
            # was empty.
            # Additional adaptation is needed for v2 volumes client.
            # for row in results['results']:
            #     try:
            #         volume_row = volumes_map[row['data'][volume_id_col]]
            #     except KeyError:
            #         return False
            #     for index in range(len(volume_schema)):
            #         if (str(row['data'][index]) !=
            #                 str(volume_row[volume_schema[index]['name']])):
            #             return False
            return True

        if not test.call_until_true(func=_check_data_table_cinder_volumes,
                                    duration=100,
                                    sleep_for=5):
            raise exceptions.TimeoutException("Data did not converge in time "
                                              "or failure in server")
    def wait_ipsec_site_connection(self, site_id, status=None):
        """Wait for an ipsec site connection to reach *status*.

        :return: the connection body, or None when the connection vanished
            and no target status was requested
        :raises exc.TimeoutException: status not reached in time
        """
        def _status_reached():
            conn = self.show_ipsec_site_connection(site_id)
            return conn['status'] == status

        try:
            reached = test.call_until_true(_status_reached,
                                           CONF.network.build_timeout,
                                           CONF.network.build_interval)
            if not reached:
                m = ("Timed out waiting for ipsec site connection %s "
                     "to reach %s state" % (site_id, status))
                raise exc.TimeoutException(m)
            return self.show_ipsec_site_connection(site_id)
        except exc.NotFound as ex:
            # A vanished connection is only an error when a concrete target
            # status was expected.
            if status:
                raise ex
# Exemple #39 (snippet-separator artifact from extraction)
    def wait_for_table_active(self,
                              table_name,
                              timeout=120,
                              interval=1,
                              alt=False):
        """Poll describe_table until *table_name* becomes ACTIVE.

        Fails the test immediately on CREATE_FAILED; returns whether ACTIVE
        was reached within *timeout*.
        """
        def _table_is_active():
            client = self.alt_client if alt else self.client
            headers, body = client.describe_table(table_name)
            if "table" in body and "table_status" in body["table"]:
                status = body["table"]["table_status"]
                if status == TABLE_STATUS_CREATE_FAILED:
                    self.fail('Table creation failure.')
                return status == TABLE_STATUS_ACTIVE

        return test.call_until_true(_table_is_active, timeout, interval)
# Exemple #40 (snippet-separator artifact from extraction)
    def test_execute_dummy_action_plan(self):
        """Run a DUMMY-goal action plan end-to-end and check it succeeds."""
        _, goal = self.client.show_goal("DUMMY")
        _, audit_template = self.create_audit_template(goal['uuid'])
        _, audit = self.create_audit(audit_template['uuid'])

        audit_done = functools.partial(self.has_audit_succeeded,
                                       audit['uuid'])
        self.assertTrue(test.call_until_true(func=audit_done,
                                             duration=30, sleep_for=.5))

        _, action_plans = self.client.list_action_plans(
            audit_uuid=audit['uuid'])
        action_plan = action_plans['action_plans'][0]

        _, action_plan = self.client.show_action_plan(action_plan['uuid'])

        # Execute the action by changing its state to PENDING
        patch = [{'path': '/state', 'op': 'replace', 'value': 'PENDING'}]
        _, updated_ap = self.client.update_action_plan(action_plan['uuid'],
                                                       patch=patch)

        plan_done = functools.partial(self.has_action_plan_finished,
                                      action_plan['uuid'])
        self.assertTrue(test.call_until_true(func=plan_done,
                                             duration=30, sleep_for=.5))

        _, finished_ap = self.client.show_action_plan(action_plan['uuid'])

        # The PATCH response may already show ONGOING if the applier raced us.
        self.assertIn(updated_ap['state'], ('PENDING', 'ONGOING'))
        self.assertEqual('SUCCEEDED', finished_ap['state'])
# Exemple #41 (snippet-separator artifact from extraction)
    def create_and_add_security_group_to_server(self, server):
        """Create a security group, attach it to *server* and wait for it."""
        secgroup = self._create_security_group()
        self.servers_client.add_security_group(server['id'], secgroup['name'])
        self.addCleanup(self.servers_client.remove_security_group,
                        server['id'], secgroup['name'])

        def _secgroup_attached():
            body = self.servers_client.show_server(server['id'])['server']
            return {'name': secgroup['name']} in body['security_groups']

        attached = test.call_until_true(_secgroup_attached,
                                        CONF.compute.build_timeout,
                                        CONF.compute.build_interval)
        if not attached:
            msg = ('Timed out waiting for adding security group %s to server '
                   '%s' % (secgroup['id'], server['id']))
            raise exceptions.TimeoutException(msg)
    def call_until_valid(self, func, duration, *args, **kwargs):
        """Call *func* until it returns a valid response or time runs out.

        Tenant usage doesn't become available immediately after creating a
        VM, so retry InvalidHTTPResponseBody for up to *duration* seconds.

        :param func: callable to invoke with *args/**kwargs
        :param duration: seconds to keep retrying
        :return: the first valid response from *func*
        """
        def is_valid():
            try:
                self.resp = func(*args, **kwargs)
                return True
            except e.InvalidHTTPResponseBody:
                # Not ready yet; call_until_true will retry.
                return False

        # assertTrue instead of assertEqual(..., True): same check,
        # idiomatic form with the same custom failure message.
        self.assertTrue(
            test.call_until_true(is_valid, duration, 1),
            "%s not return valid response in %s secs" %
            (func.__name__, duration))
        return self.resp
# Exemple #43 (snippet-separator artifact from extraction)
    def wait_for_table_deleted(self,
                               table_name,
                               timeout=120,
                               interval=1,
                               alt=False):
        """Poll describe_table until *table_name* is gone (NotFound).

        Fails the test immediately on DELETE_FAILED; returns whether the
        table disappeared within *timeout*.
        """
        def _table_gone():
            client = self.alt_client if alt else self.client
            try:
                headers, body = client.describe_table(table_name)
                if "table" in body and "table_status" in body["table"]:
                    status = body["table"]["table_status"]
                    if status == TABLE_STATUS_DELETE_FAILED:
                        self.fail('Table deletion failure.')
            except exceptions.NotFound:
                return True

        return test.call_until_true(_table_gone, timeout, interval)
# Exemple #44 (snippet-separator artifact from extraction)
    def wait_for_node(self, node_name):
        """Wait for *node_name* to be discovered, then wait for its
        introspection to finish.
        """
        def _node_discovered():
            try:
                self.node_show(node_name)
            except lib_exc.NotFound:
                return False
            return True

        discovered = test.call_until_true(
            _node_discovered,
            duration=CONF.baremetal_introspection.discovery_timeout,
            sleep_for=20)
        if not discovered:
            msg = ("Timed out waiting for node %s " % node_name)
            raise lib_exc.TimeoutException(msg)

        inspected_node = self.node_show(self.node_info['name'])
        self.wait_for_introspection_finished(inspected_node['uuid'])
# Exemple #45 (snippet-separator artifact from extraction)
    def test_create_action_plan(self):
        """A succeeded audit must yield a RECOMMENDED action plan."""
        _, audit_template = self.create_audit_template()
        _, audit = self.create_audit(audit_template['uuid'])

        audit_done = functools.partial(self.has_audit_succeeded,
                                       audit['uuid'])
        self.assertTrue(test.call_until_true(func=audit_done,
                                             duration=30, sleep_for=.5))

        _, action_plans = self.client.list_action_plans(
            audit_uuid=audit['uuid'])
        action_plan = action_plans['action_plans'][0]

        _, action_plan = self.client.show_action_plan(action_plan['uuid'])

        self.assertEqual(audit['uuid'], action_plan['audit_uuid'])
        self.assertEqual('RECOMMENDED', action_plan['state'])
    def test_all_loaded_datasources_are_initialized(self):
        """Every registered congress datasource must report initialized."""
        datasources = self.admin_manager.congress_client.list_datasources()

        def _all_initialized():
            congress = self.admin_manager.congress_client
            # The status API reports 'initialized' as the string 'True'.
            return all(
                congress.list_datasource_status(
                    ds['id'])['initialized'] == 'True'
                for ds in datasources['results'])

        if not test.call_until_true(func=_all_initialized,
                                    duration=20, sleep_for=4):
            raise exceptions.TimeoutException("Data did not converge in time "
                                              "or failure in server")
# Exemple #47 (snippet-separator artifact from extraction)
    def create_action_plan(cls, audit_template_uuid, **audit_kwargs):
        """Wrapper utility for creating a test action plan

        :param audit_template_uuid: Audit template UUID to use
        :param audit_kwargs: Dict of audit properties to set
        :return: The action plan as dict
        """
        _, audit = cls.create_audit(audit_template_uuid, **audit_kwargs)
        audit_uuid = audit['uuid']

        # Block until the audit has produced its plan.
        audit_done = functools.partial(cls.has_audit_succeeded, audit_uuid)
        assert test.call_until_true(func=audit_done, duration=30, sleep_for=.5)

        _, action_plans = cls.client.list_action_plans(audit_uuid=audit_uuid)

        # The freshly created plan is the first one listed for this audit.
        return action_plans['action_plans'][0]
    def test_all_datasources_have_tables(self):
        """Every datasource driver must expose at least one table."""
        datasources = self.admin_manager.congress_client.list_datasources()

        def _every_driver_has_tables():
            congress = self.admin_manager.congress_client
            # NOTE(arosen): if there are no results here we return false as
            # there is something wrong with a driver as it doesn't expose
            # any tables.
            return all(
                congress.list_datasource_tables(ds['id'])['results']
                for ds in datasources['results'])

        if not test.call_until_true(func=_every_driver_has_tables,
                                    duration=20, sleep_for=4):
            raise exceptions.TimeoutException("Data did not converge in time "
                                              "or failure in server")
    def _test_remote_connectivity(self, source, dest, should_succeed=True):
        """
        check ping server via source ssh connection

        :param source: RemoteClient: an ssh connection from which to ping
        :param dest: and IP to ping against
        :param should_succeed: boolean should ping succeed or not
        :returns: boolean -- should_succeed == ping
        :returns: ping is false if ping failed
        """
        def _ping_matches_expectation():
            # A failed ping surfaces as SSHExecCommandFailed; that outcome
            # is "correct" exactly when the ping was expected to fail.
            try:
                source.ping_host(dest)
            except exceptions.SSHExecCommandFailed as ex:
                LOG.debug(ex)
                return not should_succeed
            return should_succeed

        return call_until_true(_ping_matches_expectation,
                               CONF.compute.ping_timeout, 1)
    def test_glancev2_images_table(self):
        """Validate the congress glancev2 'images' table against glance,
        column by column, tolerating the optional kernel_id/ramdisk_id.
        """
        image_schema = (
            self.admin_manager.congress_client.show_datasource_table_schema(
                'glancev2', 'images')['columns'])
        image_id_col = next(i for i, c in enumerate(image_schema)
                            if c['name'] == 'id')

        def _check_data_table_glancev2_images():
            # Fetch data from glance each time, because this test may start
            # before glance has all the users.
            images = self.glancev2.image_list()
            image_map = {}
            for image in images:
                image_map[image['id']] = image

            results = (self.admin_manager.congress_client.list_datasource_rows(
                'glancev2', 'images'))
            for row in results['results']:
                try:
                    image_row = image_map[row['data'][image_id_col]]
                except KeyError:
                    # Congress knows an image glance doesn't: not converged.
                    return False
                for index in range(len(image_schema)):
                    # glancev2 doesn't return kernel_id/ramdisk_id if
                    # it isn't present...
                    if ((image_schema[index]['name'] == 'kernel_id'
                         and 'kernel_id' not in row['data'])
                            or (image_schema[index]['name'] == 'ramdisk_id'
                                and 'ramdisk_id' not in row['data'])):
                        continue

                    # FIXME(arosen): congress-server should retain the type
                    # but doesn't today.
                    if (str(row['data'][index]) != str(
                            image_row[image_schema[index]['name']])):
                        return False
            return True

        if not test.call_until_true(func=_check_data_table_glancev2_images,
                                    duration=20,
                                    sleep_for=4):
            raise exceptions.TimeoutException("Data did not converge in time "
                                              "or failure in server")
Exemple #51
0
    def test_delete_action_plan(self):
        """A deleted action plan must no longer be retrievable via the API."""
        _, audit_template = self.create_audit_template()
        _, audit = self.create_audit(audit_template['uuid'])

        # Wait for the audit to finish so an action plan exists.
        audit_done = functools.partial(self.has_audit_succeeded, audit['uuid'])
        self.assertTrue(test.call_until_true(func=audit_done,
                                             duration=30,
                                             sleep_for=.5))

        _, listing = self.client.list_action_plans(audit_uuid=audit['uuid'])
        first_plan = listing['action_plans'][0]
        _, action_plan = self.client.show_action_plan(first_plan['uuid'])

        self.client.delete_action_plan(action_plan['uuid'])

        # Reading the plan back must now fail with NotFound.
        self.assertRaises(lib_exc.NotFound, self.client.show_action_plan,
                          action_plan['uuid'])
Exemple #52
0
    def test_nova_datasource_driver_servers(self):
        """Verify a congress 'servers' row matches the nova server we built."""
        self._setup_network_and_servers()

        server_schema = (
            self.admin_manager.congress_client.show_datasource_table_schema(
                self.datasource_id, 'servers')['columns'])

        # Map schema column names onto the keys nova uses in its responses.
        renames = {'host_id': 'hostId',
                   'image_id': 'image',
                   'flavor_id': 'flavor'}
        keys = [renames.get(col['name'], col['name']) for col in server_schema]

        def _row_matches_server():
            rows = (
                self.admin_manager.congress_client.list_datasource_rows(
                    self.datasource_id, 'servers'))
            server = self.servers[0]
            for row in rows['results']:
                for pos, key in enumerate(keys):
                    # image/flavor are nested dicts; compare by their id.
                    if key in ('image', 'flavor'):
                        expected = server[key]['id']
                    else:
                        expected = server[key]

                    if row['data'][pos] != expected:
                        break
                else:
                    # Every column matched this row.
                    return True
            return False

        if not test.call_until_true(func=_row_matches_server,
                                    duration=20, sleep_for=4):
            raise exceptions.TimeoutException("Data did not converge in time "
                                              "or failure in server")
    def verify_metadata(self):
        """Check the metadata service reports this server's floating IP."""
        if not (self.run_ssh
                and CONF.compute_feature_enabled.metadata_service):
            return

        md_url = 'http://169.254.169.254/latest/meta-data/public-ipv4'

        def exec_cmd_and_verify_output():
            cmd = 'curl ' + md_url
            result = self.ssh_client.exec_command(cmd)
            # Empty output means the metadata service is not ready yet, so
            # keep polling; non-empty output must equal the floating IP.
            if result:
                msg = ('Failed while verifying metadata on server. Result '
                       'of command "%s" is NOT "%s".' % (cmd, self.fip))
                self.assertEqual(self.fip, result, msg)
                return 'Verification is successful!'

        if not test.call_until_true(exec_cmd_and_verify_output,
                                    CONF.compute.build_timeout,
                                    CONF.compute.build_interval):
            raise exceptions.TimeoutException('Timed out while waiting to '
                                              'verify metadata on server. '
                                              '%s is empty.' % md_url)
Exemple #54
0
    def wait_for_compute_node_setup(cls):
        """Block until hypervisors and compute services agree (>= 2 nodes)."""

        def _are_compute_nodes_setup():
            try:
                hypervisors = cls.mgr.hypervisor_client.list_hypervisors(
                    detail=True)['hypervisors']
                hypervisor_hosts = {hyp['hypervisor_hostname']
                                    for hyp in hypervisors}
                service_hosts = {service['host']
                                 for service in cls.get_compute_nodes_setup()}

                # Both views must list the same hosts and at least two
                # compute nodes must be available.
                return (hypervisor_hosts == service_hosts
                        and len(hypervisors) >= 2)
            except Exception:
                # Any API hiccup counts as "not ready yet"; keep polling.
                return False

        assert test.call_until_true(func=_are_compute_nodes_setup,
                                    duration=600,
                                    sleep_for=2)
    def test_delete_action_plan(self):
        """Deleting an action plan makes subsequent reads raise NotFound."""
        _, goal = self.client.show_goal("dummy")
        _, audit_template = self.create_audit_template(goal['uuid'])
        _, audit = self.create_audit(audit_template['uuid'])

        # Wait for the audit to run to completion.
        finished = functools.partial(self.has_audit_finished, audit['uuid'])
        self.assertTrue(test.call_until_true(func=finished,
                                             duration=30,
                                             sleep_for=.5))

        _, listing = self.client.list_action_plans(audit_uuid=audit['uuid'])
        plan_ref = listing['action_plans'][0]
        _, action_plan = self.client.show_action_plan(plan_ref['uuid'])

        self.client.delete_action_plan(action_plan['uuid'])

        self.assertRaises(exceptions.NotFound, self.client.show_action_plan,
                          action_plan['uuid'])
    def _confirm_notifications(self, container_name, obj_name):
        """Poll until swift notifications mention the container and object.

        Fails the test if matching 'storage.api.request' samples are not
        seen within NOTIFICATIONS_WAIT seconds.
        """

        def _check_samples():
            # True only once notifications reference both the expected
            # container and the expected object; False causes another poll.
            samples = self.telemetry_client.list_samples(
                'storage.api.request')
            LOG.debug('got samples %s', samples)

            # Collect container/object names out of the sample metadata.
            seen_containers, seen_objects = [], []
            for sample in samples:
                meta = sample['resource_metadata']
                if meta.get('container') and meta['container'] != 'None':
                    seen_containers.append(meta['container'])
                elif (meta.get('target.metadata:container') and
                      meta['target.metadata:container'] != 'None'):
                    seen_containers.append(meta['target.metadata:container'])

                if meta.get('object') and meta['object'] != 'None':
                    seen_objects.append(meta['object'])
                elif (meta.get('target.metadata:object') and
                      meta['target.metadata:object'] != 'None'):
                    seen_objects.append(meta['target.metadata:object'])

            return (container_name in seen_containers and
                    obj_name in seen_objects)

        self.assertTrue(test.call_until_true(_check_samples,
                                             NOTIFICATIONS_WAIT,
                                             NOTIFICATIONS_SLEEP),
                        'Correct notifications were not received after '
                        '%s seconds.' % NOTIFICATIONS_WAIT)
Exemple #57
0
    def _check_remote_connectivity(self, source, dest, should_succeed=True):
        """Poll ping from *source* to *dest* until it matches expectation.

        :param source: RemoteClient: an ssh connection from which to ping
        :param dest: and IP to ping against
        :param should_succeed: boolean should ping succeed or not
        :returns: boolean -- should_succeed == ping
        :returns: ping is false if ping failed
        """
        def _attempt_ping():
            try:
                self.ping_host(source, dest)
            except lib_exc.SSHExecCommandFailed:
                LOG.warning(_LW('Failed to ping IP: %(dest)s '
                                'via a ssh connection from: %(source)s.'),
                            {'dest': dest, 'source': source})
                return not should_succeed
            else:
                # Ping went through; success only if that was expected.
                return should_succeed

        return test.call_until_true(_attempt_ping,
                                    CONF.validation.ping_timeout, 1)
Exemple #58
0
    def test_keystone_users_table(self):
        """Verify congress 'users' rows converge to keystone's user list."""
        user_schema = (
            self.admin_manager.congress_client.show_datasource_table_schema(
                self.datasource_id, 'users')['columns'])
        id_pos = next(pos for pos, col in enumerate(user_schema)
                      if col['name'] == 'id')

        def _users_converged():
            # Fetch data from keystone each time, because this test may start
            # before keystone has all the users.
            user_map = {user['id']: user
                        for user in self.users_client.list_users()['users']}

            rows = (self.admin_manager.congress_client.list_datasource_rows(
                self.datasource_id, 'users'))
            for row in rows['results']:
                try:
                    user = user_map[row['data'][id_pos]]
                except KeyError:
                    return False
                for pos, col in enumerate(user_schema):
                    name = col['name']
                    # Keystone does not return the tenantId or email column
                    # if not present.
                    if name in ('tenantId', 'email') and name not in user:
                        continue
                    if str(row['data'][pos]) != str(user[name]):
                        return False
            return True

        if not test.call_until_true(func=_users_converged,
                                    duration=100,
                                    sleep_for=4):
            raise exceptions.TimeoutException("Data did not converge in time "
                                              "or failure in server")
Exemple #59
0
    def test_trust_subscription(self):
        """Subscribe via a trust+ webhook and verify the notification."""
        sub_queue = data_utils.rand_name('Queues-Test')
        self.addCleanup(self.client.delete_queue, sub_queue)

        # Point the subscriber at the notification queue with a trust URL.
        subscriber = 'trust+{0}/{1}/queues/{2}/messages'.format(
            self.client.base_url, self.client.uri_prefix, sub_queue)
        post_headers = {
            'X-Project-ID': self.client.tenant_id,
            'Client-ID': str(uuid.uuid4())
        }
        post_body = json.dumps(
            {'messages': [{
                'body': '$zaqar_message$',
                'ttl': 60
            }]})
        sub_body = {
            'ttl': 1200,
            'subscriber': subscriber,
            'options': {
                'post_data': post_body,
                'post_headers': post_headers
            }
        }

        self.create_subscription(queue_name=self.queue_name, rbody=sub_body)
        message_body = self.generate_message_body()
        self.post_messages(queue_name=self.queue_name, rbody=message_body)

        # Wait until the webhook has delivered something to sub_queue.
        if not test.call_until_true(
                lambda: self.list_messages(sub_queue)[1]['messages'], 10, 1):
            self.fail("Couldn't get messages")

        _, body = self.list_messages(sub_queue)
        expected = message_body['messages'][0]
        expected['queue_name'] = self.queue_name
        expected['Message_Type'] = 'Notification'
        # There are two message in the queue. One is the confirm message,
        # the other one is the notification.
        for message in body['messages']:
            if message['body']['Message_Type'] == 'Notification':
                self.assertEqual(expected, message['body'])
    def test_net_ip_availability_after_port_delete(self):
        """Used-IP count must drop by one after a port is deleted."""
        network = self.create_network(
            network_name=data_utils.rand_name('network-'))
        self.addCleanup(self.client.delete_network, network['id'])
        subnet, prefix = self._create_subnet(network, self.ip_version)
        self.addCleanup(self.client.delete_subnet, subnet['id'])

        port = self.client.create_port(network_id=network['id'])
        self.addCleanup(self._cleanUp_port, port['port']['id'])

        # Snapshot usage while the port exists, then delete the port.
        before = self.admin_client.list_network_ip_availabilities()
        used_before = self._get_used_ips(network, before)
        self.client.delete_port(port['port']['id'])

        def _ip_released():
            after = self.admin_client.list_network_ip_availabilities()
            return self._get_used_ips(network, after) == used_before - 1

        self.assertTrue(
            test.call_until_true(_ip_released, DELETE_TIMEOUT, DELETE_SLEEP),
            msg="IP address did not become available after port delete")