Ejemplo n.º 1
0
    def test_execute_dummy_action_plan(self):
        """Execute an action plan generated by the 'dummy' strategy.

        Creates an audit template for the "dummy" goal, runs an audit,
        waits for it to finish, then triggers the resulting action plan
        and verifies it completes successfully.
        """
        _, goal = self.client.show_goal("dummy")
        _, audit_template = self.create_audit_template(goal['uuid'])
        _, audit = self.create_audit(audit_template['uuid'])

        # Wait (up to 30s) for the audit to reach a terminal state.
        self.assertTrue(
            test_utils.call_until_true(func=functools.partial(
                self.has_audit_finished, audit['uuid']),
                                       duration=30,
                                       sleep_for=.5))
        _, action_plans = self.client.list_action_plans(
            audit_uuid=audit['uuid'])
        action_plan = action_plans['action_plans'][0]

        _, action_plan = self.client.show_action_plan(action_plan['uuid'])

        # Tuple membership test, consistent with the other state checks
        # in this module.
        if action_plan['state'] in ('SUPERSEDED', 'SUCCEEDED'):
            # This means the action plan is superseded so we cannot trigger it,
            # or it is empty.
            return

        # Execute the action by changing its state to PENDING
        _, updated_ap = self.client.start_action_plan(action_plan['uuid'])

        # Wait (up to 30s) for the action plan to finish executing.
        self.assertTrue(
            test_utils.call_until_true(func=functools.partial(
                self.has_action_plan_finished, action_plan['uuid']),
                                       duration=30,
                                       sleep_for=.5))
        _, finished_ap = self.client.show_action_plan(action_plan['uuid'])

        self.assertIn(updated_ap['state'], ('PENDING', 'ONGOING'))
        self.assertIn(finished_ap['state'], ('SUCCEEDED', 'SUPERSEDED'))
Ejemplo n.º 2
0
 def wait_recordset_delete(self, recordset_client, zone_id, recordset_id,
                           **kwargs):
     """Delete a recordset and poll until the deletion completes.

     :param recordset_client: client used to delete/show the recordset
     :param zone_id: UUID of the zone owning the recordset
     :param recordset_id: UUID of the recordset to delete
     :param kwargs: extra arguments forwarded to the delete call
     """
     self._delete_recordset(recordset_client, zone_id, recordset_id,
                            **kwargs)
     # NOTE(review): call_until_true's return value is discarded, so a
     # timeout here passes silently -- confirm whether the result should
     # be asserted instead.
     utils.call_until_true(self._check_recordset_deleted,
                           CONF.dns.build_timeout, CONF.dns.build_interval,
                           recordset_client, zone_id, recordset_id)
Ejemplo n.º 3
0
    def _run_and_wait(self,
                      key,
                      data,
                      version,
                      content_type='application/json',
                      headers=None,
                      fields=None):
        """Send a single log entry and wait until it becomes searchable.

        :param key: unique marker used to find the message in elasticsearch
        :param data: log payload to send
        :param version: log API version, used to select the client
        :param content_type: MIME type of the payload
        :param headers: optional extra HTTP headers
        :param fields: optional extra fields passed to the log client
        :return: the elasticsearch search result for ``key``
        """
        # Normalize headers once; the original code called _get_headers a
        # second time with identical arguments, which was redundant.
        headers = base._get_headers(headers, content_type)

        def wait():
            return self.logs_search_client.count_search_messages(key,
                                                                 headers) > 0

        # The marker must not pre-exist, otherwise the final count check
        # below would be meaningless.
        self.assertEqual(
            0, self.logs_search_client.count_search_messages(key, headers),
            'Find log message in elasticsearch: {0}'.format(key))

        data = base._get_data(data, content_type, version=version)

        client = self.logs_clients[version]
        response, _ = client.send_single_log(data, headers, fields)
        self.assertEqual(204, response.status)

        # Poll until the message is indexed; the length assertion below
        # reports failure if it never shows up.
        test_utils.call_until_true(wait, _RETRY_COUNT * _RETRY_WAIT,
                                   _RETRY_WAIT)
        response = self.logs_search_client.search_messages(key, headers)
        self.assertEqual(1, len(response))

        return response
Ejemplo n.º 4
0
    def check_flip_status(self, floating_ip, status):
        """Verifies floatingip reaches the given status

        :param dict floating_ip: floating IP dict to check status
        :param status: target status
        :raises AssertionError: if status doesn't match
        """

        # TODO(ptoohill): Find a way to utilze the proper client method

        floatingip_id = floating_ip['id']

        def refresh():
            # Re-fetch the floating IP and report whether it has reached
            # the desired status yet.
            result = (self.floating_ips_client_admin.show_floatingip(
                floatingip_id)['floatingip'])
            return status == result['status']

        # Poll up to 100 seconds, once per second; the assertion below
        # reports the failure if the status is never reached.
        test_utils.call_until_true(refresh, 100, 1)

        floating_ip = self.floating_ips_client_admin.show_floatingip(
            floatingip_id)['floatingip']
        self.assertEqual(status,
                         floating_ip['status'],
                         message="FloatingIP: {fp} is at status: {cst}. "
                         "failed  to reach status: {st}".format(
                             fp=floating_ip,
                             cst=floating_ip['status'],
                             st=status))
        # Lazy %-style args: the message is only rendered when INFO
        # logging is enabled.
        LOG.info("FloatingIP: %s is at status: %s", floating_ip, status)
Ejemplo n.º 5
0
    def test_create_audit_continuous(self):
        """Create a CONTINUOUS audit, cancel it, and verify its state."""
        _, goal = self.client.show_goal("dummy")
        _, audit_template = self.create_audit_template(goal['uuid'])

        audit_params = {
            'audit_template_uuid': audit_template['uuid'],
            'audit_type': 'CONTINUOUS',
            'interval': '7200',
            'name': 'audit_continuous',
        }

        _, body = self.create_audit(**audit_params)
        # The API resolves the template to its goal, so compare against
        # the goal UUID rather than the template UUID.
        del audit_params['audit_template_uuid']
        audit_params['goal_uuid'] = goal['uuid']
        self.assert_expected(audit_params, body)
        self.assertIn(body['state'], ('PENDING', 'ONGOING'))

        _, audit = self.client.show_audit(body['uuid'])
        self.assert_expected(audit, body)

        cancel_patch = [{'op': 'replace',
                         'path': '/state',
                         'value': 'CANCELLED'}]
        _, audit = self.update_audit(body['uuid'], cancel_patch)

        # Give the audit up to 10 seconds to settle into an idle state.
        test_utils.call_until_true(func=functools.partial(
            self.is_audit_idle, body['uuid']),
                                   duration=10,
                                   sleep_for=.5)

        _, audit = self.client.show_audit(body['uuid'])
        self.assertEqual(audit['state'], 'CANCELLED')
Ejemplo n.º 6
0
    def test_update_audit(self):
        """Update a CONTINUOUS audit's state to CANCELLED and verify it.

        The previous version discarded both wait results and ended with no
        assertion at all, so a timeout -- or a cancellation that never took
        effect -- passed silently.  Assert the waits and the final state.
        """
        _, goal = self.client.show_goal("dummy")
        _, audit_template = self.create_audit_template(goal['uuid'])
        audit_params = dict(
            audit_template_uuid=audit_template['uuid'],
            audit_type='CONTINUOUS',
            interval='7200',
        )

        _, body = self.create_audit(**audit_params)
        audit_uuid = body['uuid']
        # Wait for the audit to actually start before trying to cancel it.
        self.assertTrue(test_utils.call_until_true(func=functools.partial(
            self.is_audit_ongoing, audit_uuid),
                                                   duration=10,
                                                   sleep_for=.5))

        _, audit = self.update_audit(audit_uuid, [{
            'op': 'replace',
            'path': '/state',
            'value': 'CANCELLED'
        }])

        # Wait for the cancellation to settle, then verify the final state.
        self.assertTrue(test_utils.call_until_true(func=functools.partial(
            self.is_audit_idle, audit_uuid),
                                                   duration=10,
                                                   sleep_for=.5))
        _, audit = self.client.show_audit(audit_uuid)
        self.assertEqual('CANCELLED', audit['state'])
Ejemplo n.º 7
0
    def check_floating_ip_status(self, floating_ip, status):
        """Verifies floatingip reaches the given status

        :param dict floating_ip: floating IP dict to check status
        :param status: target status
        :raises: AssertionError if status doesn't match
        """
        floatingip_id = floating_ip['id']

        def refresh():
            # Re-fetch the floating IP and check whether it has reached
            # the target status.
            result = (self.floating_ips_client.show_floatingip(floatingip_id)
                      ['floatingip'])
            return status == result['status']

        # Poll until the status matches or the build timeout expires; the
        # assertion below reports the failure either way.
        test_utils.call_until_true(refresh, CONF.network.build_timeout,
                                   CONF.network.build_interval)
        floating_ip = self.floating_ips_client.show_floatingip(
            floatingip_id)['floatingip']
        self.assertEqual(status,
                         floating_ip['status'],
                         message="FloatingIP: {fp} is at status: {cst}. "
                         "failed  to reach status: {st}".format(
                             fp=floating_ip,
                             cst=floating_ip['status'],
                             st=status))
        # Lazy %-style args: formatting only happens when INFO logging is
        # enabled.
        LOG.info("FloatingIP: %s is at status: %s", floating_ip, status)
Ejemplo n.º 8
0
 def wait_zone_delete(self, zone_client, zone_id, **kwargs):
     """Delete a zone and poll until the deletion completes.

     :param zone_client: client used to delete/show the zone
     :param zone_id: UUID of the zone to delete
     :param kwargs: extra arguments forwarded to delete_zone
     """
     zone_client.delete_zone(zone_id, **kwargs)
     # NOTE(review): the poll result is discarded, so a timeout passes
     # silently -- confirm whether callers should assert it.
     utils.call_until_true(self._check_zone_deleted,
                           CONF.dns.build_timeout,
                           CONF.dns.build_interval,
                           zone_client,
                           zone_id)
Ejemplo n.º 9
0
    def test_live_migration_with_trunk(self):
        """Test live migration with trunk and subport

        Boots a server on the trunk's parent port, verifies both parent
        and subport go ACTIVE, live-migrates the server, and verifies the
        ports are ACTIVE again afterwards.
        """
        trunk, parent, subport = self._create_trunk_with_subport()

        server = self.create_test_server(wait_until="ACTIVE",
                                         networks=[{
                                             'port': parent['id']
                                         }])

        # Wait till subport status is ACTIVE
        self.assertTrue(
            test_utils.call_until_true(self._is_port_status_active,
                                       CONF.validation.connect_timeout, 5,
                                       subport['id']))
        parent = self.ports_client.show_port(parent['id'])['port']
        self.assertEqual('ACTIVE', parent['status'])
        subport = self.ports_client.show_port(subport['id'])['port']
        # The refreshed subport was previously fetched but never checked;
        # assert it is ACTIVE, just like the parent port above.
        self.assertEqual('ACTIVE', subport['status'])

        if not CONF.compute_feature_enabled.can_migrate_between_any_hosts:
            # not to specify a host so that the scheduler will pick one
            target_host = None
        else:
            target_host = self.get_host_other_than(server['id'])

        self._live_migrate(server['id'], target_host, 'ACTIVE')

        # Wait till subport status is ACTIVE
        self.assertTrue(
            test_utils.call_until_true(self._is_port_status_active,
                                       CONF.validation.connect_timeout, 5,
                                       subport['id']))
        parent = self.ports_client.show_port(parent['id'])['port']
        self.assertEqual('ACTIVE', parent['status'])
Ejemplo n.º 10
0
    def test_delete_audit(self):
        """Delete an audit and verify it is no longer retrievable."""
        _, goal = self.client.show_goal("dummy")
        _, audit_template = self.create_audit_template(goal['uuid'])
        _, body = self.create_audit(audit_template['uuid'])
        audit_uuid = body['uuid']

        # An audit can only be removed once it is idle; give it 10 seconds.
        test_utils.call_until_true(func=functools.partial(
            self.is_audit_idle, audit_uuid),
                                   duration=10,
                                   sleep_for=.5)

        def is_audit_deleted(uuid):
            # Deleted means either a NotFound error or an empty response.
            try:
                return not bool(self.client.show_audit(uuid))
            except exceptions.NotFound:
                return True

        self.delete_audit(audit_uuid)

        # Poll for the deletion; the definitive assertion follows below.
        test_utils.call_until_true(func=functools.partial(
            is_audit_deleted, audit_uuid),
                                   duration=5,
                                   sleep_for=1)

        self.assertTrue(is_audit_deleted(audit_uuid))
Ejemplo n.º 11
0
    def resource_cleanup(cls):
        """Ensure that all created objects get destroyed."""
        # NOTE(review): this takes ``cls`` and calls super() with it, so it
        # is presumably decorated with @classmethod outside this view --
        # confirm at the definition site.
        try:
            action_plans_to_be_deleted = set()
            # Phase 1: Make sure all objects are in an idle state
            for audit_uuid in cls.created_audits:
                # Give each audit up to 30s to become idle before deletion.
                test_utils.call_until_true(func=functools.partial(
                    cls.is_audit_idle, audit_uuid),
                                           duration=30,
                                           sleep_for=.5)

            for audit_uuid in cls.created_action_plans_audit_uuids:
                _, action_plans = cls.client.list_action_plans(
                    audit_uuid=audit_uuid)
                action_plans_to_be_deleted.update(
                    ap['uuid'] for ap in action_plans['action_plans'])

                # Same idle wait for each action plan tied to the audit.
                for action_plan in action_plans['action_plans']:
                    test_utils.call_until_true(func=functools.partial(
                        cls.is_action_plan_idle, action_plan['uuid']),
                                               duration=30,
                                               sleep_for=.5)

            # Phase 2: Delete them all
            for action_plan_uuid in action_plans_to_be_deleted:
                cls.delete_action_plan(action_plan_uuid)

            # Iterate over copies -- the delete_* helpers presumably mutate
            # the tracking collections while we iterate (TODO confirm).
            for audit_uuid in cls.created_audits.copy():
                cls.delete_audit(audit_uuid)

            for audit_template_uuid in cls.created_audit_templates.copy():
                cls.delete_audit_template(audit_template_uuid)

        finally:
            # Always run the parent cleanup, even if deletion above failed.
            super(BaseInfraOptimTest, cls).resource_cleanup()
    def test_execute_basic_action_plan(self):
        """Execute an action plan based on the BASIC strategy

        - create an audit template with the basic strategy
        - run the audit to create an action plan
        - get the action plan
        - run the action plan
        - get results and make sure it succeeded
        """
        self.addCleanup(self.rollback_compute_nodes_status)
        self._create_one_instance_per_host()

        _, goal = self.client.show_goal(self.GOAL_NAME)
        _, strategy = self.client.show_strategy("basic")
        _, audit_template = self.create_audit_template(
            goal['uuid'], strategy=strategy['uuid'])
        _, audit = self.create_audit(audit_template['uuid'])

        try:
            # Wait up to 10 minutes for the audit to reach a terminal state.
            self.assertTrue(
                test_utils.call_until_true(func=functools.partial(
                    self.has_audit_finished, audit['uuid']),
                                           duration=600,
                                           sleep_for=2))
        except ValueError:
            # NOTE(review): presumably has_audit_finished raises ValueError
            # when the audit enters a failed state -- confirm in the base
            # class before relying on this.
            self.fail("The audit has failed!")

        _, finished_audit = self.client.show_audit(audit['uuid'])
        if finished_audit.get('state') in ('FAILED', 'CANCELLED', 'SUSPENDED'):
            self.fail("The audit ended in unexpected state: %s!" %
                      finished_audit.get('state'))

        _, action_plans = self.client.list_action_plans(
            audit_uuid=audit['uuid'])
        action_plan = action_plans['action_plans'][0]

        _, action_plan = self.client.show_action_plan(action_plan['uuid'])

        if action_plan['state'] in ('SUPERSEDED', 'SUCCEEDED'):
            # This means the action plan is superseded so we cannot trigger it,
            # or it is empty.
            return

        # Execute the action by changing its state to PENDING
        _, updated_ap = self.client.start_action_plan(action_plan['uuid'])

        # Wait up to 10 minutes for the action plan to finish executing.
        self.assertTrue(
            test_utils.call_until_true(func=functools.partial(
                self.has_action_plan_finished, action_plan['uuid']),
                                       duration=600,
                                       sleep_for=2))
        _, finished_ap = self.client.show_action_plan(action_plan['uuid'])
        _, action_list = self.client.list_actions(
            action_plan_uuid=finished_ap["uuid"])

        self.assertIn(updated_ap['state'], ('PENDING', 'ONGOING'))
        self.assertIn(finished_ap['state'], ('SUCCEEDED', 'SUPERSEDED'))

        # Every individual action in the plan must have succeeded.
        for action in action_list['actions']:
            self.assertEqual('SUCCEEDED', action.get('state'))
Ejemplo n.º 13
0
    def check_floating_ip_status(self, floating_ip, status):
        """Verifies floatingip reaches the given status

        :param dict floating_ip: floating IP dict to check status
        :param status: target status
        :raises: AssertionError if status doesn't match
        """
        floatingip_id = floating_ip['id']

        def refresh():
            # Re-read the floating IP and compare its current status.
            current = self.floating_ips_client.show_floatingip(
                floatingip_id)['floatingip']
            return current['status'] == status

        test_utils.call_until_true(
            refresh, CONF.network.build_timeout, CONF.network.build_interval)

        floating_ip = self.floating_ips_client.show_floatingip(
            floatingip_id)['floatingip']
        failure_msg = ("FloatingIP: {fp} is at status: {cst}. "
                       "failed  to reach status: {st}"
                       .format(fp=floating_ip, cst=floating_ip['status'],
                               st=status))
        self.assertEqual(status, floating_ip['status'], message=failure_msg)
        LOG.info("FloatingIP: {fp} is at status: {st}"
                 .format(fp=floating_ip, st=status))
Ejemplo n.º 14
0
    def _hotplug_server(self):
        """Attach a second NIC to the server and bring it up in the guest.

        Creates a new interface on ``self.new_net``, waits until neutron
        reports the new port, waits until the guest sees the new NIC over
        SSH, then assigns the port's fixed IP to it and links it up.

        :raises exceptions.TimeoutException: if the new port or the new
            guest NIC does not appear within CONF.network.build_timeout
        """
        old_floating_ip, server = self.floating_ip_tuple
        ip_address = old_floating_ip['floating_ip_address']
        private_key = self._get_server_key(server)
        ssh_client = self.get_remote_client(
            ip_address, private_key=private_key, server=server)
        old_nic_list = self._get_server_nics(ssh_client)
        # get a port from a list of one item
        port_list = self.os_admin.ports_client.list_ports(
            device_id=server['id'])['ports']
        self.assertEqual(1, len(port_list))
        old_port = port_list[0]
        interface = self.interface_client.create_interface(
            server_id=server['id'],
            net_id=self.new_net['id'])['interfaceAttachment']
        # Cleanups run in reverse order: delete the interface first, then
        # wait for the port to actually disappear.
        self.addCleanup(self.ports_client.wait_for_resource_deletion,
                        interface['port_id'])
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.interface_client.delete_interface,
                        server['id'], interface['port_id'])

        def check_ports():
            # Any port on the server other than the original one is "new".
            self.new_port_list = [
                port for port in
                self.os_admin.ports_client.list_ports(
                    device_id=server['id'])['ports']
                if port['id'] != old_port['id']
            ]
            return len(self.new_port_list) == 1

        if not test_utils.call_until_true(
                check_ports, CONF.network.build_timeout,
                CONF.network.build_interval):
            raise exceptions.TimeoutException(
                "No new port attached to the server in time (%s sec)! "
                "Old port: %s. Number of new ports: %d" % (
                    CONF.network.build_timeout, old_port,
                    len(self.new_port_list)))
        new_port = self.new_port_list[0]

        def check_new_nic():
            # Diff the guest's NIC list against the pre-attach snapshot.
            new_nic_list = self._get_server_nics(ssh_client)
            self.diff_list = [n for n in new_nic_list if n not in old_nic_list]
            return len(self.diff_list) == 1

        if not test_utils.call_until_true(
                check_new_nic, CONF.network.build_timeout,
                CONF.network.build_interval):
            raise exceptions.TimeoutException("Interface not visible on the "
                                              "guest after %s sec"
                                              % CONF.network.build_timeout)

        # _get_server_nics presumably yields (index, name) pairs -- TODO
        # confirm; only the name is needed here.
        _, new_nic = self.diff_list[0]
        ssh_client.exec_command("sudo ip addr add %s/%s dev %s" % (
                                new_port['fixed_ips'][0]['ip_address'],
                                CONF.network.project_network_mask_bits,
                                new_nic))
        ssh_client.exec_command("sudo ip link set %s up" % new_nic)
Ejemplo n.º 15
0
    def _hotplug_server(self):
        """Hot-attach an interface on ``self.new_net`` and configure it.

        Waits for neutron to report the new port, waits for the guest to
        show the new NIC over SSH, then adds the port's fixed IP to that
        NIC and brings the link up.

        :raises exceptions.TimeoutException: if the port or the guest NIC
            does not appear within CONF.network.build_timeout
        """
        old_floating_ip, server = self.floating_ip_tuple
        ip_address = old_floating_ip['floating_ip_address']
        private_key = self._get_server_key(server)
        ssh_client = self.get_remote_client(
            ip_address, private_key=private_key, server=server)
        old_nic_list = self._get_server_nics(ssh_client)
        # get a port from a list of one item
        port_list = self.os_admin.ports_client.list_ports(
            device_id=server['id'])['ports']
        self.assertEqual(1, len(port_list))
        old_port = port_list[0]
        interface = self.interface_client.create_interface(
            server_id=server['id'],
            net_id=self.new_net['id'])['interfaceAttachment']
        # Cleanups run LIFO: detach the interface, then wait for the port
        # to be gone.
        self.addCleanup(self.ports_client.wait_for_resource_deletion,
                        interface['port_id'])
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.interface_client.delete_interface,
                        server['id'], interface['port_id'])

        def check_ports():
            # New ports = all server ports except the pre-existing one.
            self.new_port_list = [
                port for port in
                self.os_admin.ports_client.list_ports(
                    device_id=server['id'])['ports']
                if port['id'] != old_port['id']
            ]
            return len(self.new_port_list) == 1

        if not test_utils.call_until_true(
                check_ports, CONF.network.build_timeout,
                CONF.network.build_interval):
            raise exceptions.TimeoutException(
                "No new port attached to the server in time (%s sec)! "
                "Old port: %s. Number of new ports: %d" % (
                    CONF.network.build_timeout, old_port,
                    len(self.new_port_list)))
        new_port = self.new_port_list[0]

        def check_new_nic():
            # Compare the guest's current NICs with the earlier snapshot.
            new_nic_list = self._get_server_nics(ssh_client)
            self.diff_list = [n for n in new_nic_list if n not in old_nic_list]
            return len(self.diff_list) == 1

        if not test_utils.call_until_true(
                check_new_nic, CONF.network.build_timeout,
                CONF.network.build_interval):
            raise exceptions.TimeoutException("Interface not visible on the "
                                              "guest after %s sec"
                                              % CONF.network.build_timeout)

        # diff_list entries appear to be (index, name) pairs -- TODO
        # confirm against _get_server_nics.
        _, new_nic = self.diff_list[0]
        ssh_client.exec_command("sudo ip addr add %s/%s dev %s" % (
                                new_port['fixed_ips'][0]['ip_address'],
                                CONF.network.project_network_mask_bits,
                                new_nic))
        ssh_client.exec_command("sudo ip link set %s up" % new_nic)
    def test_execute_dummy_action_plan(self):
        """Execute an action plan based on the 'dummy' strategy

        - create an audit template with the 'dummy' strategy
        - run the audit to create an action plan
        - get the action plan
        - run the action plan
        - get results and make sure it succeeded
        """
        _, goal = self.client.show_goal("dummy")
        _, audit_template = self.create_audit_template(goal['uuid'])
        _, audit = self.create_audit(audit_template['uuid'])

        # Wait up to 30 seconds for the audit to reach a terminal state.
        self.assertTrue(
            test_utils.call_until_true(func=functools.partial(
                self.has_audit_finished, audit['uuid']),
                                       duration=30,
                                       sleep_for=.5))

        self.assertTrue(self.has_audit_succeeded(audit['uuid']))

        _, action_plans = self.client.list_action_plans(
            audit_uuid=audit['uuid'])
        action_plan = action_plans['action_plans'][0]

        _, action_plan = self.client.show_action_plan(action_plan['uuid'])

        if action_plan['state'] in ['SUPERSEDED', 'SUCCEEDED']:
            # This means the action plan is superseded so we cannot trigger it,
            # or it is empty.
            return

        # Execute the action by changing its state to PENDING
        _, updated_ap = self.client.start_action_plan(action_plan['uuid'])

        # Wait up to 30 seconds for the action plan to finish executing.
        self.assertTrue(
            test_utils.call_until_true(func=functools.partial(
                self.has_action_plan_finished, action_plan['uuid']),
                                       duration=30,
                                       sleep_for=.5))
        _, finished_ap = self.client.show_action_plan(action_plan['uuid'])
        _, action_list = self.client.list_actions(
            action_plan_uuid=finished_ap["uuid"])

        # Tally the generated actions by type for the checks below.
        action_counter = collections.Counter(act['action_type']
                                             for act in action_list['actions'])

        self.assertIn(updated_ap['state'], ('PENDING', 'ONGOING'))
        self.assertIn(finished_ap['state'], ('SUCCEEDED', 'SUPERSEDED'))

        # A dummy strategy generates 2 "nop" actions and 1 "sleep" action
        self.assertEqual(3, len(action_list['actions']))
        self.assertEqual(2, action_counter.get("nop"))
        self.assertEqual(1, action_counter.get("sleep"))
Ejemplo n.º 17
0
    def _hotplug_server(self):
        """Attach a second NIC to the server and configure it in the guest.

        Creates an interface on ``self.new_net``, waits for neutron to
        report the new port and for the guest to see the new NIC, then
        assigns the port's fixed IP to the NIC and turns it on.

        :raises exceptions.TimeoutException: if the port or guest NIC does
            not appear within CONF.network.build_timeout
        """
        old_floating_ip, server = self.floating_ip_tuple
        ip_address = old_floating_ip.floating_ip_address
        private_key = self._get_server_key(server)
        ssh_client = self.get_remote_client(ip_address,
                                            private_key=private_key)
        old_nic_list = self._get_server_nics(ssh_client)
        # get a port from a list of one item
        port_list = self.admin_manager.ports_client.list_ports(
            device_id=server['id'])['ports']
        self.assertEqual(1, len(port_list))
        old_port = port_list[0]
        _, interface = self.interface_client.create_interface(
            server=server['id'], network_id=self.new_net.id)
        self.addCleanup(self.ports_client.wait_for_resource_deletion,
                        interface['port_id'])
        self.addCleanup(self.delete_wrapper,
                        self.interface_client.delete_interface, server['id'],
                        interface['port_id'])

        def check_ports():
            # Any port on the server that is not the original one is new.
            ports = self.admin_manager.ports_client.list_ports(
                device_id=server['id'])['ports']
            self.new_port_list = [port for port in ports if port != old_port]
            return len(self.new_port_list) == 1

        if not test_utils.call_until_true(check_ports,
                                          CONF.network.build_timeout,
                                          CONF.network.build_interval):
            raise exceptions.TimeoutException("No new port attached to the "
                                              "server in time (%s sec) !" %
                                              CONF.network.build_timeout)
        # BUG FIX: the original called delete_port() here, destroying the
        # port it was about to configure -- and delete_port returns no port
        # object, so the fixed-IP lookup below would have failed anyway.
        # Fetch the freshly attached port instead.
        new_port = self.ports_client.show_port(
            self.new_port_list[0]['id'])['port']

        def check_new_nic():
            # Diff the guest's NIC list against the pre-attach snapshot.
            new_nic_list = self._get_server_nics(ssh_client)
            self.diff_list = [n for n in new_nic_list if n not in old_nic_list]
            return len(self.diff_list) == 1

        if not test_utils.call_until_true(check_new_nic,
                                          CONF.network.build_timeout,
                                          CONF.network.build_interval):
            raise exceptions.TimeoutException("Interface not visible on the "
                                              "guest after %s sec" %
                                              CONF.network.build_timeout)

        num, new_nic = self.diff_list[0]
        ssh_client.assign_static_ip(
            nic=new_nic,
            addr=new_port['fixed_ips'][0]['ip_address'],
            network_mask_bits=CONF.network.project_network_mask_bits)
        ssh_client.turn_nic_on(nic=new_nic)
Ejemplo n.º 18
0
    def _prepare_and_test(self, address6_mode, n_subnets6=1, dualnet=False):
        """Boot two servers on the prepared network and verify IPv6 wiring."""
        net_list = self.prepare_network(address6_mode=address6_mode,
                                        n_subnets6=n_subnets6,
                                        dualnet=dualnet)

        sshv4_1, ips_from_api_1, sid1 = self.prepare_server(networks=net_list)
        sshv4_2, ips_from_api_2, sid2 = self.prepare_server(networks=net_list)

        def guest_has_address(ssh, addr):
            # True when the guest's own view of its NICs includes addr.
            return addr in ssh.exec_command("ip address")

        # Turn on 2nd NIC for Cirros when dualnet
        if dualnet:
            _, network_v6 = net_list
            self.turn_nic6_on(sshv4_1, sid1, network_v6['id'])
            self.turn_nic6_on(sshv4_2, sid2, network_v6['id'])

        # get addresses assigned to vNIC as reported by 'ip address' utility
        ips_from_ip_1 = sshv4_1.exec_command("ip address")
        ips_from_ip_2 = sshv4_2.exec_command("ip address")
        self.assertIn(ips_from_api_1['4'], ips_from_ip_1)
        self.assertIn(ips_from_api_2['4'], ips_from_ip_2)

        for i in range(n_subnets6):
            # v6 should be configured since the image supports it.
            # It can take time for the ipv6 automatic address to be assigned,
            # so poll each server in turn.
            for ssh, api_ips in ((sshv4_1, ips_from_api_1),
                                 (sshv4_2, ips_from_api_2)):
                addr_assigned = functools.partial(
                    guest_has_address, ssh, api_ips['6'][i])
                self.assertTrue(test_utils.call_until_true(
                    addr_assigned, CONF.validation.ping_timeout, 1))

        self.check_remote_connectivity(sshv4_1, ips_from_api_2['4'])
        self.check_remote_connectivity(sshv4_2, ips_from_api_1['4'])

        for i in range(n_subnets6):
            self.check_remote_connectivity(sshv4_1, ips_from_api_2['6'][i])
            self.check_remote_connectivity(sshv4_1,
                                           self.subnets_v6[i]['gateway_ip'])
            self.check_remote_connectivity(sshv4_2, ips_from_api_1['6'][i])
            self.check_remote_connectivity(sshv4_2,
                                           self.subnets_v6[i]['gateway_ip'])
Ejemplo n.º 19
0
    def _prepare_and_test(self, address6_mode, n_subnets6=1, dualnet=False):
        """Boot two servers and verify v4/v6 addressing and connectivity.

        :param address6_mode: IPv6 address assignment mode for the subnets
        :param n_subnets6: number of IPv6 subnets to create and verify
        :param dualnet: when True, servers get a separate v6 network on a
            second NIC that must be enabled manually in the guest
        """
        net_list = self.prepare_network(address6_mode=address6_mode,
                                        n_subnets6=n_subnets6,
                                        dualnet=dualnet)

        sshv4_1, ips_from_api_1, sid1 = self.prepare_server(networks=net_list)
        sshv4_2, ips_from_api_2, sid2 = self.prepare_server(networks=net_list)

        def guest_has_address(ssh, addr):
            # True when the guest's own view of its NICs includes addr.
            return addr in ssh.exec_command("ip address")

        # Turn on 2nd NIC for Cirros when dualnet
        if dualnet:
            _, network_v6 = net_list
            self.turn_nic6_on(sshv4_1, sid1, network_v6['id'])
            self.turn_nic6_on(sshv4_2, sid2, network_v6['id'])

        # get addresses assigned to vNIC as reported by 'ip address' utility
        ips_from_ip_1 = sshv4_1.exec_command("ip address")
        ips_from_ip_2 = sshv4_2.exec_command("ip address")
        self.assertIn(ips_from_api_1['4'], ips_from_ip_1)
        self.assertIn(ips_from_api_2['4'], ips_from_ip_2)
        for i in range(n_subnets6):
            # v6 should be configured since the image supports it
            # It can take time for ipv6 automatic address to get assigned
            srv1_v6_addr_assigned = functools.partial(
                guest_has_address, sshv4_1, ips_from_api_1['6'][i])

            srv2_v6_addr_assigned = functools.partial(
                guest_has_address, sshv4_2, ips_from_api_2['6'][i])

            # Poll each guest until the v6 address shows up on its NIC.
            self.assertTrue(test_utils.call_until_true(srv1_v6_addr_assigned,
                            CONF.validation.ping_timeout, 1))

            self.assertTrue(test_utils.call_until_true(srv2_v6_addr_assigned,
                            CONF.validation.ping_timeout, 1))

        # Verify v4 connectivity in both directions.
        self.check_remote_connectivity(sshv4_1, ips_from_api_2['4'])
        self.check_remote_connectivity(sshv4_2, ips_from_api_1['4'])

        # Verify v6 connectivity (peer and gateway) for every v6 subnet.
        for i in range(n_subnets6):
            self.check_remote_connectivity(sshv4_1,
                                           ips_from_api_2['6'][i])
            self.check_remote_connectivity(sshv4_1,
                                           self.subnets_v6[i]['gateway_ip'])
            self.check_remote_connectivity(sshv4_2,
                                           ips_from_api_1['6'][i])
            self.check_remote_connectivity(sshv4_2,
                                           self.subnets_v6[i]['gateway_ip'])
Ejemplo n.º 20
0
    def test_glancev2_tags_table(self):
        """Verify the 'tags' datasource table mirrors glance image tags."""
        def _tags_converged():
            # Re-read glance on every attempt: the datasource may lag
            # behind while images are still being registered.
            tags_by_image = {
                image['id']: image['tags']
                for image in self.glancev2.list_images()['images']
            }

            rows = self.os_admin.congress_client.list_datasource_rows(
                self.datasource_id, 'tags')
            for row in rows['results']:
                image_id, tag = row['data'][0], row['data'][1]
                known_tags = tags_by_image.get(image_id)
                if not known_tags:
                    # Row refers to an image glance does not report.
                    return False
                if tag not in known_tags:
                    # Row carries a tag the image does not have.
                    return False
            return True

        if not test_utils.call_until_true(func=_tags_converged,
                                          duration=100,
                                          sleep_for=5):
            raise exceptions.TimeoutException("Data did not converge in time "
                                              "or failure in server")
Ejemplo n.º 21
0
    def _check_remote_connectivity(self,
                                   source,
                                   dest,
                                   should_succeed=True,
                                   nic=None,
                                   mtu=None,
                                   fragmentation=True):
        """Check ping to ``dest`` issued over the ``source`` ssh connection.

        :param source: RemoteClient: an ssh connection from which to ping
        :param dest: an IP to ping against
        :param should_succeed: boolean should ping succeed or not
        :param nic: specific network interface to ping from
        :param mtu: mtu size for the packet to be sent
        :param fragmentation: Flag for packet fragmentation
        :returns: boolean -- should_succeed == ping
        :returns: ping is false if ping failed
        """
        def _ping(remote, target, count=CONF.validation.ping_count,
                  size=CONF.validation.ping_size, nic=None, mtu=None,
                  fragmentation=True):
            address = netaddr.IPAddress(target)
            command = 'ping6' if address.version == 6 else 'ping'
            if nic:
                command = 'sudo {cmd} -I {nic}'.format(cmd=command, nic=nic)
            if mtu:
                if not fragmentation:
                    # Forbid fragmentation so an undersized MTU makes
                    # the probe fail instead of silently fragmenting.
                    command += ' -M do'
                size = str(net_utils.get_ping_payload_size(
                    mtu=mtu, ip_version=address.version))
            command += ' -c{0} -w{0} -s{1} {2}'.format(count, size, target)
            return remote.exec_command(command)

        def _attempt():
            try:
                result = _ping(source, dest, nic=nic, mtu=mtu,
                               fragmentation=fragmentation)
            except lib_exc.SSHExecCommandFailed:
                LOG.warning(
                    'Failed to ping IP: %s via a ssh connection '
                    'from: %s.', dest, source.host)
                return not should_succeed
            LOG.debug('ping result: %s', result)
            # Assert that the return traffic was from the correct
            # source address.
            self.assertIn('from %s' % dest, result)
            return should_succeed

        return test_utils.call_until_true(_attempt,
                                          CONF.validation.ping_timeout, 1)
    def test_neutronv2_security_group_rules_table(self):
        """Verify 'security_group_rules' rows agree with neutron."""
        sgrs_schema = (
            self.os_admin.congress_client.show_datasource_table_schema(
                self.datasource_id, 'security_group_rules')['columns'])

        @helper.retry_on_exception
        def _check_data():
            neutron = self.security_groups_client.list_security_groups()
            # Index every rule by its id for O(1) row lookups below.
            rules_by_id = {
                rule['id']: rule
                for group in neutron['security_groups']
                for rule in group['security_group_rules']
            }

            congress = self.os_admin.congress_client
            congress.request_refresh(self.datasource_id)
            time.sleep(1)

            table = congress.list_datasource_rows(
                self.datasource_id, 'security_group_rules')

            # Every congress row must agree column-by-column with neutron.
            for row in table['results']:
                expected = rules_by_id[row['data'][1]]
                for col, spec in enumerate(sgrs_schema):
                    if str(row['data'][col]) != str(expected[spec['name']]):
                        return False
            return True

        if not test_utils.call_until_true(func=_check_data,
                                          duration=200, sleep_for=10):
            raise exceptions.TimeoutException("Data did not converge in time "
                                              "or failure in server")
Ejemplo n.º 23
0
def wait_node_instance_association(client, instance_uuid, timeout=None,
                                   interval=None):
    """Waits for a node to be associated with instance_id.

    :param client: an instance of tempest plugin BaremetalClient.
    :param instance_uuid: UUID of the instance.
    :param timeout: the timeout after which the check is considered as failed.
        Defaults to CONF.baremetal.association_timeout.
    :param interval: an interval between show_node calls for status check.
        Defaults to client.build_interval.
    :raises: lib_exc.TimeoutException when no node gets associated in time.
    """
    timeout, interval = _determine_and_check_timeout_interval(
        timeout, CONF.baremetal.association_timeout,
        interval, client.build_interval)

    def _associated():
        # get_node is polled until it reports a node for this instance.
        return utils.get_node(client, instance_uuid=instance_uuid) is not None

    if not test_utils.call_until_true(_associated, timeout, interval):
        raise lib_exc.TimeoutException(
            'Timed out waiting to get Ironic node by instance UUID '
            '%(instance_uuid)s within the required time (%(timeout)s s).'
            % {'instance_uuid': instance_uuid, 'timeout': timeout})
    def test_neutronv2_networks_table(self):
        """Verify the 'networks' datasource table matches neutron."""
        @helper.retry_on_exception
        def _check_data():
            # Index neutron's networks by id for the row comparison below.
            network_map = {
                net['id']: net
                for net in self.networks_client.list_networks()['networks']}

            congress = self.os_admin.congress_client
            congress.request_refresh(self.datasource_id)
            time.sleep(1)

            network_schema = (congress.show_datasource_table_schema(
                self.datasource_id, 'networks')['columns'])

            rows = congress.list_datasource_rows(
                self.datasource_id, 'networks')
            for row in rows['results']:
                expected = network_map[row['data'][0]]
                for col, spec in enumerate(network_schema):
                    if str(row['data'][col]) != str(expected[spec['name']]):
                        return False
            return True

        if not test_utils.call_until_true(func=_check_data,
                                          duration=200, sleep_for=10):
            raise exceptions.TimeoutException("Data did not converge in time "
                                              "or failure in server")
    def get_and_reserve_node(cls, node=None):
        """Pick an available node for deployment and reserve it.

        Only one instance_uuid may be associated, use this behaviour as
        reservation node when tests are launched concurrently. If node is
        not passed directly pick random available for deployment node.

        :param node: Ironic node to associate instance_uuid with.
        :returns: Ironic node.
        """
        instance_uuid = uuidutils.generate_uuid()
        # The reserved node escapes the retry closure through this list.
        reserved = []

        def _reserve():
            candidate = node or cls.get_random_available_node()
            if candidate is None:
                # Nothing deployable right now; keep retrying.
                return False
            try:
                cls._associate_instance_with_node(candidate['uuid'],
                                                  instance_uuid)
            except lib_exc.Conflict:
                # Another test won the race for this node; try again.
                return False
            reserved.append(candidate)
            return True

        if (not test_utils.call_until_true(
                _reserve,
                duration=CONF.baremetal.association_timeout,
                sleep_for=1)):
            msg = ('Timed out waiting to associate instance to ironic node '
                   'uuid %s' % instance_uuid)
            raise lib_exc.TimeoutException(msg)

        return reserved[0]
    def test_keystone_roles_table(self):
        """Verify the 'roles' datasource table tracks keystone roles."""
        role_schema = (
            self.os_admin.congress_client.show_datasource_table_schema(
                self.datasource_id, 'roles')['columns'])
        role_id_col = next(i for i, c in enumerate(role_schema)
                           if c['name'] == 'id')

        def _roles_converged():
            # Re-read keystone on every attempt, because this test may
            # start before keystone has all the users.
            roles_map = {role['id']: role
                         for role in self.roles_client.list_roles()['roles']}

            results = (self.os_admin.congress_client.list_datasource_rows(
                self.datasource_id, 'roles'))
            for row in results['results']:
                role_row = roles_map.get(row['data'][role_id_col])
                if role_row is None:
                    # Congress knows a role keystone does not; not yet
                    # converged.
                    return False
                for index, column in enumerate(role_schema):
                    if (str(row['data'][index]) !=
                            str(role_row[column['name']])):
                        return False
            return True

        if not test_utils.call_until_true(
                func=_roles_converged, duration=100,
                sleep_for=4):
            raise exceptions.TimeoutException("Data did not converge in time "
                                              "or failure in server")
Ejemplo n.º 27
0
    def verify_metadata(self):
        """Verify the metadata service from inside the guest (GET + POST)."""
        if not (self.run_ssh and
                CONF.compute_feature_enabled.metadata_service):
            return

        md_url = 'http://169.254.169.254/latest/meta-data/public-ipv4'

        def _fetch_and_check():
            cmd = 'curl ' + md_url
            result = self.ssh_client.exec_command(cmd)
            if result:
                msg = ('Failed while verifying metadata on server. Result '
                       'of command "%s" is NOT "%s".' % (cmd, self.fip))
                self.assertEqual(self.fip, result, msg)
                # Any truthy return ends the retry loop.
                return 'Verification is successful!'

        if not test_utils.call_until_true(_fetch_and_check,
                                          CONF.compute.build_timeout,
                                          CONF.compute.build_interval):
            raise exceptions.TimeoutException('Timed out while waiting to '
                                              'verify metadata on server. '
                                              '%s is empty.' % md_url)

        # Also, test a POST
        md_url = 'http://169.254.169.254/openstack/2013-10-17/password'
        data = data_utils.arbitrary_string(100)
        self.ssh_client.exec_command('curl -X POST -d ' + data + ' ' + md_url)
        result = self.servers_client.show_password(self.instance['id'])
        self.assertEqual(data, result['password'])
Ejemplo n.º 28
0
    def restart_kuryr_controller(self):
        """Delete every kuryr-controller pod and wait for replacements."""
        system_namespace = CONF.kuryr_kubernetes.kube_system_namespace
        delete_options = {"kind": "DeleteOptions",
                          "apiVersion": "v1",
                          "gracePeriodSeconds": 0}

        for pod in self.get_controller_pod_names():
            self.delete_pod(pod_name=pod,
                            body=delete_options,
                            namespace=system_namespace)

            # make sure the kuryr pod was deleted
            self.wait_for_pod_status(pod, namespace=system_namespace)

        # Check that new kuryr-controller is up and running
        for pod in self.get_controller_pod_names():
            self.wait_for_pod_status(pod,
                                     namespace=system_namespace,
                                     pod_status='Running',
                                     retries=120)

            # Wait until kuryr-controller pools are reloaded, i.e.,
            # kuryr-controller is ready
            ready = test_utils.call_until_true(
                self.get_pod_readiness, 30, 1, pod,
                namespace=system_namespace, container_name='controller')
            self.assertTrue(ready, 'Timed out waiting for '
                                   'kuryr-controller to reload pools.')
    def send(self, cmd, timeout=CONF.validation.ssh_timeout,
             ssh_shell_prologue=None, one_off_attempt=False,
             assert_success=None, on_failure_return=None):
        """Run ``cmd`` over ssh, optionally retrying until it succeeds."""
        # Mutable cell so the retry closure can expose the command output.
        captured = [on_failure_return]

        if assert_success is None:
            # Default: a one-off attempt may fail silently; a retried send
            # is expected to eventually succeed.
            assert_success = not one_off_attempt

        def _attempt():
            try:
                captured[0] = self.exec_command(cmd, ssh_shell_prologue)
                return True
            except lib_exc.SSHExecCommandFailed as e:
                LOG.debug('[{}] cmd failed, got {}'.format(
                    self.tag, e))
                return False
            except Exception as e:
                self.parent.fail('[{}] "{}" cmd failed, got {} ({})'.format(
                    cmd, self.tag, e, e.__class__.__name__))

        if one_off_attempt:
            succeeded = _attempt()
        else:
            succeeded = test_utils.call_until_true(_attempt, timeout, 1)

        if assert_success:
            assert succeeded

        return captured[0]
Ejemplo n.º 30
0
    def verify_metadata(self):
        """Verify the metadata service from inside the guest (GET + POST)."""
        # Only meaningful when ssh access works and the deployment exposes
        # the metadata service.
        if self.run_ssh and CONF.compute_feature_enabled.metadata_service:
            # Verify metadata service
            md_url = 'http://169.254.169.254/latest/meta-data/public-ipv4'

            def exec_cmd_and_verify_output():
                cmd = 'curl ' + md_url
                result = self.ssh_client.exec_command(cmd)
                if result:
                    msg = ('Failed while verifying metadata on server. Result '
                           'of command "%s" is NOT "%s".' % (cmd, self.fip))
                    self.assertEqual(self.fip, result, msg)
                    # Truthy return stops the call_until_true retry loop.
                    return 'Verification is successful!'

            if not test_utils.call_until_true(exec_cmd_and_verify_output,
                                              CONF.compute.build_timeout,
                                              CONF.compute.build_interval):
                raise exceptions.TimeoutException('Timed out while waiting to '
                                                  'verify metadata on server. '
                                                  '%s is empty.' % md_url)

            # Also, test a POST
            md_url = 'http://169.254.169.254/openstack/2013-10-17/password'
            data = data_utils.arbitrary_string(100)
            cmd = 'curl -X POST -d ' + data + ' ' + md_url
            self.ssh_client.exec_command(cmd)
            # The POSTed payload must be readable back via the compute API.
            result = self.servers_client.show_password(self.instance['id'])
            self.assertEqual(data, result['password'])
Ejemplo n.º 31
0
    def make_instance_statistic(self, instance, metrics=None):
        """Create instance resource and its measures in Gnocchi DB

        :param instance: Instance response body
        :param metrics: The metrics add to resource when using Gnocchi.
            Defaults to a single 'cpu_util' metric with the 'low' archive
            policy.
        """
        # BUG FIX: the default used to be the mutable ``metrics=dict()``.
        # Use None as the sentinel to avoid the shared-mutable-default
        # pitfall; an explicitly passed empty dict is still replaced by
        # the cpu_util default, matching the previous behaviour.
        if not metrics:
            metrics = {'cpu_util': {'archive_policy_name': 'low'}}
        all_flavors = self.flavors_client.list_flavors()['flavors']
        flavor_name = instance['flavor']['original_name']
        flavor = [f for f in all_flavors if f['name'] == flavor_name]
        resource_params = {
            'type': 'instance',
            'metrics': metrics,
            'host': instance.get('OS-EXT-SRV-ATTR:hypervisor_hostname'),
            'display_name': instance.get('OS-EXT-SRV-ATTR:instance_name'),
            'image_ref': instance['image']['id'],
            'flavor_id': flavor[0]['id'],
            'flavor_name': flavor_name,
            'id': instance['id']
        }
        _, res = self.create_resource(**resource_params)
        metric_uuid = res['metrics']['cpu_util']
        self.add_measures(metric_uuid, self._make_measures(3, 5))

        # Wait until the measures become visible in Gnocchi.
        self.assertTrue(
            test_utils.call_until_true(func=functools.partial(
                self._show_measures, metric_uuid),
                                       duration=600,
                                       sleep_for=2))
Ejemplo n.º 32
0
    def verify_metadata_from_api(self, server, ssh_client, verify_method):
        """Verify device-tagging metadata through the metadata API.

        :param server: server response body whose metadata is checked
        :param ssh_client: RemoteClient connected to the guest
        :param verify_method: callable fed the raw meta_data.json body,
            returning True once verification is complete
        """
        md_url = 'http://169.254.169.254/openstack/latest/meta_data.json'
        LOG.info(
            'Attempting to verify tagged devices in server %s via '
            'the metadata service: %s', server['id'], md_url)

        def _poll_metadata():
            try:
                ssh_client.exec_command('curl -V')
            except exceptions.SSHExecCommandFailed:
                # No curl in the guest: skip unless config drive can cover
                # the verification elsewhere.
                if not CONF.compute_feature_enabled.config_drive:
                    raise self.skipException('curl not found in guest '
                                             'and config drive is '
                                             'disabled')
                LOG.warning('curl was not found in the guest, device '
                            'tagging metadata was not checked in the '
                            'metadata API')
                return True
            md_json = ssh_client.exec_command('curl %s' % md_url)
            return verify_method(md_json)

        # NOTE(gmann) Keep refreshing the metadata info until the metadata
        # cache is refreshed. For safer side, we will go with wait loop of
        # build_interval till build_timeout. verify_method() above will return
        # True if all metadata verification is done as expected.
        if not test_utils.call_until_true(_poll_metadata,
                                          CONF.compute.build_timeout,
                                          CONF.compute.build_interval):
            raise exceptions.TimeoutException('Timeout while verifying '
                                              'metadata on server.')
Ejemplo n.º 33
0
    def _check_remote_connectivity(self,
                                   source,
                                   dest,
                                   should_succeed=True,
                                   nic=None):
        """Check ping to ``dest`` from the ``source`` ssh connection.

        :param source: RemoteClient: an ssh connection from which to ping
        :param dest: an IP to ping against
        :param should_succeed: boolean should ping succeed or not
        :param nic: specific network interface to ping from
        :returns: boolean -- should_succeed == ping
        :returns: ping is false if ping failed
        """
        def _probe():
            try:
                source.ping_host(dest, nic=nic)
            except lib_exc.SSHExecCommandFailed:
                LOG.warning(
                    'Failed to ping IP: %s via a ssh connection '
                    'from: %s.', dest, source.ssh_client.host)
                return not should_succeed
            return should_succeed

        return test_utils.call_until_true(_probe,
                                          CONF.validation.ping_timeout, 1)
Ejemplo n.º 34
0
 def test_update_no_error(self):
     """Polling the glancev2 datasource must complete without errors."""
     def _no_error():
         return self.check_datasource_no_error('glancev2')

     if not test_utils.call_until_true(func=_no_error,
                                       duration=30,
                                       sleep_for=5):
         raise exceptions.TimeoutException('Datasource could not poll '
                                           'without error.')
Ejemplo n.º 35
0
def wait_node_instance_association(client,
                                   instance_uuid,
                                   timeout=None,
                                   interval=None):
    """Waits for a node to be associated with instance_id.

    :param client: an instance of tempest plugin BaremetalClient.
    :param instance_uuid: UUID of the instance.
    :param timeout: the timeout after which the check is considered as failed.
        Defaults to CONF.baremetal.association_timeout.
    :param interval: an interval between show_node calls for status check.
        Defaults to client.build_interval.
    :raises: lib_exc.TimeoutException if no node becomes associated with
        instance_uuid before the timeout elapses.
    """
    # Apply configured defaults and validate the polling parameters.
    timeout, interval = _determine_and_check_timeout_interval(
        timeout, CONF.baremetal.association_timeout, interval,
        client.build_interval)

    def is_some_node_associated():
        # Presumably get_node returns None until some node carries this
        # instance UUID — the non-None check below relies on that.
        node = utils.get_node(client, instance_uuid=instance_uuid)
        return node is not None

    if not test_utils.call_until_true(is_some_node_associated, timeout,
                                      interval):
        msg = ('Timed out waiting to get Ironic node by instance UUID '
               '%(instance_uuid)s within the required time (%(timeout)s s).' %
               {
                   'instance_uuid': instance_uuid,
                   'timeout': timeout
               })
        raise lib_exc.TimeoutException(msg)
Ejemplo n.º 36
0
    def get_and_reserve_node(cls, node=None):
        """Pick an available node for deployment and reserve it.

        Only one instance_uuid may be associated, use this behaviour as
        reservation node when tests are launched concurrently. If node is
        not passed directly pick random available for deployment node.

        :param node: Ironic node to associate instance_uuid with.
        :returns: Ironic node.
        :raises: lib_exc.TimeoutException when no node could be reserved
            within CONF.baremetal.association_timeout.
        """
        instance_uuid = uuidutils.generate_uuid()
        # The reserved node escapes the retry closure through this list.
        nodes = []

        def _try_to_associate_instance():
            n = node or cls.get_random_available_node()
            if n is None:
                # BUG FIX: get_random_available_node() can yield no node
                # (the sibling implementation guards this); retry instead
                # of crashing on the subscription below.
                return False
            try:
                cls._associate_instance_with_node(n['uuid'], instance_uuid)
                nodes.append(n)
            except lib_exc.Conflict:
                # Another test reserved this node first; try again.
                return False
            return True

        if (not test_utils.call_until_true(
                _try_to_associate_instance,
                duration=CONF.baremetal.association_timeout, sleep_for=1)):
            msg = ('Timed out waiting to associate instance to ironic node '
                   'uuid %s' % instance_uuid)
            raise lib_exc.TimeoutException(msg)

        return nodes[0]
Ejemplo n.º 37
0
 def test_call_until_true_when_f_never_returns_true(self, m_time, m_sleep):
     """call_until_true must return False once the mocked clock expires."""
     timeout = 42  # The value doesn't matter as we mock time.time()
     sleep = 60  # The value doesn't matter as we mock time.sleep()
     m_time.side_effect = utils.generate_timeout_series(timeout)
     self.assertEqual(
         False, test_utils.call_until_true(lambda: False, timeout, sleep)
     )
     # BUG FIX: the original *assigned* to ``call_args_list`` which
     # verifies nothing. Assert that the mocks were actually exercised.
     m_sleep.assert_called_with(sleep)
     m_time.assert_called()
Ejemplo n.º 38
0
 def test_call_until_true_when_f_returns_true(self, m_time, m_sleep):
     """An immediately-true predicate returns without any sleeping."""
     timeout = 42  # The value doesn't matter as we mock time.time()
     sleep = 60  # The value doesn't matter as we mock time.sleep()
     m_time.return_value = 0
     result = test_utils.call_until_true(lambda: True, timeout, sleep)
     self.assertEqual(True, result)
     # Success on the first call: no sleep, a single clock read.
     self.assertEqual(0, m_sleep.call_count)
     self.assertEqual(1, m_time.call_count)
Ejemplo n.º 39
0
    def wait_disassociate(self):
        """Block until the floating IP is no longer bound to an instance."""
        cli = self.manager.compute_floating_ips_client

        def _is_free():
            details = (cli.show_floating_ip(self.floating['id'])
                       ['floating_ip'])
            return details['instance_id'] is None

        if not test_utils.call_until_true(_is_free, self.check_timeout,
                                          self.check_interval):
            raise RuntimeError("IP disassociate timeout!")
Ejemplo n.º 40
0
    def check_icmp_echo(self):
        """Ping the floating IP until it answers or the timeout expires."""
        self.logger.info("%s(%s): Pinging..",
                         self.server_id, self.floating['ip'])

        def func():
            return self.ping_ip_address(self.floating['ip'])

        if not test_utils.call_until_true(func, self.check_timeout,
                                          self.check_interval):
            # BUG FIX: RuntimeError was given logging-style arguments which
            # are never interpolated; format the message explicitly.
            raise RuntimeError("%s(%s): Cannot ping the machine." %
                               (self.server_id, self.floating['ip']))
        self.logger.info("%s(%s): pong :)",
                         self.server_id, self.floating['ip'])
Ejemplo n.º 41
0
    def _wait_for_volume_available_on_the_system(self, ip_address,
                                                 private_key):
        """Wait until the attached volume device shows up in the guest.

        :param ip_address: address to open the ssh connection to
        :param private_key: key used to authenticate the ssh session
        :raises: lib_exc.TimeoutException when the device never appears
        """
        ssh = self.get_remote_client(ip_address, private_key=private_key)

        def _func():
            part = ssh.get_partitions()
            # BUG FIX: lazy %-args instead of eager string formatting, so
            # the interpolation only happens when debug logging is enabled.
            LOG.debug("Partitions:%s", part)
            return CONF.compute.volume_device_name in part

        if not test_utils.call_until_true(_func,
                                          CONF.compute.build_timeout,
                                          CONF.compute.build_interval):
            raise lib_exc.TimeoutException
Ejemplo n.º 42
0
    def _run_and_wait(self, key, data, version,
                      content_type='application/json', headers=None):
        """Send one log entry and wait until it becomes searchable.

        :param key: token to search for in the log store
        :param data: log payload to send
        :param version: log API version to exercise
        :param content_type: value for the content-type header
        :param headers: optional extra headers
        :returns: the search response containing the single message
        """
        headers = base._get_headers(headers, content_type)

        def wait():
            return self.logs_search_client.count_search_messages(
                key, headers) > 0

        # The key must not be present before the message is sent.
        self.assertEqual(
            0, self.logs_search_client.count_search_messages(key, headers),
            'Find log message in elasticsearch: {0}'.format(key))

        # BUG FIX: headers were rebuilt here a second time with identical
        # arguments; the redundant _get_headers call is removed.
        data = base._get_data(data, content_type, version=version)

        response, _ = self.logs_clients[version].send_single_log(data, headers)
        self.assertEqual(204, response.status)

        # Best-effort wait; the assertion below catches a timeout anyway.
        test_utils.call_until_true(wait, _RETRY_COUNT * _RETRY_WAIT,
                                   _RETRY_WAIT)
        response = self.logs_search_client.search_messages(key, headers)
        self.assertEqual(1, len(response))

        return response
Ejemplo n.º 43
0
    def _prepare_and_test(self, address6_mode, n_subnets6=1, dualnet=False):
        """Boot two servers on the prepared network(s) and verify v4/v6.

        :param address6_mode: IPv6 address assignment mode for the subnets
        :param n_subnets6: number of IPv6 subnets to create
        :param dualnet: when True v6 lives on a second network whose NIC
            must be brought up manually inside the (Cirros) guests
        """
        net_list = self.prepare_network(address6_mode=address6_mode,
                                        n_subnets6=n_subnets6,
                                        dualnet=dualnet)

        sshv4_1, ips_from_api_1, srv1 = self.prepare_server(networks=net_list)
        sshv4_2, ips_from_api_2, srv2 = self.prepare_server(networks=net_list)

        def guest_has_address(ssh, addr):
            # True when 'ip address' inside the guest lists the address.
            return addr in ssh.exec_command("ip address")

        # Turn on 2nd NIC for Cirros when dualnet
        if dualnet:
            _, network_v6 = net_list
            self.turn_nic6_on(sshv4_1, srv1['id'], network_v6['id'])
            self.turn_nic6_on(sshv4_2, srv2['id'], network_v6['id'])

        # get addresses assigned to vNIC as reported by 'ip address' utility
        ips_from_ip_1 = sshv4_1.exec_command("ip address")
        ips_from_ip_2 = sshv4_2.exec_command("ip address")
        self.assertIn(ips_from_api_1['4'], ips_from_ip_1)
        self.assertIn(ips_from_api_2['4'], ips_from_ip_2)
        for i in range(n_subnets6):
            # v6 should be configured since the image supports it
            # It can take time for ipv6 automatic address to get assigned
            for srv, ssh, ips in (
                    (srv1, sshv4_1, ips_from_api_1),
                    (srv2, sshv4_2, ips_from_api_2)):
                ip = ips['6'][i]
                result = test_utils.call_until_true(
                    guest_has_address,
                    CONF.validation.ping_timeout, 1, ssh, ip)
                if not result:
                    # Dump the console log to ease debugging before failing.
                    self._log_console_output(servers=[srv])
                    self.fail(
                        'Address %s not configured for instance %s, '
                        'ip address output is\n%s' %
                        (ip, srv['id'], ssh.exec_command("ip address")))

        # Guests must reach each other over IPv4 ...
        self.check_remote_connectivity(sshv4_1, ips_from_api_2['4'])
        self.check_remote_connectivity(sshv4_2, ips_from_api_1['4'])

        # ... and over every IPv6 subnet, including each subnet's gateway.
        for i in range(n_subnets6):
            self.check_remote_connectivity(sshv4_1,
                                           ips_from_api_2['6'][i])
            self.check_remote_connectivity(sshv4_1,
                                           self.subnets_v6[i]['gateway_ip'])
            self.check_remote_connectivity(sshv4_2,
                                           ips_from_api_1['6'][i])
            self.check_remote_connectivity(sshv4_2,
                                           self.subnets_v6[i]['gateway_ip'])
Ejemplo n.º 44
0
    def _test_call_until_true(self, return_values, duration, time_sequence,
                              args=None, kwargs=None):
        """Test call_until_true function

        :param return_values: list of booleans values to be returned
        each time given function is called. If any of these values
        is not consumed by calling the function the test fails.
        The list must contain a sequence of False items terminated
        by a single True or False
        :param duration: parameter passed to call_until_true function
        (a floating point value).
        :param time_sequence: sequence of time values returned by
        mocked time.time function used to trigger call_until_true
        behavior when handling timeout condition. The sequence must
        contain the exact number of values expected to be consumed by
        each time call_until_true calls time.time function.
        :param args: sequence of positional arguments to be passed
        to call_until_true function.
        :param kwargs: sequence of named arguments to be passed
        to call_until_true function.
        """

        # all values except the last are False
        self.assertEqual([False] * len(return_values[:-1]), return_values[:-1])
        # last value can be True or False
        self.assertIn(return_values[-1], [True, False])

        # GIVEN
        # func returns the scripted values one at a time on each call.
        func = mock.Mock(side_effect=return_values)
        sleep = 10.  # this value has no effect as time.sleep is being mocked
        sleep_func = self.patch('time.sleep')
        time_func = self._patch_time(time_sequence)
        args = args or tuple()
        kwargs = kwargs or dict()

        # WHEN
        result = test_utils.call_until_true(func, duration, sleep,
                                            *args, **kwargs)
        # THEN

        # It must return last returned value
        self.assertIs(return_values[-1], result)

        # Helper assertions verify the call patterns of func and time.sleep.
        self._test_func_calls(func, return_values, *args, **kwargs)
        self._test_sleep_calls(sleep_func, return_values, sleep)
        # The number of times time.time is called is not relevant as a
        # requirement of call_until_true. What is instead relevant is that
        # call_until_true use a mocked function to make the test reliable
        # and the test actually provide the right sequence of numbers to
        # reproduce the behavior has to be tested
        self._assert_called_n_times(time_func, len(time_sequence))
 def call_until_valid(self, func, duration, *args, **kwargs):
     """Call ``func`` repeatedly until it returns a valid response.

     Tenant usage doesn't become available immediately after a VM is
     created, so retry ``func(*args, **kwargs)`` once per second for up
     to ``duration`` seconds until it stops raising
     ``InvalidHTTPResponseBody``.

     :param func: callable issuing the request; its last response is
         stored in ``self.resp`` and returned.
     :param duration: maximum number of seconds to keep retrying.
     :returns: the first valid response body.
     :raises AssertionError: if no valid response arrives in time.
     """
     def is_valid():
         try:
             self.resp = func(*args, **kwargs)
             return True
         except e.InvalidHTTPResponseBody:
             return False

     # assertTrue is the idiomatic check for a boolean result
     # (was assertEqual(..., True)).
     self.assertTrue(test_utils.call_until_true(is_valid, duration, 1),
                     "%s not return valid response in %s secs" % (
                         func.__name__, duration))
     return self.resp
    def _wait_until_ready(self, fwg_id):
        """Block until the firewall group becomes ACTIVE or CREATED.

        Polls the firewall group's status at the configured network build
        interval and raises TimeoutException if neither target state is
        reached within the build timeout.
        """
        ready_states = ('ACTIVE', 'CREATED')

        def _has_reached_target():
            body = self.firewall_groups_client.show_firewall_group(
                fwg_id)
            status = body['firewall_group']['status']
            return status in ready_states

        reached = test_utils.call_until_true(_has_reached_target,
                                             CONF.network.build_timeout,
                                             CONF.network.build_interval)
        if not reached:
            m = ("Timed out waiting for firewall_group %s to reach %s "
                 "state(s)" %
                 (fwg_id, ready_states))
            raise lib_exc.TimeoutException(m)
 def part_wait(self, num_match):
     """Wait until exactly ``num_match`` matching partitions appear.

     Polls the guest's partition listing (skipping its header line) until
     the number of lines matching ``self.part_line_re`` equals
     ``num_match``.

     :param num_match: expected number of matching partition lines.
     :raises RuntimeError: if the expected count is not reached within
         the compute build timeout.
     """
     def _part_state():
         self.partitions = self.remote_client.get_partitions().split('\n')
         # Skip the header line of the partition listing.
         matching = sum(
             1 for part_line in self.partitions[1:]
             if self.part_line_re.match(part_line))
         return matching == num_match

     if not test_utils.call_until_true(_part_state,
                                       CONF.compute.build_timeout,
                                       CONF.compute.build_interval):
         # BUG FIX: the message must be %-formatted into the string;
         # passing the value as a second positional argument (logging
         # style) left the '%s' placeholder unexpanded.
         raise RuntimeError("Unexpected partitions: %s"
                            % str(self.partitions))
    def _wait_until_deleted(self, fwg_id):
        """Block until the firewall group is fully deleted.

        Polls show_firewall_group until the API raises NotFound.  A group
        that enters the ERROR state while deleting aborts the wait.

        :param fwg_id: UUID of the firewall group being deleted.
        :raises lib_exc.DeleteErrorException: if the group goes to ERROR.
        :raises lib_exc.TimeoutException: if deletion does not complete
            within the network build timeout.
        """
        def _wait():
            try:
                fwg = self.firewall_groups_client.show_firewall_group(fwg_id)
            except lib_exc.NotFound:
                # Gone from the API: deletion finished.
                return True

            fwg_status = fwg['firewall_group']['status']
            if fwg_status == 'ERROR':
                raise lib_exc.DeleteErrorException(resource_id=fwg_id)
            # Still present and not in ERROR: keep polling.  (Previously
            # this fell through returning an implicit None.)
            return False

        if not test_utils.call_until_true(_wait, CONF.network.build_timeout,
                                          CONF.network.build_interval):
            m = ("Timed out waiting for firewall_group %s deleted" % fwg_id)
            raise lib_exc.TimeoutException(m)
Ejemplo n.º 49
0
    def _check_remote_connectivity(self, source, dest, should_succeed=True,
                                   nic=None, mtu=None, fragmentation=True):
        """Check pinging *dest* from the *source* ssh connection.

        :param source: RemoteClient: an ssh connection from which to ping
        :param dest: an IP to ping against
        :param should_succeed: boolean should ping succeed or not
        :param nic: specific network interface to ping from
        :param mtu: mtu size for the packet to be sent
        :param fragmentation: Flag for packet fragmentation
        :returns: boolean -- should_succeed == ping
        :returns: ping is false if ping failed
        """
        def ping_host(source, host, count=CONF.validation.ping_count,
                      size=CONF.validation.ping_size, nic=None, mtu=None,
                      fragmentation=True):
            # Build a ping/ping6 command line appropriate for the address
            # family, optional source NIC and optional MTU probing.
            addr = netaddr.IPAddress(host)
            cmd = 'ping6' if addr.version == 6 else 'ping'
            if nic:
                cmd = 'sudo {cmd} -I {nic}'.format(cmd=cmd, nic=nic)
            if mtu:
                if not fragmentation:
                    # '-M do' forbids fragmentation so oversized packets fail.
                    cmd += ' -M do'
                # Size the ICMP payload so the whole packet matches the MTU.
                size = str(net_utils.get_ping_payload_size(
                    mtu=mtu, ip_version=addr.version))
            cmd += ' -c{0} -w{0} -s{1} {2}'.format(count, size, host)
            return source.exec_command(cmd)

        def ping_remote():
            try:
                result = ping_host(source, dest, nic=nic, mtu=mtu,
                                   fragmentation=fragmentation)

            except lib_exc.SSHExecCommandFailed:
                # A failed ping is the expected outcome when
                # should_succeed is False.
                LOG.warning('Failed to ping IP: %s via a ssh connection '
                            'from: %s.', dest, source.host)
                return not should_succeed
            LOG.debug('ping result: %s', result)
            # Assert that the return traffic was from the correct
            # source address.
            from_source = 'from %s' % dest
            self.assertIn(from_source, result)
            return should_succeed

        return test_utils.call_until_true(ping_remote,
                                          CONF.validation.ping_timeout,
                                          1)
Ejemplo n.º 50
0
    def create_and_add_security_group_to_server(self, server):
        """Create a security group, attach it to *server* and wait.

        Registers a cleanup that removes the group from the server and
        raises TimeoutException if the group never shows up in the
        server's security_groups listing.
        """
        secgroup = self._create_security_group()
        server_id = server['id']
        sg_name = secgroup['name']
        self.servers_client.add_security_group(server_id, name=sg_name)
        self.addCleanup(self.servers_client.remove_security_group,
                        server_id, name=sg_name)

        def _secgroup_is_attached():
            details = self.servers_client.show_server(server_id)['server']
            return {'name': sg_name} in details['security_groups']

        attached = test_utils.call_until_true(_secgroup_is_attached,
                                              CONF.compute.build_timeout,
                                              CONF.compute.build_interval)
        if not attached:
            msg = ('Timed out waiting for adding security group %s to server '
                   '%s' % (secgroup['id'], server_id))
            raise exceptions.TimeoutException(msg)
Ejemplo n.º 51
0
    def verify_metadata(self):
        """Check the EC2-style metadata service reports the floating IP.

        Repeatedly curls the public-ipv4 metadata URL over ssh until it
        returns non-empty output, then asserts that output equals the
        floating IP.  Skipped when ssh or the metadata service is
        disabled in the test configuration.
        """
        if not (self.run_ssh and
                CONF.compute_feature_enabled.metadata_service):
            return

        # Verify metadata service
        md_url = 'http://169.254.169.254/latest/meta-data/public-ipv4'

        def exec_cmd_and_verify_output():
            cmd = 'curl ' + md_url
            result = self.ssh_client.exec_command(cmd)
            if not result:
                # Empty output: metadata not ready yet, keep polling.
                return None
            msg = ('Failed while verifying metadata on server. Result '
                   'of command "%s" is NOT "%s".' % (cmd, self.fip))
            self.assertEqual(self.fip, result, msg)
            return 'Verification is successful!'

        verified = test_utils.call_until_true(exec_cmd_and_verify_output,
                                              CONF.compute.build_timeout,
                                              CONF.compute.build_interval)
        if not verified:
            raise exceptions.TimeoutException('Timed out while waiting to '
                                              'verify metadata on server. '
                                              '%s is empty.' % md_url)
Ejemplo n.º 52
0
    def _check_remote_connectivity(self, source, dest, should_succeed=True):
        """Check that pinging *dest* from *source* matches expectation.

        :param source: RemoteClient: an ssh connection from which to ping
        :param dest: an IP to ping against
        :param should_succeed: boolean should ping succeed or not
        :returns: boolean -- should_succeed == ping
        :returns: ping is false if ping failed
        """
        def _probe():
            try:
                self.ping_host(source, dest)
            except lib_exc.SSHExecCommandFailed:
                LOG.warning(_LW('Failed to ping IP: %(dest)s '
                                'via a ssh connection from: %(source)s.'),
                            {'dest': dest, 'source': source})
                # A failed ping satisfies the check when failure is expected.
                return not should_succeed
            return should_succeed

        return test_utils.call_until_true(
            _probe, CONF.validation.ping_timeout, 1)
Ejemplo n.º 53
0
def wait_for_bm_node_status(client, node_id, attr, status, timeout=None,
                            interval=None):
    """Waits for a baremetal node attribute to reach given status.

    :param client: an instance of tempest plugin BaremetalClient.
    :param node_id: identifier of the node.
    :param attr: node's API-visible attribute to check status of.
    :param status: desired status. Can be a list of statuses.
    :param timeout: the timeout after which the check is considered as failed.
        Defaults to client.build_timeout.
    :param interval: an interval between show_node calls for status check.
        Defaults to client.build_interval.
    :raises lib_exc.TimeoutException: if the attribute does not reach any of
        the desired statuses within the timeout.

    The client should have a show_node(node_id) method to get the node.
    """
    timeout, interval = _determine_and_check_timeout_interval(
        timeout, client.build_timeout, interval, client.build_interval)

    # Normalize a single status into a one-element list.
    if not isinstance(status, list):
        status = [status]

    def is_attr_in_status():
        node = utils.get_node(client, node_id=node_id)
        # Return the membership test directly rather than via if/return
        # True/return False.
        return node[attr] in status

    if not test_utils.call_until_true(is_attr_in_status, timeout,
                                      interval):
        message = ('Node %(node_id)s failed to reach %(attr)s=%(status)s '
                   'within the required time (%(timeout)s s).' %
                   {'node_id': node_id,
                    'attr': attr,
                    'status': status,
                    'timeout': timeout})
        caller = test_utils.find_test_caller()
        if caller:
            message = '(%s) %s' % (caller, message)
        raise lib_exc.TimeoutException(message)
    def test_net_ip_availability_after_port_delete(self):
        """Deleting a port frees its IP in network IP availability stats."""
        net_name = data_utils.rand_name('network-')
        network = self.create_network(network_name=net_name)
        self.addCleanup(self.client.delete_network, network['id'])
        subnet, prefix = self._create_subnet(network, self._ip_version)
        self.addCleanup(self.client.delete_subnet, subnet['id'])
        port = self.client.create_port(network_id=network['id'])
        port_id = port['port']['id']
        self.addCleanup(self._cleanUp_port, port_id)
        baseline = self.admin_client.list_network_ip_availabilities()
        ips_before_delete = self._get_used_ips(network, baseline)
        self.client.delete_port(port_id)

        def _one_ip_was_freed():
            current = self.admin_client.list_network_ip_availabilities()
            ips_now = self._get_used_ips(network, current)
            return ips_now == ips_before_delete - 1

        self.assertTrue(
            test_utils.call_until_true(
                _one_ip_was_freed, DELETE_TIMEOUT, DELETE_SLEEP),
            msg="IP address did not become available after port delete")
    def _confirm_notifications(self, container_name, obj_name):
        """Wait until swift notifications mention the container and object."""
        # NOTE: Loop seeking for appropriate notifications about the containers
        # and objects sent to swift.

        def _check_samples():
            # NOTE: Return True only if we have notifications about some
            # containers and some objects and the notifications are about
            # the expected containers and objects.
            # Otherwise returning False will cause _check_samples to be
            # called again.
            results = self.telemetry_client.list_samples(
                'storage.objects.incoming.bytes')
            LOG.debug('got samples %s', results)

            # Extract container info from samples.
            containers, objects = [], []
            for sample in results:
                meta = sample['resource_metadata']
                # The container/object may be reported either directly or
                # under the 'target.metadata:' prefix; literal 'None'
                # strings are treated as absent.
                if meta.get('container') and meta['container'] != 'None':
                    containers.append(meta['container'])
                elif (meta.get('target.metadata:container') and
                      meta['target.metadata:container'] != 'None'):
                    containers.append(meta['target.metadata:container'])

                if meta.get('object') and meta['object'] != 'None':
                    objects.append(meta['object'])
                elif (meta.get('target.metadata:object') and
                      meta['target.metadata:object'] != 'None'):
                    objects.append(meta['target.metadata:object'])

            return (container_name in containers and obj_name in objects)

        self.assertTrue(
            test_utils.call_until_true(_check_samples,
                                       CONF.telemetry.notification_wait,
                                       CONF.telemetry.notification_sleep),
            'Correct notifications were not received after '
            '%s seconds.' % CONF.telemetry.notification_wait)
Ejemplo n.º 56
0
    def ping_ip_address(self, ip_address, should_succeed=True,
                        ping_timeout=None, mtu=None):
        """Ping *ip_address* locally until the outcome matches expectation.

        Sends single-packet pings until the reachability result equals
        *should_succeed* or the timeout expires; returns True on a match.
        """
        timeout = ping_timeout or CONF.validation.ping_timeout
        cmd = ['ping', '-c1', '-w1']
        if mtu:
            cmd.extend([
                # don't fragment
                '-M', 'do',
                # ping receives just the size of ICMP payload
                '-s', str(net_utils.get_ping_payload_size(mtu, 4)),
            ])
        cmd.append(ip_address)

        def ping():
            proc = subprocess.Popen(cmd,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
            proc.communicate()
            # Zero exit status means the host answered.
            return (proc.returncode == 0) == should_succeed

        caller = test_utils.find_test_caller()
        expectation = 'reachable' if should_succeed else 'unreachable'
        LOG.debug("{caller} begins to ping {ip} in {timeout} sec and the"
                  " expected result is {should_succeed}"
                  .format(caller=caller,
                          ip=ip_address,
                          timeout=timeout,
                          should_succeed=expectation))
        result = test_utils.call_until_true(ping, timeout, 1)
        outcome = 'expected' if result else 'unexpected'
        LOG.debug("{caller} finishes ping {ip} in {timeout} sec and the "
                  "ping result is {result}"
                  .format(caller=caller,
                          ip=ip_address,
                          timeout=timeout,
                          result=outcome))
        return result
Ejemplo n.º 57
0
    def _check_remote_connectivity(self, source, dest, should_succeed=True,
                                   nic=None):
        """Verify ping connectivity from *source* to *dest* over ssh.

        :param source: RemoteClient: an ssh connection from which to ping
        :param dest: an IP to ping against
        :param should_succeed: boolean should ping succeed or not
        :param nic: specific network interface to ping from
        :returns: boolean -- should_succeed == ping
        :returns: ping is false if ping failed
        """
        def _attempt_ping():
            try:
                source.ping_host(dest, nic=nic)
            except lib_exc.SSHExecCommandFailed:
                LOG.warning('Failed to ping IP: %s via a ssh connection '
                            'from: %s.', dest, source.ssh_client.host)
                # Expected outcome when connectivity should be absent.
                return not should_succeed
            return should_succeed

        return test_utils.call_until_true(
            _attempt_ping, CONF.validation.ping_timeout, 1)
Ejemplo n.º 58
0
    def ping_ip_address(self, ip_address, should_succeed=True,
                        ping_timeout=None, mtu=None):
        """Ping *ip_address* and report whether the outcome was expected.

        :param ip_address: address to ping from the local host.
        :param should_succeed: whether the ping is expected to succeed.
        :param ping_timeout: seconds to keep retrying; defaults to
            CONF.validation.ping_timeout.
        :param mtu: when set, send non-fragmentable packets with the ICMP
            payload sized to probe this MTU.
        :returns: True when the observed reachability matches
            *should_succeed* within the timeout, else False.
        """
        # the code is taken from tempest/scenario/manager.py in tempest git
        timeout = ping_timeout or CONF.validation.ping_timeout
        cmd = ['ping', '-c1', '-w1']

        if mtu:
            cmd += [
                # don't fragment
                '-M', 'do',
                # ping receives just the size of ICMP payload
                '-s', str(net_utils.get_ping_payload_size(mtu, 4))
            ]
        cmd.append(ip_address)

        def ping():
            # Zero exit status means the host answered; compare that
            # against the expected outcome.
            proc = subprocess.Popen(cmd,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
            proc.communicate()

            return (proc.returncode == 0) == should_succeed

        caller = test_utils.find_test_caller()
        LOG.debug('%(caller)s begins to ping %(ip)s in %(timeout)s sec and the'
                  ' expected result is %(should_succeed)s', {
                      'caller': caller, 'ip': ip_address, 'timeout': timeout,
                      'should_succeed':
                      'reachable' if should_succeed else 'unreachable'
                  })
        result = test_utils.call_until_true(ping, timeout, 1)
        LOG.debug('%(caller)s finishes ping %(ip)s in %(timeout)s sec and the '
                  'ping result is %(result)s', {
                      'caller': caller, 'ip': ip_address, 'timeout': timeout,
                      'result': 'expected' if result else 'unexpected'
                  })
        return result
Ejemplo n.º 59
0
    def test_minimum_basic_scenario(self):
        """End-to-end smoke test: image, server, volume, network, reboot.

        Creates an image, keypair and server; attaches and inspects a
        volume; optionally assigns a floating IP; verifies SSH access
        before and after a reboot; then deletes the floating IP and waits
        for it to disappear from the server's addresses.
        """
        image = self.glance_image_create()
        keypair = self.create_keypair()

        server = self.create_server(image_id=image, key_name=keypair['name'])
        servers = self.servers_client.list_servers()['servers']
        self.assertIn(server['id'], [x['id'] for x in servers])

        self.nova_show(server)

        volume = self.create_volume()
        volumes = self.volumes_client.list_volumes()['volumes']
        self.assertIn(volume['id'], [x['id'] for x in volumes])

        self.cinder_show(volume)

        volume = self.nova_volume_attach(server, volume)
        self.addCleanup(self.nova_volume_detach, server, volume)
        self.cinder_show(volume)

        floating_ip = None
        server = self.servers_client.show_server(server['id'])['server']
        if (CONF.network_feature_enabled.floating_ips and
            CONF.network.floating_network_name):
            floating_ip = self.create_floating_ip(server)
            # fetch the server again to make sure the addresses were refreshed
            # after associating the floating IP
            server = self.servers_client.show_server(server['id'])['server']
            address = self._get_floating_ip_in_server_addresses(
                floating_ip, server)
            self.assertIsNotNone(
                address,
                "Failed to find floating IP '%s' in server addresses: %s" %
                (floating_ip['ip'], server['addresses']))
            ssh_ip = floating_ip['ip']
        else:
            # No floating IP support: ssh via the server's fixed address.
            ssh_ip = self.get_server_ip(server)

        self.create_and_add_security_group_to_server(server)

        # check that we can SSH to the server before reboot
        self.linux_client = self.get_remote_client(
            ssh_ip, private_key=keypair['private_key'],
            server=server)

        self.nova_reboot(server)

        # check that we can SSH to the server after reboot
        # (both connections are part of the scenario)
        self.linux_client = self.get_remote_client(
            ssh_ip, private_key=keypair['private_key'],
            server=server)

        self.check_disks()

        if floating_ip:
            # delete the floating IP, this should refresh the server addresses
            self.compute_floating_ips_client.delete_floating_ip(
                floating_ip['id'])

            def is_floating_ip_detached_from_server():
                server_info = self.servers_client.show_server(
                    server['id'])['server']
                address = self._get_floating_ip_in_server_addresses(
                    floating_ip, server_info)
                return (not address)

            # Poll until the deleted floating IP no longer appears in the
            # server's address list.
            if not test_utils.call_until_true(
                is_floating_ip_detached_from_server,
                CONF.compute.build_timeout,
                CONF.compute.build_interval):
                msg = ("Floating IP '%s' should not be in server addresses: %s"
                       % (floating_ip['ip'], server['addresses']))
                raise exceptions.TimeoutException(msg)