def disrupt_all_controller_nodes(disrupt_method=sh.hard_reset_method,
                                 sequentially=False,
                                 exclude_list=None):
    # Reboot all controllers and wait for SSH to come back up on them.
    # disrupt_method: method of disruption to use: reset | network_disruption
    # hard reset is simultaneous while soft reset is sequential
    # exclude_list: list of nodes NOT to reset

    controlplane_groups = ['controller', 'messaging', 'database', 'networker']
    actual_controlplane_groups = tripleo_topology.actual_node_groups(
        controlplane_groups)
    nodes = topology.list_openstack_nodes(group=actual_controlplane_groups)

    # remove excluded nodes from reset list
    if exclude_list:
        nodes = [node for node in nodes if node.name not in exclude_list]

    for controller in nodes:
        if isinstance(disrupt_method, sh.RebootHostMethod):
            reboot_node(controller.name,
                        wait=sequentially,
                        reboot_method=disrupt_method)
        else:
            # ssh_client.connect().exec_command() fires the disruption
            # command without waiting for it (fire and forget)
            controller.ssh_client.connect().exec_command(disrupt_method)
            LOG.info('disrupt exec: {} on server: {}'.format(
                disrupt_method, controller.name))
            tobiko.cleanup_fixture(controller.ssh_client)
            if sequentially:
                check_overcloud_node_responsive(controller)
    if not sequentially:
        for controller in topology.list_openstack_nodes(group='controller'):
            check_overcloud_node_responsive(controller)
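
A minimal usage sketch for the helper above (the import path follows tobiko's usual layout and the excluded node name is purely illustrative):

# Assumed import; adjust to this project's actual module layout.
from tobiko.shell import sh

# Simultaneous hard reset of every control-plane node except one
# (the node name below is an example only).
disrupt_all_controller_nodes(disrupt_method=sh.hard_reset_method,
                             sequentially=False,
                             exclude_list=['controller-2'])
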
def reboot_all_controller_nodes(reboot_method=sh.hard_reset_method,
                                sequentially=False,
                                exclude_list=None):
    # Reboot all controllers and wait for SSH to come back up on them.
    # reboot_method: method of reboot to use: hard or soft reset
    # hard reset is simultaneous while soft reset is sequential
    # exclude_list: list of nodes NOT to reset

    controlplane_groups = ['controller', 'messaging', 'database', 'networker']
    actual_controlplane_groups = tripleo_topology.actual_node_groups(
        controlplane_groups)
    nodes = topology.list_openstack_nodes(group=actual_controlplane_groups)

    # remove excluded nodes from reset list
    if exclude_list:
        nodes = [node for node in nodes if node.name not in exclude_list]

    for controller in nodes:
        sh.reboot_host(ssh_client=controller.ssh_client,
                       wait=sequentially,
                       method=reboot_method)
        LOG.info('reboot exec: {} on server: {}'.format(
            reboot_method, controller.name))
        tobiko.cleanup_fixture(controller.ssh_client)
    if not sequentially:
        for controller in topology.list_openstack_nodes(group='controller'):
            check_overcloud_node_responsive(controller)


def kill_rabbitmq_service():
    """kill a rabbit process on a random controller,
    check in pacemaker it is down"""
    if tripleo_topology.is_composable_roles_env():
        nodes = topology.list_openstack_nodes(group='messaging')
    else:
        nodes = topology.list_openstack_nodes(group='controller')
    node = random.choice(nodes)
    sh.execute(kill_rabbit, ssh_client=node.ssh_client)
    LOG.info('kill rabbit: {} on server: {}'.format(kill_rabbit, node.name))
    retry = tobiko.retry(timeout=30, interval=5)
    for _ in retry:
        if not (pacemaker.PacemakerResourcesStatus().rabbitmq_resource_healthy(
        )):
            return
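
The retry loop above polls pacemaker until the rabbitmq resource stops reporting healthy. A small, hedged sketch of the same polling pattern, reusable for any predicate (it assumes tobiko.retry keeps yielding attempts until the timeout elapses):

import tobiko

def wait_for_condition(predicate, timeout=30., interval=5.):
    # Hypothetical helper: poll `predicate` every `interval` seconds.
    # Behaviour once `timeout` elapses is left to tobiko.retry
    # (assumed to stop iterating or raise).
    for _ in tobiko.retry(timeout=timeout, interval=interval):
        if predicate():
            return
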
Example #4
    def _start_octavia_main_services(
            self, controllers: typing.List[OpenStackTopologyNode] = None):
        """Starts the provided octavia services.

        This method starts the provided octavia services.
        After it runs the "start command" (e.g. `systemctl start`), it makes
        sure that the Octavia services appear on the active Octavia units.

        It then sends traffic to validate the Octavia's functionality
        """

        controllers = controllers or topology.list_openstack_nodes(
            group='controller')
        for controller in controllers:

            # Starting the Octavia services
            for service in octavia.OCTAVIA_SERVICES:
                sh.execute(f"systemctl start {service}",
                           ssh_client=controller.ssh_client,
                           sudo=True)

            # Making sure the Octavia services were started
            self._make_sure_octavia_services_are_active(controller)

        octavia.check_members_balanced(
            pool_id=self.listener_stack.pool_id,
            ip_address=self.loadbalancer_stack.floating_ip_address,
            lb_algorithm=self.listener_stack.lb_algorithm,
            protocol=self.listener_stack.lb_protocol,
            port=self.listener_stack.lb_port)
def kill_all_galera_services():
    """kill all galera processes,
    check in pacemaker it is down"""
    if tripleo_topology.is_composable_roles_env():
        nodes = topology.list_openstack_nodes(group='database')
    else:
        nodes = topology.list_openstack_nodes(group='controller')
    for node in nodes:
        sh.execute(kill_galera, ssh_client=node.ssh_client)
        LOG.info('kill galera: {} on server: {}'.format(
            kill_galera, node.name))
    retry = tobiko.retry(timeout=30, interval=5)
    for _ in retry:
        if not (pacemaker.PacemakerResourcesStatus().galera_resource_healthy()
                ):
            return
Example #6
    def test_list_openstack_topology(self, group=None, hostnames=None):
        nodes = topology.list_openstack_nodes(
            topology=self.topology, group=group, hostnames=hostnames)
        self.assertTrue(set(nodes).issubset(set(self.topology.nodes)))
        self.assertEqual(len(set(nodes)), len(nodes),
                         f"Repeated node found: {nodes}")
        for node in nodes:
            if isinstance(group, str):
                self.assertIn(group, node.groups)
            elif isinstance(group, PatternType):
                for actual_group in node.groups:
                    if group.match(actual_group):
                        break
                else:
                    self.fail(f"No group of node {node.name} matches "
                              f"'{group}': {node.groups}")
            elif isinstance(group, abc.Iterable):
                matching_groups = set(group) & set(node.groups)
                self.assertNotEqual(set(), matching_groups,
                                    f"No group of node {node.name} "
                                    f"matches '{group}': {node.groups}")
            if hostnames:
                hostnames = [node_name_from_hostname(h)
                             for h in hostnames]
                self.assertIn(node.name, hostnames)
        return nodes
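
The test above accepts `group` as a plain string, a compiled regex or an iterable of group names; a sketch of the equivalent calls (group names are illustrative, import path assumed):

import re
from tobiko.openstack import topology  # assumed import path

topology.list_openstack_nodes(group='controller')                  # exact group name
topology.list_openstack_nodes(group=re.compile(r'controller'))     # regex pattern
topology.list_openstack_nodes(group=['controller', 'networker'])   # any of several groups
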
Example #7
def ovn_dbs_vip_bindings(test_case):
    # commands to obtain OVN SB and NB connection strings
    get_ovn_nb_conn_cmd = (
        'crudini --get /var/lib/config-data/puppet-generated/neutron/etc/'
        'neutron/plugins/ml2/ml2_conf.ini ovn ovn_nb_connection')
    get_ovn_sb_conn_cmd = get_ovn_nb_conn_cmd.replace('ovn_nb_connection',
                                                      'ovn_sb_connection')

    controllers = topology.list_openstack_nodes(group='controller')
    ovn_conn_str = {}
    ovn_conn_str['nb'] = sh.execute(get_ovn_nb_conn_cmd,
                                    ssh_client=controllers[0].ssh_client,
                                    sudo=True).stdout.splitlines()[0]
    ovn_conn_str['sb'] = sh.execute(get_ovn_sb_conn_cmd,
                                    ssh_client=controllers[0].ssh_client,
                                    sudo=True).stdout.splitlines()[0]
    ovn_conn = {}
    for db in ('nb', 'sb'):
        ovn_conn[db] = {}
        ipv6 = re.findall(r'\[.*\]', ovn_conn_str[db])
        if len(ipv6) == 1:
            ovn_conn[db]['ip'] = ipv6[0]
        elif len(ipv6) == 0:
            ovn_conn[db]['ip'] = ovn_conn_str[db].split(':')[1]
        else:
            raise RuntimeError('Error parsing ovn db connection string from '
                               'configuration file')
        ovn_conn[db]['port'] = ovn_conn_str[db].split(':')[-1]

    # OVN DB sockets might be centralized or distributed,
    # depending on the OpenStack version under test
    ovn_db_sockets_centrallized = topology.verify_osp_version('14.0',
                                                              lower=True)

    # command to obtain sockets listening on the OVN NB and SB DBs
    get_ovn_db_sockets_listening_cmd = \
        "ss -p state listening 'sport = {srcport} and src {srcip}'"

    num_db_sockets = 0
    for controller in controllers:
        for db in ('nb', 'sb'):
            ovn_db_sockets_listening = sh.execute(
                get_ovn_db_sockets_listening_cmd.format(
                    srcport=ovn_conn[db]['port'], srcip=ovn_conn[db]['ip']),
                ssh_client=controller.ssh_client,
                sudo=True).stdout.splitlines()
            if ovn_db_sockets_centrallized:
                if 2 == len(ovn_db_sockets_listening):
                    num_db_sockets += 1
                    test_case.assertIn('ovsdb-server',
                                       ovn_db_sockets_listening[1])
            else:
                num_db_sockets += 1
                test_case.assertEqual(2, len(ovn_db_sockets_listening))
                test_case.assertIn('ovsdb-server', ovn_db_sockets_listening[1])

    if ovn_db_sockets_centrallized:
        test_case.assertEqual(2, num_db_sockets)
    else:
        test_case.assertEqual(2 * len(controllers), num_db_sockets)
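
A self-contained sketch of the ip/port parsing done above, covering both plain IPv4 and bracketed IPv6 connection strings (the sample strings are made up):

import re

def parse_ovn_conn(conn_str):
    # 'tcp:192.0.2.10:6641'  -> ('192.0.2.10', '6641')
    # 'ssl:[fd00::10]:6642'  -> ('[fd00::10]', '6642')
    ipv6 = re.findall(r'\[.*\]', conn_str)
    ip = ipv6[0] if ipv6 else conn_str.split(':')[1]
    port = conn_str.split(':')[-1]
    return ip, port
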
Example #8
    def setup_fixture(self):
        for node in topology.list_openstack_nodes():
            network_namespaces = ip.list_network_namespaces(
                ssh_client=node.ssh_client)
            if network_namespaces:
                self.network_namespace = network_namespaces.first
                self.ssh_client = node.ssh_client
Example #9
def test_ovs_bridges_mac_table_size():
    test_case = tobiko.get_test_case()
    expected_mac_table_size = '50000'
    get_mac_table_size_cmd = ('ovs-vsctl get bridge {br_name} '
                              'other-config:mac-table-size')
    if neutron.has_ovn():
        get_br_mappings_cmd = ('ovs-vsctl get Open_vSwitch . '
                               'external_ids:ovn-bridge-mappings')
    else:
        get_br_mappings_cmd = (
            'crudini --get /var/lib/config-data/puppet-generated/neutron/'
            'etc/neutron/plugins/ml2/openvswitch_agent.ini '
            'ovs bridge_mappings')
    for node in topology.list_openstack_nodes(group='overcloud'):
        try:
            br_mappings_str = sh.execute(get_br_mappings_cmd,
                                         ssh_client=node.ssh_client,
                                         sudo=True).stdout.splitlines()[0]
        except sh.ShellCommandFailed:
            LOG.debug(f"bridge mappings not configured on node '{node.name}'",
                      exc_info=1)
            continue
        br_list = [
            br_mapping.split(':')[1]
            for br_mapping in br_mappings_str.replace('"', '').split(',')
        ]
        for br_name in br_list:
            mac_table_size = sh.execute(
                get_mac_table_size_cmd.format(br_name=br_name),
                ssh_client=node.ssh_client,
                sudo=True).stdout.splitlines()[0]
            test_case.assertEqual(mac_table_size.replace('"', ''),
                                  expected_mac_table_size)
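
A sketch of the bridge-mappings parsing used above; the mapping string is a typical TripleO-style value, not output captured from a real node:

br_mappings_str = '"datacentre:br-ex,tenant:br-isolated"'
br_list = [br_mapping.split(':')[1]
           for br_mapping in br_mappings_str.replace('"', '').split(',')]
# br_list == ['br-ex', 'br-isolated']
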
Example #10
    def setup_fixture(self):
        for node in topology.list_openstack_nodes():
            network_namespaces = ip.list_network_namespaces(
                ignore_errors=True, ssh_client=node.ssh_client)
            if network_namespaces:
                self.network_namespace = network_namespaces[0]
                self.ssh_client = node.ssh_client
Example #11
def prepare_ansible_hosts_inventory():
    """create a hosts.yaml with ansible connections'
    specifications for overcloud nodes"""
    sshcu = topology.list_openstack_nodes(group='undercloud')[0].ssh_client
    sh.execute('if [ ! -f /home/stack/hosts.yaml ]; then '
               'source /home/stack/stackrc;tripleo-ansible-inventory '
               '--ansible_ssh_user heat-admin --static-yaml-inventory '
               'hosts.yaml;fi', ssh_client=sshcu, stdout=True)
Example #12
    def iter_ssh_clients():
        ssh_client = ssh.ssh_proxy_client()
        if isinstance(ssh_client, ssh.SSHClientFixture):
            yield ssh_client

        nodes = topology.list_openstack_nodes()
        for node in nodes:
            if isinstance(node.ssh_client, ssh.SSHClientFixture):
                yield node.ssh_client
Example #13
    def ovn_overcloud_processes_validations(self):
        """
        Checks that the oc_procs_df dataframe has OVN processes running on the
        expected overcloud node or nodes
        :return: Bool
        """
        if not neutron.has_ovn():
            LOG.info("Networking OVN not configured")
            return True

        for process_dict in self.ovn_processes_to_check_per_node:
            if not self.oc_procs_df.query('PROCESS=="{}"'.format(
                    process_dict['name'])).empty:
                LOG.info("overcloud processes status checks: "
                         f"process {process_dict['name']} is  "
                         "in running state")

                ovn_proc_filtered_df = self.oc_procs_df.query(
                    'PROCESS=="{}"'.format(process_dict['name']))

                if (process_dict['node_group']
                        not in topology.list_openstack_node_groups()):
                    LOG.debug(f"{process_dict['node_group']} is not "
                              "a node group part of this Openstack cloud")
                    continue
                node_list = [
                    node.name for node in topology.list_openstack_nodes(
                        group=process_dict['node_group'])
                ]
                node_names_re = re.compile(r'|'.join(node_list))
                node_filter = (ovn_proc_filtered_df.overcloud_node.str.match(
                    node_names_re))
                # obtain the processes running on a specific type of nodes
                ovn_proc_filtered_per_node_df = \
                    ovn_proc_filtered_df[node_filter]
                if isinstance(process_dict['number'], int):
                    assert process_dict['number'] == \
                        len(ovn_proc_filtered_per_node_df), (
                        "Unexpected number"
                        f" of processes {process_dict['name']} running on "
                        f"{process_dict['node_group']} nodes")
                elif process_dict['number'] == 'all':
                    num_nodes = len(node_list)
                    assert num_nodes == len(ovn_proc_filtered_per_node_df), (
                        "Unexpected number of processes "
                        f"{process_dict['name']} running on "
                        f"{process_dict['node_group']} nodes")
                else:
                    raise RuntimeError("Unexpected value:"
                                       f"{process_dict['node_group']}")
                # process successfully validated
                LOG.debug(f"{process_dict['name']} successfully validated on "
                          f"{process_dict['node_group']} nodes")

        # if all procs are running we can return true
        return True
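
A self-contained sketch of the dataframe filtering performed above (pandas assumed to be available; the data is invented):

import re
import pandas as pd

oc_procs_df = pd.DataFrame({
    'PROCESS': ['ovn-controller', 'ovn-controller', 'nova-compute'],
    'overcloud_node': ['controller-0', 'controller-1', 'compute-0']})
ovn_df = oc_procs_df.query('PROCESS=="ovn-controller"')
node_names_re = re.compile(r'controller-0|controller-1')
per_node_df = ovn_df[ovn_df.overcloud_node.str.match(node_names_re)]
assert len(per_node_df) == 2
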
Example #14
def run_container_config_validations():
    """check containers configuration in different scenarios
    """

    # TODO add here any generic configuration validation
    config_checkings = []

    if neutron.has_ovn():
        ovn_config_checkings = \
            [{'node_group': 'controller',
              'container_name': 'neutron_api',
              'config_file': '/etc/neutron/plugins/ml2/ml2_conf.ini',
              'param_validations': [{'section': 'ml2',
                                     'param': 'mechanism_drivers',
                                     'expected_value': 'ovn'},
                                    {'section': 'ml2',
                                     'param': 'type_drivers',
                                     'expected_value': 'geneve'},
                                    {'section': 'ovn',
                                     'param': 'ovn_l3_mode',
                                     'expected_value': 'True'},
                                    {'section': 'ovn',
                                     'param': 'ovn_metadata_enabled',
                                     'expected_value': 'True'}]}]
        config_checkings += ovn_config_checkings
    else:
        ovs_config_checkings = \
            [{'node_group': 'controller',
              'container_name': 'neutron_api',
              'config_file': '/etc/neutron/plugins/ml2/ml2_conf.ini',
              'param_validations': [{'section': 'ml2',
                                     'param': 'mechanism_drivers',
                                     'expected_value': 'openvswitch'}]}]
        config_checkings += ovs_config_checkings

    container_runtime_name = get_container_runtime_name()
    for config_check in config_checkings:
        for node in topology.list_openstack_nodes(
                group=config_check['node_group']):
            for param_check in config_check['param_validations']:
                obtained_param = sh.execute(
                    f"{container_runtime_name} exec -uroot "
                    f"{config_check['container_name']} crudini "
                    f"--get {config_check['config_file']} "
                    f"{param_check['section']} {param_check['param']}",
                    ssh_client=node.ssh_client,
                    sudo=True).stdout.strip()
                if param_check['expected_value'] not in obtained_param:
                    tobiko.fail(f"Expected {param_check['param']} value: "
                                f"{param_check['expected_value']}\n"
                                f"Obtained {param_check['param']} value: "
                                f"{obtained_param}")
        LOG.info("Configuration verified:\n"
                 f"node group: {config_check['node_group']}\n"
                 f"container: {config_check['container_name']}\n"
                 f"config file: {config_check['config_file']}")
Example #15
def remove_all_grastate_galera():
    """shut down galera properly,
    remove all grastate"""
    if tripleo_topology.is_composable_roles_env():
        nodes = topology.list_openstack_nodes(group='database')
    else:
        nodes = topology.list_openstack_nodes(group='controller')
    LOG.info('shut down galera: {} on all servers: {}'.format(
        disable_galera, nodes))
    if "resource 'galera' is not running on any node" not in\
            sh.execute(disable_galera, ssh_client=nodes[0].ssh_client).stdout:
        raise PcsDisableException()
    for node in nodes:
        sh.execute(remove_grastate, ssh_client=node.ssh_client)
    LOG.info('enable back galera: {} on all servers: {}'.format(
        enable_galera, nodes))
    if "resource 'galera' is master on node" not in\
            sh.execute(enable_galera, ssh_client=nodes[0].ssh_client).stdout:
        raise PcsEnableException()
Example #16
    def setUp(self):
        super(DockerClientTest, self).setUp()
        for node in topology.list_openstack_nodes(group='controller'):
            self.ssh_client = ssh_client = node.ssh_client
            break
        else:
            self.skip('No controller node found in the OpenStack topology')

        if not docker.is_docker_running(ssh_client=ssh_client):
            self.skip('Docker server is not running')
Example #17
    def ssh_client(self) -> ssh.SSHClientType:
        ssh_client = ssh.ssh_proxy_client()
        if isinstance(ssh_client, ssh.SSHClientFixture):
            return ssh_client

        nodes = topology.list_openstack_nodes()
        for node in nodes:
            if isinstance(node.ssh_client, ssh.SSHClientFixture):
                return node.ssh_client
        tobiko.skip_test('No SSH server host to connect to')
Example #18
    def ssh_client(self) -> ssh.SSHClientFixture:
        ssh_client = ssh.ssh_proxy_client()
        if isinstance(ssh_client, ssh.SSHClientFixture):
            return ssh_client

        nodes = topology.list_openstack_nodes()
        for node in nodes:
            if isinstance(node.ssh_client, ssh.SSHClientFixture):
                return node.ssh_client

        return self.server.ssh_client
Example #19
    def test_list_openstack_topology(self, group=None, hostnames=None):
        nodes = topology.list_openstack_nodes(topology=self.topology,
                                              group=group,
                                              hostnames=hostnames)
        self.assertTrue(set(nodes).issubset(set(self.topology.nodes)))
        for node in nodes:
            if group:
                self.assertIn(group, node.groups)
            if hostnames:
                hostnames = [node_name_from_hostname(h) for h in hostnames]
                self.assertIn(node.name, hostnames)
        return nodes
Example #20
    def setup_fixture(self):
        nodes = topology.list_openstack_nodes()
        for node in nodes:
            assert node.ssh_client is not None
            if podman.is_podman_running(ssh_client=node.ssh_client):
                self.node = node
                break

        if self.node is None:
            nodes_text = ' '.join(node.name for node in nodes)
            tobiko.skip_test("Podman server is not running in any of nodes "
                             f"{nodes_text}")
Example #21
    def setUp(self):
        super(TestFloatingIPLogging, self).setUp()
        net = self.stack.network_id
        self.port = neutron.create_port(**{'network_id': net})
        self.addCleanup(self.cleanup_port)
        self.fip = neutron.create_floating_ip()
        self.addCleanup(self.cleanup_floatingip)
        log_filename = '/var/log/containers/neutron/server.log'
        self.log_digger = files.MultihostLogFileDigger(filename=log_filename,
                                                       sudo=True)
        for node in topology.list_openstack_nodes(group='controller'):
            self.log_digger.add_host(hostname=node.hostname,
                                     ssh_client=node.ssh_client)
Example #22
def reset_all_compute_nodes(hard_reset=False):

    # Reboot all computes and wait for SSH to come back up on them.
    # Both hard and soft reset are issued without waiting (fire and forget).
    if hard_reset:
        reset_method = sh.hard_reset_method
    else:
        reset_method = sh.soft_reset_method
    for compute in topology.list_openstack_nodes(group='compute'):
        # sh.reboot_host with wait=False does not block (fire and forget)
        sh.reboot_host(ssh_client=compute.ssh_client,
                       wait=False,
                       method=reset_method)
        LOG.info('reboot exec: {} on server: {}'.format(
            reset_method, compute.name))
        tobiko.cleanup_fixture(compute.ssh_client)

    for compute in topology.list_openstack_nodes(group='compute'):
        compute_checked = sh.execute("hostname",
                                     ssh_client=compute.ssh_client,
                                     expect_exit_status=None).stdout
        LOG.info('{} is up '.format(compute_checked))
Example #23
def test_controllers_shutdown():
    test_case = tobiko.get_test_case()

    all_nodes = topology.list_openstack_nodes(group='controller')
    if len(all_nodes) < 3:
        tobiko.skip_test('It requires at least three controller nodes')

    all_node_names = [node.name for node in all_nodes]
    LOG.info("Ensure all controller nodes are running: " f"{all_node_names}")
    for node in all_nodes:
        node.power_on_overcloud_node()
    topology.assert_reachable_nodes(all_nodes)

    LOG.debug('Check VM is running while all controllers nodes are on')
    nova_server = tobiko.setup_fixture(stacks.CirrosServerStackFixture)
    nova_server_ip = nova_server.ip_address
    ping.assert_reachable_hosts([nova_server_ip])

    quorum_level = math.ceil(0.5 * len(all_nodes))
    assert quorum_level >= len(all_nodes) - quorum_level
    nodes = random.sample(all_nodes, quorum_level)
    node_names = [node.name for node in nodes]
    LOG.info(f"Power off {quorum_level} random controller nodes: "
             f"{node_names}")
    for node in nodes:
        node.power_off_overcloud_node()
        test_case.addCleanup(node.power_on_overcloud_node)
    topology.assert_unreachable_nodes(nodes, retry_count=1)
    topology.assert_reachable_nodes(node for node in all_nodes
                                    if node not in nodes)

    LOG.debug('Check whether VM is still running while some '
              'controller nodes are off')
    reachable, unreachable = ping.ping_hosts([nova_server_ip], count=1)
    if reachable:
        LOG.debug(f"VM ips are reachable: {reachable}")
    if unreachable:
        LOG.debug(f"VM is are unreachable: {unreachable}")
    # TODO what do we expect here: VM reachable or unreachable?

    random.shuffle(nodes)
    LOG.info(f"Power on controller nodes: {node_names}")
    for node in nodes:
        node.power_on_overcloud_node()

    LOG.debug("Check all controller nodes are running again: "
              f"{all_node_names}")
    topology.assert_reachable_nodes(all_nodes, retry_timeout=600.)

    LOG.debug('Check VM is running while all controllers nodes are on')
    ping.assert_reachable_hosts([nova_server_ip])
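
The quorum arithmetic above, worked through for the minimal three-controller case:

import math

all_nodes = ['controller-0', 'controller-1', 'controller-2']  # illustrative names
quorum_level = math.ceil(0.5 * len(all_nodes))        # ceil(1.5) == 2 nodes powered off
assert quorum_level >= len(all_nodes) - quorum_level  # 2 >= 1, one controller stays up
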
Example #24
def remove_one_grastate_galera():
    """shut down galera properly,
    delete /var/lib/mysql/grastate.dat in a random node,
    check that bootstrap is done from a node with grastate"""
    if tripleo_topology.is_composable_roles_env():
        nodes = topology.list_openstack_nodes(group='database')
    else:
        nodes = topology.list_openstack_nodes(group='controller')
    node = random.choice(nodes)
    LOG.info('disable haproxy-bundle')
    if "resource 'haproxy-bundle' is not running on any node" not in\
            sh.execute(disable_haproxy, ssh_client=node.ssh_client).stdout:
        raise PcsDisableException()
    LOG.info('shut down galera: {} on all servers: {}'.format(
        disable_galera, nodes))
    if "resource 'galera' is not running on any node" not in\
            sh.execute(disable_galera, ssh_client=node.ssh_client).stdout:
        raise PcsDisableException()
    LOG.info('remove grastate: {} on server: {}'.format(
        remove_grastate, node.name))
    sh.execute(remove_grastate, ssh_client=node.ssh_client)
    LOG.info('enable back galera: {} on all servers: {}'.format(
        enable_galera, nodes))
    if "resource 'galera' is master on node" not in\
            sh.execute(enable_galera, ssh_client=node.ssh_client).stdout:
        raise PcsEnableException()
    LOG.info('enable haproxy-bundle')
    if "resource 'haproxy-bundle' is running on node" not in\
            sh.execute(enable_haproxy, ssh_client=node.ssh_client).stdout:
        raise PcsEnableException()
    # gcomm:// without args means that bootstrap is done from this node
    bootstrap = sh.execute(check_bootstrap, ssh_client=node.ssh_client).stdout
    if re.search('wsrep-cluster-address=gcomm:// --', bootstrap) is not None:
        raise GaleraBoostrapException()
    lastDate = re.findall(
        r"\w{,3}\s*\w{,3}\s*\d{,2}\s*\d{,2}:\d{,2}:\d{,2}\s*"
        r"\d{4}", bootstrap)[-1]
    return node, lastDate
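
A sketch of the timestamp extraction at the end of the helper above; the log line is fabricated to match the regex, not real galera output:

import re

bootstrap = 'Wed Jul  7 12:34:56 2021 galera bootstrap log line'
last_date = re.findall(
    r"\w{,3}\s*\w{,3}\s*\d{,2}\s*\d{,2}:\d{,2}:\d{,2}\s*"
    r"\d{4}", bootstrap)[-1]
# last_date == 'Wed Jul  7 12:34:56 2021'
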
Example #25
def get_nodes_for_groups(groups):
    """Search for all nodes that are matched with the specified groups

    :param groups: List of groups nodes can belong to
    :type groups: list
    :return: List of nodes that belong to the specified groups
    :rtype: list of tobiko.openstack.topology.OpenStackTopologyNode
    """
    nodes = []
    for node in topology.list_openstack_nodes():
        for group in node.groups:
            if group in groups:
                nodes.append(node)
                break  # avoid adding the same node more than once
    return nodes
Example #26
def check_computes_vms_running_via_virsh():
    """check all vms are running via virsh list command"""
    for compute in topology.list_openstack_nodes(group='compute'):
        hostname = get_fqdn_from_topology_node(compute)
        retry = tobiko.retry(timeout=120, interval=5)
        for vm_id in get_compute_vms_df(hostname)['vm_id'].to_list():
            for _ in retry:
                if check_vm_running_via_virsh(compute, vm_id):
                    LOG.info(f"{vm_id} is running ok on "
                             f"{compute.hostname}")
                    break
                else:
                    LOG.info(f"{vm_id} is not in running state on "
                             f"{compute.hostname}")
Example #27
    def get_runtime() -> typing.Optional[ContainerRuntime]:
        """check what container runtime is running
        and return a handle to it"""
        # TODO THIS LOCKS SSH CLIENT TO CONTROLLER
        for node in topology.list_openstack_nodes(group='controller'):
            try:
                result = sh.execute('podman --version || docker --version',
                                    ssh_client=node.ssh_client)
            except sh.ShellCommandFailed:
                continue
            for runtime in CONTAINER_RUNTIMES:
                for version in [result.stdout, result.stderr]:
                    if runtime.match_version(version):
                        return runtime
        raise RuntimeError(
            "Unable to find any container runtime in any overcloud "
            "controller node")
Example #28
def test_ovs_interfaces_are_absent(
        group: typing.Pattern[str] = OPENSTACK_NODE_GROUP,
        interface: typing.Pattern[str] = OVS_INTERFACE):
    nodes = topology.list_openstack_nodes(group=group)

    interfaces: typing.Dict[str,
                            typing.List[str]] = (collections.defaultdict(list))
    for node in nodes:
        for node_interface in ip.list_network_interfaces(
                ssh_client=node.ssh_client, sudo=True):
            if interface.match(node_interface):
                interfaces[node.name].append(node_interface)
    interfaces = dict(interfaces)

    test_case = tobiko.get_test_case()
    test_case.assertEqual(
        {}, interfaces,
        f"OVS interface(s) found on OpenStack nodes: {interfaces}")
Example #29
    def setUp(self):
        # pylint: disable=no-member
        super(OctaviaServicesFaultTest, self).setUp()

        # Skip the test unless exactly 3 controllers are available (e.g. TripleO)
        self.controllers = topology.list_openstack_nodes(group='controller')

        if 3 != len(self.controllers):
            skip_reason = "The number of controllers should be 3 for this test"
            self.skipTest(skip_reason)

        # Wait for Octavia objects to be active
        LOG.info('Waiting for member '
                 f'{self.listener_stack.server_stack.stack_name} and '
                 f'for member '
                 f'{self.listener_stack.other_server_stack.stack_name} '
                 f'to be created...')
        self.listener_stack.wait_for_active_members()

        self.loadbalancer_stack.wait_for_octavia_service()

        self.listener_stack.wait_for_members_to_be_reachable()

        # For 5 minutes we ignore specific exceptions as we know
        # that Octavia resources are being provisioned
        # Sending initial traffic before we stop octavia services
        for attempt in tobiko.retry(timeout=300.):
            try:
                octavia.check_members_balanced(
                    pool_id=self.listener_stack.pool_id,
                    ip_address=self.loadbalancer_stack.floating_ip_address,
                    lb_algorithm=self.listener_stack.lb_algorithm,
                    protocol=self.listener_stack.lb_protocol,
                    port=self.listener_stack.lb_port)
                break
            except (octavia.RoundRobinException, octavia.TrafficTimeoutError,
                    sh.ShellCommandFailed):
                LOG.exception(f"Traffic didn't reach all members after "
                              f"#{attempt.number} attempts and "
                              f"{attempt.elapsed_time} seconds")
                if attempt.is_last:
                    raise
Example #30
def list_containers(group=None):
    """get list of containers in running state
    from specified node group
    returns : a list of overcloud_node's running containers"""

    # moved here from topology
    # reason : Workaround for :
    # AttributeError: module 'tobiko.openstack.topology' has no
    # attribute 'container_runtime'

    if group is None:
        group = 'overcloud'
    containers_list = tobiko.Selection()
    openstack_nodes = topology.list_openstack_nodes(group=group)

    for node in openstack_nodes:
        LOG.debug(f"List containers for node {node.name}")
        node_containers_list = list_node_containers(ssh_client=node.ssh_client)
        containers_list.extend(node_containers_list)
    return containers_list
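
A usage sketch for the helper above (group names come from this snippet's own defaults):

compute_containers = list_containers(group='compute')
overcloud_containers = list_containers()  # defaults to the 'overcloud' group
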