def _get_dpdk_nics_info(self, hypervisor_ip):
    """Collect details of every active DPDK NIC on a hypervisor.

    Lists OVS interfaces over ssh, keeps those of type 'dpdk' with admin
    state 'up', then gathers per-NIC mac/mtu/numa details and resolves the
    device name and PCI address through the os-net-config DPDK mapping.

    :param hypervisor_ip: hypervisor to query over ssh
    :return: list of dicts with 'mac', 'mtu', 'numa_node', 'nic', 'pci' keys
    """
    dpdk_nics_info = []
    list_cmd = ("sudo ovs-vsctl --columns=name,type,admin_state "
                "--format=json list interface")
    interfaces = json.loads(
        shell_utils.run_command_over_ssh(hypervisor_ip, list_cmd))
    # Keep only dpdk-typed interfaces whose admin state is up.
    active_dpdk = [str(row[0]) for row in interfaces.get('data', [])
                   if row and str(row[1]) == 'dpdk' and str(row[2]) == 'up']
    if not active_dpdk:
        return dpdk_nics_info
    detail_cmd = ("sudo ovs-vsctl --column=mac-in-use,mtu,status "
                  "--format=json list interface " + ' '.join(active_dpdk))
    details = json.loads(
        shell_utils.run_command_over_ssh(hypervisor_ip, detail_cmd))
    for row in details.get('data', []):
        entry = {'mac': row[0], 'mtu': row[1]}
        # row[2] is an ovs json map: ['map', [[key, value], ...]]
        for key, value in row[2][1]:
            if key == 'numa_id':
                entry['numa_node'] = int(value)
                mapping = self._get_dpdk_nics_mapping(hypervisor_ip, row[0])
                entry['nic'] = mapping['name']
                entry['pci'] = mapping['pci_address']
                dpdk_nics_info.append(entry)
    return dpdk_nics_info
    def test_offload_nic_eswitch_mode(self):
        """Check eswitch mode of nic for offload on all hypervisors

        By default, offload nics are auto discovered.
        But if the user would like to not perform the autodiscover and
        provide the nics, it could be done by modifying the
        CONF.nfv_plugin_options.offload_nics param in deployer-input file.
        """
        LOG.info('Starting offload_nic_eswitch_mode test')
        # Retrieve all hypervisors
        hypervisors = self._get_hypervisor_ip_from_undercloud()
        offload_nics = CONF.nfv_plugin_options.offload_nics
        if not offload_nics:
            LOG.info('The offload nics are not provided. Detecting...')
            offload_nics = self.discover_hw_offload_nics(hypervisors)
        LOG.info('Test the following offload nics - {}'.format(offload_nics))
        # devlink cmd to retrieve switch mode of interface
        devlink_cmd = "sudo devlink dev eswitch show pci/{}"
        # Initialize results list
        result = []
        # Expected result is a list of dicts containing a dict of
        # hypervisor's IP, its offload nics as keys and the value 'true'
        # Example:
        # [{'192.0.160.1': [{'p6p1': 'true'}, {'p6p2': 'true'}]},
        #  {'192.0.160.2': [{'p6p1': 'true'}, {'p6p2': 'true'}]}]
        expected_result = [{ip: [{nic: 'true'} for nic, _ in nics.items()]}
                           for ip, nics in offload_nics.items()]
        for hypervisor, nics in offload_nics.items():
            dev_result = []
            # Check hw-offload config on hypervisor
            hyper_check = \
                'sudo ovs-vsctl get Open_vSwitch . other_config:hw-offload'
            hyper_offload_state = shell_utils.run_command_over_ssh(hypervisor,
                                                                   hyper_check)
            # Without hw-offload enabled in OVS there is no point checking
            # the individual nics; record the failure and move on.
            if not hyper_offload_state.strip() == '"true"':
                dev_result.append('No hw-offload on hypervisor')
                result.append({hypervisor: dev_result})
                LOG.info('No hw-offload on hypervisor {}'.format(hypervisor))
                continue
            LOG.info('Hw-offload configured on hyper - {}'.format(hypervisor))
            for nic, nic_options in nics.items():
                # 'switchdev' in the devlink output means the nic eswitch
                # is in offload (switchdev) mode rather than legacy mode.
                dev_query = shell_utils.run_command_over_ssh(
                    hypervisor, devlink_cmd.format(nic_options['bus-info']))
                if 'switchdev' in dev_query:
                    output = 'true'
                else:
                    output = 'false'
                LOG.info("Hypervisor '{h}' NIC '{n}' is in switchdev mode: {r}"
                         .format(h=hypervisor, n=nic, r=output))
                dev_result.append({nic: output})
            result.append({hypervisor: dev_result})
        msg = "Not all hypervisors contains nics in switchev mode"
        self.assertItemsEqual(expected_result, result, msg)
    def test_hypervisor_reboot(self, test='hypervisor_reboot'):
        """Test functionality of DPDK and SRIOV after hypervisor reboot

        The test will spawn up an instance and then will
        reboot the hypervisor that holds the test instance.
        After hypervisor boot up, the instance will be started
        and tested for the accessability.

        :param test: Test name from the external config file.
        """
        servers, key_pair = self.create_and_verify_resources(test=test)
        # Ensure that we are using microversion '2.32' from now
        self.useFixture(
            api_microversion_fixture.APIMicroversionFixture('2.32'))
        LOG.info("Locate instance hypervisor")
        srv_hyper_name = self.os_admin.servers_client.show_server(
            servers[0]['id'])['server']['OS-EXT-SRV-ATTR:host']
        # All instances on that hypervisor are stopped/started, not just
        # the one under test.
        srv_on_hyper = self.hypervisor_client.list_servers_on_hypervisor(
            srv_hyper_name)['hypervisors'][0]['servers']
        LOG.info("Shut down the instances and reboot the hypervisor "
                 "the instance resides on")
        # In order the prevent instances file system corruption,
        # shut down the instance.
        for srv in srv_on_hyper:
            self.servers_client.stop_server(srv['uuid'])
            waiters.wait_for_server_status(self.servers_client, srv['uuid'],
                                           'SHUTOFF')
        shell_utils.run_command_over_ssh(servers[0]['hypervisor_ip'],
                                         "sudo reboot")
        # Reboot of the baremetal hypervisor takes time.
        # In order to not confuse the test, look for the hypervisor status
        # "down" and then "up".
        # NOTE(review): if the timeout expires before "up" is observed the
        # test proceeds anyway and will fail on the instance start below.
        hyper_rebooted = False
        timeout_start = time.time()
        timeout_end = CONF.nfv_plugin_options.hypervisor_wait_timeout
        while time.time() < timeout_start + timeout_end:
            time.sleep(10)
            hyper_state = self.hypervisor_client.search_hypervisor(
                srv_hyper_name)['hypervisors'][0]['state']
            if 'down' in hyper_state:
                hyper_rebooted = True
                continue
            if hyper_rebooted and 'up' in hyper_state:
                break
        LOG.info("Hypervisor has been rebooted. Booting up the instances.")
        for srv in srv_on_hyper:
            self.servers_client.start_server(srv['uuid'])
            waiters.wait_for_server_status(self.servers_client, srv['uuid'],
                                           'ACTIVE')
        LOG.info("Check instances connectivity")
        for srv in servers:
            self.check_instance_connectivity(ip_addr=srv['fip'],
                                             user=self.instance_user,
                                             key_pair=key_pair['private_key'])
        LOG.info("The hypervisor reboot test passed.")
    def test_restart_ovs(self, test='restart_ovs'):
        """Restart OVS on a hypervisor and verify its config is reloaded.

        After restarting the openvswitch service the LACP deployment test
        is re-run to confirm the configuration survived the restart.
        """
        LOG.info('Starting restart_ovs test.')

        # Operate on the first hypervisor reported by the undercloud.
        target_ip = self._get_hypervisor_ip_from_undercloud(
            shell='/home/stack/stackrc')[0]

        shell_utils.run_command_over_ssh(
            target_ip, 'sudo systemctl restart openvswitch.service')
        # The LACP check doubles as a "config loaded properly" check.
        self.test_deployment_lacp(hypervisor_ip=target_ip)
Exemplo n.º 5
0
    def test_igmp_restart_ovs(self, test='igmp_restart_ovs'):
        """Restart OVS on all hypervisors and re-check IGMP configuration.

        Multicast (IGMP snooping) settings must persist across an
        openvswitch service restart; verified by re-running
        test_igmp_snooping_deployment afterwards.
        """
        LOG.info('Starting {} test.'.format(test))

        restart_cmd = 'sudo systemctl restart openvswitch.service'
        for hypervisor_ip in self._get_hypervisor_ip_from_undercloud(
                shell='/home/stack/stackrc'):
            shell_utils.run_command_over_ssh(hypervisor_ip, restart_cmd)
        # The snooping settings must still be present after the restart.
        self.test_igmp_snooping_deployment()
Exemplo n.º 6
0
    def test_offload_ovs_config(self):
        """Verify OVS hw-offload is enabled on every hypervisor."""
        # Query the other_config:hw-offload knob from OVS.
        cmd = ("sudo ovs-vsctl get open_vswitch . "
               "other_config:hw-offload")
        hypervisors = self._get_hypervisor_ip_from_undercloud(
            shell='/home/stack/stackrc')
        # Expected: every hypervisor reports 'true', e.g.
        # [{192.0.60.1: 'true'}, {192.0.60.2: 'true'}]
        expected_result = [{ip: 'true'} for ip in hypervisors]
        result = []
        for hypervisor in hypervisors:
            out = shell_utils.run_command_over_ssh(hypervisor, cmd)
            # Empty output means the option is not set at all, which
            # counts as HW-Offload disabled. Otherwise strip the trailing
            # newline and surrounding double quotes.
            state = out.rstrip().replace('"', '') if out else 'false'
            LOG.info("Hypervisor '{h}' is OVS HW-offload "
                     "capable: '{r}'".format(h=hypervisor,
                                             r=state))
            result.append({hypervisor: state})
        msg = "Not all hypervisors have OVS HW-Offload enabled"
        self.assertItemsEqual(expected_result, result, msg)
    def test_restart_ovs(self, test='restart_ovs'):
        """Restart OVS and verify the LACP configuration is reloaded.

        Sleeps afterwards so that later test cases do not start before
        the node has fully settled.
        """
        LOG.info('Starting restart_ovs test.')

        target_ip = self._get_hypervisor_ip_from_undercloud()[0]

        shell_utils.run_command_over_ssh(
            target_ip, 'sudo systemctl restart openvswitch.service')
        self.test_deployment_lacp(hypervisor_ip=target_ip)

        # Give time to have everything up after the restart so that other
        # testcases executed after this one do not fail.
        time.sleep(60)
    def test_deployment_lacp(self, test='deployment_lacp', hypervisor_ip=None):
        """Check that lacp bonding is properly configure

        Configuration options example:
         - name: deployment_lacp
           bonding_config:
             - bond_name: 'dpdkbond1'
               bond_mode: 'balance-tcp'
               lacp_status: 'negotiated'
               lacp_time: 'fast'
               lacp_fallback_ab: 'true'

        :param test: Test name from the external config file.
        :param hypervisor_ip: Hypervisor to inspect; defaults to the first
                              one reported by the undercloud.
        """
        LOG.info('Starting deployment_lacp test.')

        if hypervisor_ip is None:
            hypervisor_ip = self._get_hypervisor_ip_from_undercloud(
                shell='/home/stack/stackrc')[0]

        bonding_dict = {}
        test_setup_dict = self.test_setup_dict[test]
        if 'config_dict' in test_setup_dict and \
           'bonding_config' in test_setup_dict['config_dict']:
            bonding_dict = test_setup_dict['config_dict']['bonding_config'][0]

        # Gather bond and lacp state from OVS as "key:value" lines.
        cmd = 'sudo ovs-appctl bond/show {0} | '\
              'egrep "^bond_mode|^lacp_status|^lacp_fallback_ab"; '\
              'sudo ovs-appctl lacp/show {0} | '\
              'egrep "lacp_time"'.format(bonding_dict['bond_name'])
        output = shell_utils.run_command_over_ssh(hypervisor_ip, cmd) \
            .replace('\t', '').replace(' ', '').split('\n')
        bond_data = {}
        for line in output:
            data = line.split(':')
            if len(data) == 2:
                bond_data[data[0]] = data[1]

        result = []
        checks = {'bond_mode', 'lacp_status', 'lacp_time', 'lacp_fallback_ab'}
        diff_checks_cmd = checks - set(bond_data.keys())
        diff_checks_cfg = checks - set(bonding_dict.keys())
        if len(diff_checks_cmd) > 0:
            result.append("Missing checks: {}. Check ovs commands "
                          "output".format(', '.join(diff_checks_cmd)))

        # BUG FIX: this branch previously re-tested diff_checks_cmd, so
        # checks missing from the testcase config file were never reported.
        if len(diff_checks_cfg) > 0:
            result.append("Missing checks: {}. Check testcase config "
                          "file".format(', '.join(diff_checks_cfg)))

        # Compare every check that is present on both sides.
        for check in checks:
            if check not in diff_checks_cmd and \
               check not in diff_checks_cfg:
                if bond_data[check] != bonding_dict[check]:
                    result.append("Check failed: {}, Expected: {} - "
                                  "Found: {}".format(check,
                                                     bonding_dict[check],
                                                     bond_data[check]))
        self.assertTrue(len(result) == 0, '. '.join(result))
        return True
 def _get_numa_nodes(self, hypervisor_ip):
     """Return the distinct NUMA node ids available on a hypervisor.

     Parses 'lscpu -p=NODE' output, preserving first-seen order.
     """
     cmd = "sudo lscpu -p=NODE | grep -v ^#"
     output = shell_utils.run_command_over_ssh(hypervisor_ip, cmd)
     discovered = []
     for entry in output.split('\n'):
         if not entry:
             continue
         node_id = int(entry.strip(' '))
         if node_id not in discovered:
             discovered.append(node_id)
     return discovered
    def test_deployment_lacp(self, hypervisor_ip=None):
        """Check that lacp bonding is properly configured

        The test uses the following configuration options example:
        bond_mode: 'balance-tcp'
        lacp_status: 'negotiated'
        lacp_time: 'fast'
        lacp_fallback_ab: 'true'

        The "bond_name" is auto discovered.

        :param hypervisor_ip: Hypervisor to inspect; defaults to the first
                              one reported by the undercloud.
        """
        LOG.info('Starting deployment_lacp test.')

        if hypervisor_ip is None:
            hypervisor_ip = self._get_hypervisor_ip_from_undercloud()[0]

        lacp_config = json.loads(CONF.nfv_plugin_options.lacp_config)
        lacp_bond = self.retrieve_lacp_ovs_bond(hypervisor_ip)

        # Gather bond and lacp state from OVS as "key:value" lines.
        cmd = 'sudo ovs-appctl bond/show {0} | '\
              'egrep "^bond_mode|^lacp_status|^lacp_fallback_ab"; '\
              'sudo ovs-appctl lacp/show {0} | '\
              'egrep "lacp_time"'.format(lacp_bond['bond_name'])
        output = shell_utils.run_command_over_ssh(hypervisor_ip, cmd) \
            .replace('\t', '').replace(' ', '').split('\n')
        bond_data = {}
        for line in output:
            data = line.split(':')
            if len(data) == 2:
                bond_data[data[0]] = data[1]

        result = []
        checks = {'bond_mode', 'lacp_status', 'lacp_time', 'lacp_fallback_ab'}
        diff_checks_cmd = checks - set(bond_data.keys())
        diff_checks_cfg = checks - set(lacp_config.keys())
        if len(diff_checks_cmd) > 0:
            result.append("Missing checks: {}. Check ovs commands "
                          "output".format(', '.join(diff_checks_cmd)))

        # BUG FIX: this branch previously re-tested diff_checks_cmd, so
        # checks missing from the lacp config were never reported.
        if len(diff_checks_cfg) > 0:
            result.append("Missing checks: {}. Check testcase config "
                          "file".format(', '.join(diff_checks_cfg)))

        # Compare every check that is present on both sides.
        for check in checks:
            if check not in diff_checks_cmd and \
               check not in diff_checks_cfg:
                if bond_data[check] != lacp_config[check]:
                    result.append("Check failed: {}, Expected: {} - "
                                  "Found: {}".format(check, lacp_config[check],
                                                     bond_data[check]))
        self.assertTrue(len(result) == 0, '. '.join(result))
        return True
Exemplo n.º 11
0
    def test_offload_ovs_flows(self, test='offload_flows'):
        """Check OVS offloaded flows

        The following test deploy vms, on hw-offload computes.
        It sends async ping and check offload flows exist in ovs.

        :param test: Test name from the external config file.
        """

        LOG.info('Start test_offload_ovs_flows test.')
        LOG.info('test_offload_ovs_flows create vms')
        # Create servers
        servers, key_pair = self.create_and_verify_resources(test=test,
                                                             num_servers=4)
        cmd = 'sudo ovs-appctl dpctl/dump-flows type=offloaded'
        # Iterate over created servers
        for server in servers:

            # Generate traffic in the background so that offloaded flows
            # exist while we inspect ovs below.
            shell_utils.continuous_ping(server['fip'],
                                        duration=30)
            LOG.info('test_offload_ovs_flows verify flows on geust {}'.
                     format(server['fip']))

            out = shell_utils.\
                run_command_over_ssh(server['hypervisor_ip'],
                                     cmd)
            ports =  \
                self.os_admin.ports_client.list_ports(device_id=server['id'])
            msg = ('Port with mac address {} is expected to be part of '
                   'offloaded flows')
            # Only ports bound with the 'switchdev' capability are expected
            # to appear in the offloaded flows dump.
            for port in ports['ports']:
                if 'capabilities' in port['binding:profile'] and 'switchdev'\
                        in port['binding:profile']['capabilities']:
                    self.assertIn(port['mac_address'], out,
                                  msg.format(port['mac_address']))
        # Pings are running check flows exist
        # Retrieve all hypvervisors
        hypervisors = self._get_hypervisor_ip_from_undercloud(
            shell='/home/stack/stackrc')
        # Command to check offloaded flows in OVS
        cmd = 'sudo ovs-appctl dpctl/dump-flows type=offloaded'
        for hypervisor in hypervisors:
            out = shell_utils.run_command_over_ssh(hypervisor,
                                                   cmd)
            msg = 'Hypervisor {} has no offloaded flows in OVS'.format(
                hypervisor)
            self.assertNotEmpty(out, msg)
            LOG.info('Hypercisor {} has offloaded flows in OVS'.format(
                hypervisor))

        # send stop statistics signal
        shell_utils.stop_continuous_ping()
 def _get_dpdk_nics_mapping(self, hypervisor_ip, mac):
     """Find the os-net-config DPDK mapping entry for a MAC address.

     :param hypervisor_ip: hypervisor to query over ssh
     :param mac: MAC address to look up
     :return: the matching mapping dict from dpdk_mapping.yaml
     :raises Exception: when no mapping entry matches the MAC.
     """
     cmd = "sudo cat /var/lib/os-net-config/dpdk_mapping.yaml"
     output = shell_utils.run_command_over_ssh(hypervisor_ip, cmd)
     for mapping in yaml.safe_load(output):
         if mapping['mac_address'] == mac:
             return mapping
     # No entry matched the requested MAC address.
     msg = ("Unable to determine DPDK NIC Mapping for "
            "MAC: '%(mac)s'" % {
                'mac': mac
            })
     raise Exception(msg)
Exemplo n.º 13
0
    def test_igmp_snooping_deployment(self, test='igmp_snooping_deployment'):
        """Check that igmp snooping bonding is properly configure

        mcast_snooping_enable and mcast-snooping-disable-flood-unregistered
        configured in br-int

        :param test: Test name from the external config file.
        """
        LOG.info('Starting {} test.'.format(test))

        hypervisors = self._get_hypervisor_ip_from_undercloud(
            shell='/home/stack/stackrc')

        result = []
        cmd = 'sudo ovs-vsctl --format=json list bridge br-int'
        # Expected br-int settings.
        checks = {
            'mcast_snooping_enable': True,
            'mcast-snooping-disable-flood-unregistered': 'true'
        }

        for hypervisor_ip in hypervisors:
            output = shell_utils.run_command_over_ssh(hypervisor_ip, cmd)
            # ovs command returns boolean in small letters
            ovs_data = json.loads(output)
            ovs_data_filt = {}
            try:
                # Pull the two settings out of the json table by locating
                # their columns through the 'headings' list.
                ovs_data_filt['mcast_snooping_enable'] = \
                    (ovs_data['data'][0]
                     [ovs_data['headings'].index('mcast_snooping_enable')])
                ovs_data_filt['mcast-snooping-disable-flood-unregistered'] = \
                    (dict(ovs_data['data'][0]
                          [ovs_data['headings'].index('other_config')][1])
                     ['mcast-snooping-disable-flood-unregistered'])
            except Exception:
                # Deliberate best-effort: a missing column or key simply
                # leaves ovs_data_filt incomplete, which is reported as a
                # missing check below.
                pass

            diff_checks_cmd = (set(checks.keys()) - set(ovs_data_filt.keys()))
            if len(diff_checks_cmd) > 0:
                result.append(
                    "{}. Missing checks: {}. Check ovs cmd output".format(
                        hypervisor_ip, ', '.join(diff_checks_cmd)))

            # Compare only the settings that were successfully extracted.
            for check in checks:
                if check not in diff_checks_cmd:
                    if ovs_data_filt[check] != checks[check]:
                        msg = ("{}. Check failed: {}. Expected: {} - Found: {}"
                               .format(hypervisor_ip, check, checks[check],
                                       ovs_data_filt[check]))
                        result.append(msg)

        self.assertTrue(len(result) == 0, '. '.join(result))
        return True
    def test_igmp_restart_ovs(self, test='igmp_restart_ovs'):
        """Test restart ovs

        Check that multicast configuration is not lost after ovs restart.
        Restart ovs and then execute test_igmp_snooping_deployment

        :param test: Test name from the external config file.
        """
        LOG.info('Starting {} test.'.format(test))
        network_backend = self.discover_deployment_network_backend()
        hypervisor_ips = self._get_hypervisor_ip_from_undercloud()
        ovs_cmd = 'sudo systemctl restart openvswitch.service'
        for hyp in hypervisor_ips:
            shell_utils.run_command_over_ssh(hyp, ovs_cmd)
        if network_backend == 'ovn':
            ovn_cmd = 'sudo systemctl restart tripleo_ovn_controller.service'
            controller_ips = shell_utils.get_controllers_ip_from_undercloud(
                shell=CONF.nfv_plugin_options.undercloud_rc_file)
            # We assume that controller nodes act as ovn controllers
            # BUG FIX: restart OVN on each controller node ('node'), not on
            # the stale 'hyp' variable left over from the loop above, which
            # restarted OVS on the last hypervisor repeatedly instead.
            for node in controller_ips:
                shell_utils.run_command_over_ssh(node, ovn_cmd)
        self.test_igmp_snooping_deployment()

        # Give time to have everything up after reboot so that other testcases
        # executed after this one do not fail
        time.sleep(60)
 def _get_node_nfv_status(self, hypervisor_ip):
     """Detect whether DPDK and/or SRIOV are enabled on a hypervisor.

     :param hypervisor_ip: hypervisor to query over ssh
     :return: tuple of booleans (dpdk_status, sriov_status)
     """
     dpdk_enabled = False
     sriov_enabled = False
     # Print '1' when ovs-vswitchd reports DPDK support, then a '|'
     # separator, then the node's service names json.
     cmd = ("sudo ovs-vswitchd --version | "
            "awk '{ if ($1 == \"DPDK\") print 1; }';"
            "echo '|';sudo cat /etc/puppet/hieradata/service_names.json")
     output = shell_utils.run_command_over_ssh(hypervisor_ip, cmd)
     if output:
         parts = output.split('|')
         if parts:
             dpdk_enabled = '1' in parts[0].strip('\n')
             services = json.loads(parts[1])
             # SRIOV is considered on when the sriov agent is deployed.
             sriov_enabled = bool(
                 services['service_names']
                 and "neutron_sriov_agent" in services['service_names'])
     return dpdk_enabled, sriov_enabled
 def _get_cpu_details(self, hypervisor_ip):
     """Fetch the CPU model name and flag list from a hypervisor.

     :param hypervisor_ip: hypervisor to query over ssh
     :return: tuple (cpu_model, cpu_flags)
     :raises Exception: if lscpu produced no output.
     """
     cmd = "sudo lscpu | grep 'Model name';sudo lscpu | grep 'Flags'"
     output = shell_utils.run_command_over_ssh(hypervisor_ip, cmd)
     if not output:
         raise Exception("Unable to determine 'CPU Model name'")
     model = ""
     flags = []
     for line in output.split('\n'):
         if "Model name" in line:
             model = line.split(':')[1].strip(' \n')
         elif "Flags" in line:
             flags = line.split(':')[1].strip(' \n').split(' ')
     return model, flags
 def _get_physical_memory(self, hypervisor_ip):
     """Sum the physical memory banks reported by dmidecode.

     :param hypervisor_ip: hypervisor to query over ssh
     :return: total physical memory in MB
     :raises ValueError: if a DIMM size uses an unrecognized unit.
     """
     mem_total_kb = 0
     # Multipliers normalizing dmidecode size units to KB.
     unit_to_kb = {'kb': 1, 'mb': 1024, 'gb': 1024 * 1024}
     cmd = "sudo dmidecode --type memory | grep 'Size' | grep '[0-9]'"
     output = shell_utils.run_command_over_ssh(hypervisor_ip, cmd)
     for line in output.split('\n'):
         if line:
             mem_info = line.split(':')[1].strip()
             mem_val = mem_info.split(' ')
             mem_unit = mem_val[1].strip(' ').lower()
             # BUG FIX: an unknown unit used to either raise NameError (first
             # line) or silently re-add the previous bank's size (stale
             # 'memory_kb'); fail loudly instead.
             if mem_unit not in unit_to_kb:
                 raise ValueError(
                     "Unrecognized memory unit '{}' in '{}'".format(
                         mem_unit, line))
             mem_total_kb += int(mem_val[0].strip(' ')) * unit_to_kb[mem_unit]
     return (mem_total_kb / 1024)
Exemplo n.º 18
0
 def ping_via_network_namespace(self, ping_to_ip, network_id):
     """Ping an address from inside the qdhcp namespace of a network.

     :param ping_to_ip: address to ping
     :param network_id: network whose qdhcp namespace is used
     :return: True when at least one reply was received, False otherwise.
     """
     cmd = ("sudo ip netns exec qdhcp-" + network_id
            + " ping -c 10 " + ping_to_ip)
     # Run the ping from the controller backing the identity endpoint.
     ctrl_ip = urlparse(CONF.identity.uri).netloc.split(':')[0]
     result = shell_utils.run_command_over_ssh(ctrl_ip, cmd)
     # Only the ping summary line is of interest.
     summary = next((line for line in result.split('\n')
                     if 'packets transmitted' in line), None)
     if summary is None:
         return False
     LOG.info("Ping via namespace result: %s", summary)
     received_str = summary.split(',')[1].strip()
     try:
         received = int(received_str.split(' ')[0])
     except ValueError:
         return False
     return received > 0
Exemplo n.º 19
0
    def get_ovs_interface_statistics(self, interfaces, previous_stats=None,
                                     hypervisor=None):
        """This method get ovs interface statistics

        :param interfaces: interfaces in which statistics will be retrieved
        :param previous_stats: get the difference between current stats and
                               previous stats
        :param hypervisor: hypervisor ip, if None it will be selected the first
                           one
        :return statistics
        """
        self.ip_address = self._get_hypervisor_ip_from_undercloud(
            **{'shell': '/home/stack/stackrc'})
        hypervisor_ip = self.ip_address[0]
        # A caller-supplied hypervisor must be one of the known ones.
        if hypervisor is not None:
            if hypervisor not in self.ip_address:
                raise ValueError('invalid hypervisor ip {}, not in {}'
                                 .format(hypervisor,
                                         ' '.join(self.ip_address)))
            else:
                hypervisor_ip = hypervisor

        # Make sure ovs is actually running before querying it.
        shell_utils.check_pid_ovs(hypervisor_ip)
        # We ensure that a number is being parsed, otherwise we fail
        statistics = {}
        for interface in interfaces:
            command = 'sudo ovs-vsctl get Interface {} ' \
                      'statistics'.format(interface)
            # ovs prints the statistics as '{key=value, ...}'; the chained
            # replaces rewrite that into '{"key":value, "key":value}' so
            # yaml.safe_load can parse it into a dict. The replace order
            # matters: quotes are stripped first, then keys are re-quoted.
            statistics[interface] = \
                yaml.safe_load(
                    shell_utils.run_command_over_ssh(
                        hypervisor_ip, command).replace(
                        '"', '').replace(
                        '{', '{"').replace(', ', ', "').replace('=', '":'))
            # When a previous snapshot is given, report deltas relative to
            # it instead of absolute counters.
            if previous_stats is not None and \
               interface in previous_stats.keys():
                for stat in statistics[interface].keys():
                    if stat in previous_stats[interface].keys():
                        statistics[interface][stat] -= \
                            previous_stats[interface][stat]
                    else:
                        raise ValueError('missing ovs interface stat {} '
                                         'to compare'.format(stat))

        return statistics
Exemplo n.º 20
0
    def test_offload_nic_eswitch_mode(self, test='offload'):
        """Check eswitch mode of nic for offload on all hypervisors

        :param test: Test name from the external config file.
        """
        test_dict = self.test_setup_dict[test]
        if 'offload_nics' in test_dict:
            offload_nics = test_dict['offload_nics']
        else:
            raise ValueError('offload_nics is not defined in offload test')
        # Retrieve all hypvervisors
        hypervisors = self._get_hypervisor_ip_from_undercloud(
            shell='/home/stack/stackrc')
        # ethtool cmd to retrieve PCI bus of interface
        ethtool_cmd = ("sudo ethtool -i {} | grep bus-info "
                       "| cut -d ':' -f 2,3,4 | awk '{{$1=$1}};1'")
        # devlink cmd to retrieve switch mode of interface
        devlink_cmd = "sudo devlink dev eswitch show pci/{}"
        # Intialize results list
        result = []
        # Expected result is a list of dicts containing a dict of
        # hypervisor's IP, its offload nics as keys and the value 'true'
        # Example:
        # [{'192.0.160.1': [{'p6p1': 'true'}, {'p6p2': 'true'}]},
        #  {'192.0.160.2': [{'p6p1': 'true'}, {'p6p2': 'true'}]}]
        expected_result = [{ip: [{nic: 'true'} for nic in offload_nics]}
                           for ip in hypervisors]
        for hypervisor in hypervisors:
            dev_result = []
            for nic in offload_nics:
                # NOTE(review): the ssh output presumably ends with a newline
                # that gets embedded into the devlink command below — appears
                # to be tolerated, but confirm against shell_utils behavior.
                pci = shell_utils.run_command_over_ssh(hypervisor,
                                                       ethtool_cmd.format(nic))
                dev_query = shell_utils.\
                    run_command_over_ssh(hypervisor,
                                         devlink_cmd.format(pci))
                # 'switchdev' in the devlink output means the nic eswitch
                # is in offload (switchdev) mode rather than legacy mode.
                if 'switchdev' in dev_query:
                    output = 'true'
                else:
                    output = 'false'
                LOG.info("Hypervisor '{h}' NIC '{n}' is in switchdev mode: {r}"
                         .format(h=hypervisor, n=nic, r=output))
                dev_result.append({nic: output})
            result.append({hypervisor: dev_result})
        msg = "Not all hypervisors contains nics in switchev mode"
        self.assertItemsEqual(expected_result, result, msg)
Exemplo n.º 21
0
    def get_osp_release(self, hypervisor=None):
        """Gather the OSP release from a hypervisor.

        :param hypervisor: Ip of the hypervisor to work on (optional);
                           defaults to the first undercloud hypervisor.
        :return: OSP major version as an integer
        """
        if not hypervisor:
            hypervisor = self._get_hypervisor_ip_from_undercloud(
                shell='/home/stack/stackrc')[0]
        release = shell_utils.run_command_over_ssh(
            hypervisor, 'cat /etc/rhosp-release')
        if release == '':
            # Fall back to the rhos-release marker file.
            release = shell_utils.run_command_over_ssh(
                hypervisor,
                'cat /var/lib/rhos-release/latest-installed')
        # The first number in the output is the major OSP version.
        return int(re.findall(r'\d+', release)[0])
Exemplo n.º 22
0
    def validate_no_reboot_in_stack_update(self,
                                           stack_name='overcloud',
                                           hypervisors_ip=False):
        """test node didn't reboot meanwhile stack update

        Queries the heat API and validates that no reboot
        occurred during the stack update.

        :param stack_name: heat stack whose update events are inspected
        :param hypervisors_ip: optional list of hypervisor ips; fetched
                               from the undercloud when not supplied
        :raises ValueError: when the update start/end events are missing
        """
        LOG.info('Started validating no reboot meanwhile stack update.')
        LOG.info('Fetching stack update start and end from heat API')
        update_start = None
        update_end = None
        for event in self.os_client\
            .undercloud_heatclient.events.list(stack_name):
            if event.resource_status_reason == 'Stack UPDATE started':
                update_start = datetime.datetime.strptime(
                    event.event_time, '%Y-%m-%dT%H:%M:%SZ')
            elif event.resource_status_reason ==\
                'Stack UPDATE completed successfully':
                update_end = datetime.datetime.strptime(
                    event.event_time, '%Y-%m-%dT%H:%M:%SZ')
        # BUG FIX: previously these could be referenced while unbound,
        # producing a confusing NameError when the events were missing.
        if update_start is None or update_end is None:
            raise ValueError(
                'Could not find stack update start/end events for '
                'stack {}'.format(stack_name))
        if not hypervisors_ip:
            LOG.info('Fetching overcloud hypervisors ip addresses')

            hypervisors_ip = self._get_hypervisor_ip_from_undercloud()

        rebooted_hypervisors = []
        for hypervisor in hypervisors_ip:
            try:
                last_reboot = datetime.datetime.strptime(
                    shell_utils.run_command_over_ssh(hypervisor, 'uptime -s'),
                    '%Y-%m-%d %H:%M:%S\n')
            except NoValidConnectionsError:
                LOG.info('One or more of the hypervisor is '
                         'unreachable via ssh please make sure all '
                         'hypervisors are up')
                # BUG FIX: re-raise the caught exception; the previous
                # 'raise NoValidConnectionsError' instantiated the class
                # without its required arguments, masking the real error.
                raise

            if update_start <= last_reboot <= update_end:
                rebooted_hypervisors.append(hypervisor)

        self.assertEmpty(
            rebooted_hypervisors,
            'Computes with the following {} ip address rebooted '
            'durring the update'.format(rebooted_hypervisors))
Exemplo n.º 23
0
    def validate_kargs(self, ip):
        """Validate that a node's kernel arguments match the expected set.

        :param ip: ip address of the server to validate
        """
        LOG.info('Validating kargs are competable')
        # TODO(eshulman) replace with a heat query
        expected_args = CONF.nfv_plugin_options.kernel_args.split(' ')
        try:
            cmdline = shell_utils.run_command_over_ssh(ip,
                                                       'cat /proc/cmdline')\
                .split(' ')
        except Exception as err:
            # Preserve the failure details for later inspection.
            import sys
            self.exec_info = sys.exc_info()
            raise err

        failure_msg = 'kernel arguments did not update after node reboot'
        for expected in expected_args:
            self.assertIn(expected, cmdline, failure_msg)
# Exemplo n.º 24
# 0
    def get_ovs_multicast_groups(self, switch, multicast_ip=None,
                                 hypervisor=None):
        """Query the multicast group table of an OVS switch.

        :param switch: ovs switch to get multicast groups
        :param multicast_ip: filter by multicast ip
        :param hypervisor: hypervisor ip, if None it will be selected the first
                           one
        :return multicast groups
        """
        self.ip_address = self._get_hypervisor_ip_from_undercloud(
            **{'shell': '/home/stack/stackrc'})
        if hypervisor is None:
            hypervisor_ip = self.ip_address[0]
        elif hypervisor in self.ip_address:
            hypervisor_ip = hypervisor
        else:
            raise ValueError('invalid hypervisor ip {}, not in {}'
                             .format(hypervisor,
                                     ' '.join(self.ip_address)))

        shell_utils.check_pid_ovs(hypervisor_ip)

        # 'mdb/show' prints one header row followed by one row per group.
        command = 'sudo ovs-appctl mdb/show {}'.format(switch)
        raw_output = shell_utils.run_command_over_ssh(hypervisor_ip, command)
        rows = [line for line in raw_output.split('\n') if line]

        header = None
        groups = []
        for row in rows:
            columns = [col for col in row.split(" ") if col]
            if header is None:
                # The first non-empty line carries the column names.
                header = columns
                continue
            entry = dict(zip(header, columns))
            if multicast_ip is None or entry['GROUP'] == multicast_ip:
                groups.append(entry)
        return groups
# Exemplo n.º 25
# 0
 def check_number_queues(self):
     """Return the maximum queue count: rx queues per port * pmd threads."""
     self.ip_address = self._get_hypervisor_ip_from_undercloud(
         **{'shell': '/home/stack/stackrc'})
     hypervisor = self.ip_address[0]
     ovs_pid = shell_utils.check_pid_ovs(hypervisor)
     # Count the pmd threads of the ovs-vswitchd process.
     pmd_count_cmd = "ps -T -p {} | grep pmd | wc -l".format(ovs_pid)
     pmd_total = int(shell_utils.run_command_over_ssh(hypervisor,
                                                      pmd_count_cmd))
     # We ensure that a number is being parsed, otherwise we fail
     rxq_cmd = r'sudo ovs-vsctl show | sed -n "s/.*n_rxq=.\([1-9]\).*/\\1/p"'
     rxq_raw = (shell_utils.
                run_command_over_ssh(hypervisor,
                                     rxq_cmd)).encode('ascii', 'ignore')
     # Normalise py2/py3: decode back to str if encode produced bytes.
     if not isinstance(rxq_raw, type(str())):
         rxq_raw = rxq_raw.decode("utf-8")
     msg = "There are no queues available"
     self.assertNotEqual((rxq_raw.rstrip("\n")), '', msg)
     # Different multiple queues is not a supported scenario as per now
     first_value = rxq_raw.split("\n")[0]
     self.assertTrue(str.isdigit(first_value),
                     "Queue recieved is not a digit")
     return int(first_value) * pmd_total
 def _get_nodes_cores_info(self, hypervisor_ip):
     """Build a map of (numa node, core) -> cpu/thread-sibling info."""
     cores_map = {}
     cmd = "sudo lscpu -p=NODE,CORE,CPU | grep -v ^#"
     lscpu_out = shell_utils.run_command_over_ssh(hypervisor_ip, cmd)
     for row in lscpu_out.split('\n'):
         if not row:
             continue
         fields = row.split(',')
         node = int(fields[0])
         cpu = int(fields[1])
         thread = int(fields[2])
         # CPU and NUMA node together forms a unique value, as cpu is
         # specific to a NUMA node
         # NUMA node id and cpu id tuple is used for unique key
         key = (node, cpu)
         if key not in cores_map:
             cores_map[key] = {'thread_siblings': [thread],
                               'cpu': cpu,
                               'numa_node': node}
         elif thread not in cores_map[key]['thread_siblings']:
             cores_map[key]['thread_siblings'].append(thread)
     return cores_map
    def test_igmp_snooping_deployment(self, test='igmp_snooping_deployment'):
        """Check that igmp snooping bonding is properly configure

        mcast_snooping_enable and mcast-snooping-disable-flood-unregistered
        configured in br-int (ovs backend), or igmp_snooping_enable and
        mcast_snoop on logical switches (ovn backend).
        """
        LOG.info('Starting {} test.'.format(test))
        network_backend = self.discover_deployment_network_backend()
        if network_backend == 'ovs':
            hypervisors = self._get_hypervisor_ip_from_undercloud()

            result = []
            cmd = 'sudo ovs-vsctl --format=json list bridge br-int'
            # Expected br-int configuration on every hypervisor.
            checks = {'mcast_snooping_enable': True,
                      'mcast-snooping-disable-flood-unregistered': True}

            for hypervisor_ip in hypervisors:
                output = shell_utils.run_command_over_ssh(hypervisor_ip, cmd)
                # ovs command returns boolean in small letters
                ovs_data = json.loads(output)
                ovs_data_filt = {}
                try:
                    ovs_data_filt['mcast_snooping_enable'] = \
                        (ovs_data['data'][0]
                         [ovs_data['headings'].index('mcast_snooping_enable')])
                    ovs_data_filt['mcast-snooping-disable'
                                  '-flood-unregistered'] = \
                        (dict(ovs_data['data'][0]
                              [ovs_data['headings'].index('other_config')][1])
                         ['mcast-snooping-disable-flood-unregistered'])
                except Exception:
                    # Keys missing from the ovs output are reported below
                    # via diff_checks_cmd rather than failing here.
                    pass

                # Checks that could not be extracted from the ovs output.
                diff_checks_cmd = (set(checks.keys())
                                   - set(ovs_data_filt.keys()))
                if len(diff_checks_cmd) > 0:
                    result.append("{}. Missing checks: {}. Check ovs cmd "
                                  "output".format(hypervisor_ip,
                                                  ', '.join(diff_checks_cmd)))

                for check in checks:
                    # Bug fix: validate only checks that WERE found in the
                    # ovs output. The previous `in` test skipped every
                    # present check and raised KeyError for missing ones.
                    if check not in diff_checks_cmd:
                        if isinstance(ovs_data_filt[check], str):
                            # If object is not equal to 'true' or 'false'
                            # ValueError exception will be raised
                            ovs_data_filt[check] = \
                                strtobool(ovs_data_filt[check])
                        if ovs_data_filt[check] != checks[check]:
                            msg = ("{}. Check failed: {}. Expected: {} "
                                   "- Found: {}"
                                   .format(hypervisor_ip, check, checks[check],
                                           ovs_data_filt[check]))
                            result.append(msg)
            self.assertTrue(len(result) == 0, '. '.join(result))
        # We assume that controller nodes act as ovn controllers
        elif network_backend == 'ovn':
            controller = shell_utils.get_controllers_ip_from_undercloud(
                shell=CONF.nfv_plugin_options.undercloud_rc_file)[0]
            # Configuration should be identical across controllers
            igmp_configured = shell_utils.get_value_from_ini_config(
                controller, '/var/lib/config-data/puppet-generated'
                '/neutron/etc/neutron/neutron.conf', 'ovs',
                'igmp_snooping_enable')
            self.assertTrue(igmp_configured, 'IGMP not enabled in deployment')
            ovn_logical_switches_cmd = ('sudo podman exec -it ovn_controller'
                                        ' ovn-nbctl list Logical_Switch')
            ovn_logical_switches_output = shell_utils.run_command_over_ssh(
                controller, ovn_logical_switches_cmd)
            # We expect to have at least a single logical switch
            if '_uuid ' not in ovn_logical_switches_output:
                raise ValueError('Failed to query OVN northbound DB'
                                 ', no logical switch info was returned')
            re_igmp_string = \
                r'mcast_snoop="true"'
            pattern = re.compile(re_igmp_string)
            igmp_lines = pattern.findall(ovn_logical_switches_output)
            LOG.info('Located {} logical switches with IGMP enabled'
                     .format(len(igmp_lines)))
            msg = "IGMP is not enabled on any logical switch"
            self.assertNotEmpty(igmp_lines, msg)
            re_flood_string = \
                r'mcast_flood_unregistered="\w+"'
            flood_pattern = re.compile(re_flood_string)
            # Bug fix: search the command OUTPUT, not the command string
            # itself (the old code could never find a match).
            flood_lines = \
                flood_pattern.findall(ovn_logical_switches_output)
            for line in flood_lines:
                if 'true' in line:
                    LOG.warning("Located a logical switch with "
                                "'mcast_flood_unregistered' set to 'true', "
                                "this is not optimal and will potentially"
                                "cause multicast to behave as broadcast. "
                                "This setting should be set to 'false'")

        else:
            raise ValueError("Network backend '{}' is not supported"
                             .format(network_backend))
        return True
    def test_check_igmp_reports(self, test='check_igmp_reports'):
        """Check igmp reports are forwarded

        Check igmp reports are forwarded from the vms and arrive to
        the switch. It will be check the internal bridge in which it
        is the nic connected to the external switch
        Tests https://bugzilla.redhat.com/show_bug.cgi?id=1933734
        """
        LOG.info('Starting {} test.'.format(test))

        if self.external_resources_data is None:
            raise ValueError('External resource data is required for the test')

        servers, key_pair = self.create_and_verify_resources(test=test)

        igmp_reports = CONF.nfv_plugin_options.igmp_reports
        reports_interface = igmp_reports['reports_interface']

        # Temp file name is generated locally but used as a path on the
        # hypervisor for the tcpdump capture.
        tcpdump_file = tempfile.NamedTemporaryFile().name
        test_server = servers[0]

        # Check tcpdump is installed
        cmd_check_dump = "PATH=$PATH:/usr/sbin; which tcpdump " \
                         "2>/dev/null || true"
        LOG.info('Executed on {}: {}'.format(
            test_server['hypervisor_ip'], cmd_check_dump))
        output = shell_utils.run_command_over_ssh(test_server['hypervisor_ip'],
                                                  cmd_check_dump)
        self.assertNotEqual(output, '', "tcpdump not installed in {}".format(
            test_server['hypervisor_ip']))

        # It will generate the join/leave igmp messages
        cmd_mcast = "sudo timeout 3 python " \
                    "/usr/local/bin/multicast_traffic.py -r -g 239.1.1.1 " \
                    "-p 5000 -c 1 || true"
        # Capture all igmp messages
        cmd_dump = "PATH=$PATH:/usr/sbin; sudo tcpdump -i {} igmp " \
                   "> {} 2>&1 &".format(reports_interface, tcpdump_file)
        # Filter only igmp reports messages (join/leave), not igmp queries
        cmd_result = "sudo killall -SIGINT tcpdump;cat {} | " \
                     "( grep igmp || true ) | ( grep report || true ) | " \
                     "wc -l".format(tcpdump_file)

        ssh_source = self.get_remote_client(test_server['fip'],
                                            username=self.
                                            instance_user,
                                            private_key=key_pair[
                                                'private_key'])

        # Start the capture on the hypervisor first.
        LOG.info('Executed on {}: {}'.format(
            test_server['hypervisor_ip'], cmd_dump))
        shell_utils.run_command_over_ssh(test_server['hypervisor_ip'],
                                         cmd_dump)

        # Trigger the igmp join/leave traffic from the vm.
        LOG.info('Executed on {}: {}'.format(test_server['fip'],
                                             cmd_mcast))
        ssh_source.exec_command(cmd_mcast)

        # Bug fix: log the command actually executed here (cmd_result,
        # which stops tcpdump and counts the reports), not cmd_mcast.
        LOG.info('Executed on {}: {}'.format(test_server['hypervisor_ip'],
                                             cmd_result))
        output = int(shell_utils.run_command_over_ssh(
            test_server['hypervisor_ip'], cmd_result))

        self.assertGreater(output, 0, "No igmp reports received")
        LOG.info('Igmp reports being forwarded properly')
# Exemplo n.º 29
# 0
    def test_hypervisor_tuning(self, test='hypervisor_tuning'):
        """Test tuning state of hypervisor

        Test the following states:
          - Packages (given in config)
          - Active services (given in config)
          - Tuned active profile (given in config)
          - Kernel arguments (given in config)
        """
        tuning_details = \
            json.loads(CONF.nfv_plugin_options.hypervisor_tuning_details)
        packages = tuning_details.get("packages")
        services = tuning_details.get("services")
        tuned_profiles = tuning_details.get("tuned_profiles")
        kernel_args = tuning_details.get("kernel_args")

        self.hypervisor_ip = self._get_hypervisor_ip_from_undercloud()[0]
        self.assertNotEmpty(self.hypervisor_ip, "No hypervisor found")

        failures = []
        if packages:
            # Single rpm query covering all requested packages.
            pkg_check = "rpm -qa | grep" + "".join(
                " -e ^{}".format(pkg) for pkg in packages)
            result = shell_utils.run_command_over_ssh(self.hypervisor_ip,
                                                      pkg_check).split()
            if not result:
                failures.append("Packages: no output received")
            else:
                for pkg in packages:
                    if not fnmatch.filter(result, pkg):
                        failures.append(
                            "Missing required packages. "
                            "Found following packages: {}".format(result))
                LOG.info("Found the following packages: {}".format(result))

        if services:
            # One systemctl call checking every service at once.
            svc_check = "systemctl is-active" + "".join(
                " {}".format(svc) for svc in services)
            result = shell_utils.run_command_over_ssh(self.hypervisor_ip,
                                                      svc_check).strip('\n')
            if not result:
                failures.append("Services: no output received")
            else:
                states = result.split('\n')
                if states.count('active') != len(services):
                    failures.append("Some of the requested services are "
                                    "not in an active state.")
                LOG.info('The services states - {}'.format(
                    list(zip(services, states))))

        if tuned_profiles:
            active_profile = shell_utils.run_command_over_ssh(
                self.hypervisor_ip,
                "sudo tuned-adm active | awk '{print $4}'").strip('\n')
            if active_profile not in tuned_profiles:
                failures.append(
                    "Tuned {0} profile is not Active".format(tuned_profiles))

        if kernel_args:
            cmdline = shell_utils.run_command_over_ssh(
                self.hypervisor_ip, "sudo cat /proc/cmdline")
            if not cmdline:
                failures.append("Kernel args: no output received")
            else:
                for arg in kernel_args:
                    if arg not in cmdline:
                        failures.append(
                            "The kernel args are missing - {}".format(arg))

        report = '\n'.join(failures)
        self.assertEmpty(report, report)
# Exemplo n.º 30
# 0
    def learn_queues(self, servers, key_pair):
        """learn about queues

        Learn about queues:
        * mapping physical/virtual queues
        * rate/cpu params

        Drives a traffic-generation run from the trex vm, collects the
        ovs pmd rx-queue statistics from the testpmd hypervisor, copies
        them to the trex vm and has the injector update its queues json.

        :param servers: server dict with vms
        :param key_pair: key pair
        """
        learning_config = CONF.nfv_plugin_options.multiqueue_learning
        if not learning_config["learn"]:
            LOG.info('Skipping multiqueue learning due to configuration.')
            return
        LOG.info('Starting multiqueue learning')

        # set affinity to be sure that there are no 2 physical queues in the
        # same pmd which causes that one virtual queue will be used instead of
        # 2 and it will not be able to map one physical queue with one
        # virtual queue
        for interface in learning_config["pmd_rxq_affinity"]:
            cmd_affinity = "sudo ovs-vsctl set Interface {} " \
                           "other_config:pmd-rxq-affinity=\"{}\"".\
                format(interface["interface"],
                       interface["pmd_rxq_affinity"])
            LOG.info('learn_queues cmd {}'.format(cmd_affinity))
            shell_utils.run_command_over_ssh(
                servers['testpmd']['hypervisor_ip'], cmd_affinity)

        # training cmd: run the injector on the trex vm to generate traffic
        # at the configured pps for the configured duration
        cmd_training = "{} --action gen_traffic --pps \"{}\" --traffic_json" \
                       " {} --duration {} --multiplier {}".\
            format(learning_config["injector"],
                   learning_config["pps"],
                   learning_config["queues_json"],
                   learning_config["duration"],
                   learning_config["multiplier"])
        LOG.info('learn_queues cmd {}'.format(cmd_training))
        servers['trex']['ssh_source'].exec_command(cmd_training)

        # get pmd stats from the hypervisor hosting testpmd
        cmd_pmd_rxq_show = "sudo ovs-appctl dpif-netdev/pmd-rxq-show"
        LOG.info('learn_queues cmd {}'.format(cmd_pmd_rxq_show))
        pmd_rxq_output = shell_utils.run_command_over_ssh(
            servers['testpmd']['hypervisor_ip'], cmd_pmd_rxq_show)

        LOG.info('learn_queues queues: {}'.format(pmd_rxq_output))

        # remove configure affinity before after the learning
        for interface in learning_config["pmd_rxq_affinity"]:
            cmd_affinity = "sudo ovs-vsctl remove Interface {} " \
                           "other_config pmd-rxq-affinity".\
                format(interface["interface"])
            LOG.info('learn_queues cmd {}'.format(cmd_affinity))
            shell_utils.run_command_over_ssh(
                servers['testpmd']['hypervisor_ip'], cmd_affinity)

        # copy pmd file to trex vm; the local temp file is written, flushed
        # and shipped while still open so its contents are complete
        with tempfile.NamedTemporaryFile() as fp:
            fp.write(pmd_rxq_output.encode())
            fp.flush()
            self.copy_file_to_remote_host(servers['trex']['fip'],
                                          key_pair['private_key'],
                                          self.instance_user,
                                          files=os.path.basename(fp.name),
                                          src_path=os.path.dirname(fp.name),
                                          dst_path=os.path.dirname(fp.name),
                                          timeout=60)

        # parse pmd file and update queues.json file
        # NOTE: fp.name is used after the `with` block — the local temp file
        # is already deleted, but the name string is still valid and refers
        # to the copy that now exists on the trex vm (same path).
        cmd_pmd_parse = "{} --action parse_pmd_stats  --pmd_stats {} " \
                        "--traffic_json {} --pps \"{}\"".\
            format(learning_config["injector"],
                   fp.name,
                   learning_config["queues_json"],
                   learning_config["pps"])
        LOG.info('learn_queues cmd {}'.format(cmd_pmd_parse))
        servers['trex']['ssh_source'].exec_command(cmd_pmd_parse)

        LOG.info('Multiqueue learning finished')