Example #1
    def base_setup(self):

        flavor_id = nova_helper.create_flavor(name='dedicated')[1]
        ResourceCleanup.add('flavor', flavor_id, scope='class')

        extra_specs = {FlavorSpec.CPU_POLICY: 'dedicated'}
        nova_helper.set_flavor(flavor=flavor_id, **extra_specs)

        mgmt_net_id = network_helper.get_mgmt_net_id()
        tenant_net_id = network_helper.get_tenant_net_id()
        internal_net_id = network_helper.get_internal_net_id()

        nics = [{'net-id': mgmt_net_id},
                {'net-id': tenant_net_id},
                {'net-id': internal_net_id}]

        LOG.fixture_step(
            "(class) Boot a base vm with following nics: {}".format(nics))
        base_vm = vm_helper.boot_vm(name='multiports_base',
                                    flavor=flavor_id, nics=nics,
                                    cleanup='class',
                                    reuse_vol=False)[1]

        vm_helper.wait_for_vm_pingable_from_natbox(base_vm)
        vm_helper.ping_vms_from_vm(base_vm, base_vm, net_types='data')

        return base_vm, flavor_id, mgmt_net_id, tenant_net_id, internal_net_id
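The base_setup method above returns a 5-tuple that the class-scoped tests in Examples 6 and 21 unpack. A minimal sketch of how such a method is typically registered as a class-scoped pytest fixture; the decorator is not shown in the excerpt, so this wiring is an assumption:

from pytest import fixture

class TestMultiports:  # hypothetical class name

    @fixture(scope='class')
    def base_setup(self):
        # Body as in the excerpt above: create a dedicated-cpu flavor,
        # collect the mgmt/tenant/internal network ids, boot the base vm,
        # and verify it is pingable from the NatBox and over its data network.
        ...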
Example #2
def vif_model_check(request):
    vif_model = request.param

    LOG.fixture_step(
        "Get a network that supports {} to boot vm".format(vif_model))
    pci_net = network_helper.get_pci_vm_network(pci_type=vif_model,
                                                net_name='internal0-net')
    if not pci_net:
        skip(SkipHostIf.PCI_IF_UNAVAIL)

    extra_pcipt_net_name = extra_pcipt_net = None
    if not isinstance(pci_net, str):
        pci_net, extra_pcipt_net_name = pci_net
    LOG.info("PCI network selected to boot vm: {}".format(pci_net))

    LOG.fixture_step("Create a flavor with dedicated cpu policy")
    flavor_id = nova_helper.create_flavor(name='dedicated',
                                          ram=2048,
                                          cleanup='module')[1]
    extra_specs = {FlavorSpec.CPU_POLICY: 'dedicated'}
    nova_helper.set_flavor(flavor=flavor_id, **extra_specs)

    LOG.fixture_step("Boot a base vm with above flavor and virtio nics")

    mgmt_net_id = network_helper.get_mgmt_net_id()
    pci_net_id, seg_id, pnet_name = network_helper.get_network_values(
        network=pci_net,
        fields=('id', 'provider:segmentation_id', 'provider:physical_network'))

    nics = [{'net-id': mgmt_net_id}, {'net-id': pci_net_id}]
    nics_to_test = [{
        'net-id': mgmt_net_id
    }, {
        'net-id': pci_net_id,
        'vif-model': vif_model
    }]
    pcipt_seg_ids = {}
    if vif_model == 'pci-passthrough':
        pcipt_seg_ids[pci_net] = seg_id
        if extra_pcipt_net_name:
            extra_pcipt_net, seg_id = network_helper.get_network_values(
                network=extra_pcipt_net_name,
                fields=('id', 'provider:segmentation_id'))
            nics.append({'net-id': extra_pcipt_net})
            nics_to_test.append({
                'net-id': extra_pcipt_net,
                'vif-model': vif_model
            })
            pcipt_seg_ids[extra_pcipt_net_name] = seg_id

    base_vm = vm_helper.boot_vm(flavor=flavor_id, nics=nics,
                                cleanup='module')[1]
    vm_helper.wait_for_vm_pingable_from_natbox(base_vm)
    vm_helper.ping_vms_from_vm(base_vm,
                               base_vm,
                               net_types=['mgmt', 'internal'])

    return vif_model, base_vm, flavor_id, nics_to_test, pcipt_seg_ids, pnet_name, extra_pcipt_net
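Since vif_model_check reads request.param, it is evidently a parametrized fixture (Example 20 consumes it). A hedged sketch of how it might be declared; the scope and parameter values are illustrative assumptions, not taken from the source:

from pytest import fixture

@fixture(scope='module', params=['pci-sriov', 'pci-passthrough'])  # assumed params
def vif_model_check(request):
    vif_model = request.param
    ...  # continues as in the excerpt above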
Example #3
def test_non_primary_tenant():
    vm_1 = vm_helper.boot_vm(cleanup='function',
                             auth_info=Tenant.get('tenant1'))[1]
    vm_2 = vm_helper.launch_vms(vm_type='dpdk',
                                auth_info=Tenant.get('tenant1'))[0][0]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_1)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_2)
    vm_helper.ping_vms_from_natbox(vm_ids=vm_2)
    vm_helper.ping_vms_from_vm(vm_2, vm_1, net_types='mgmt')
Example #4
def _ping_vm_data(vm_under_test, base_vm_id, action):
    LOG.tc_step(
        "Verify ping vm_under_test {} from vm {} over mgmt & data networks works after {}"
        .format(vm_under_test, base_vm_id, action))
    vm_helper.wait_for_vm_pingable_from_natbox(vm_under_test)
    vm_helper.ping_vms_from_vm(to_vms=vm_under_test,
                               from_vm=base_vm_id,
                               net_types=['data'],
                               retry=10)
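
A typical call site for the helper above, shown with an illustrative action; the surrounding variables are placeholders, not values from the source:

vm_helper.perform_action_on_vm(vm_under_test, action='live_migrate')
_ping_vm_data(vm_under_test, base_vm_id, action='live migration')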
Example #5
def test_ping_vms_from_vm_1(vm_count):
    from_vm = vm_helper.get_any_vms(count=1)[0]
    if vm_count == 'all':
        vm_ids = None
    else:
        vm_ids = vm_helper.get_any_vms(count=vm_count)

    assert vm_ids != ()

    vm_helper.ping_vms_from_vm(to_vms=vm_ids, from_vm=from_vm, fail_ok=False)
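
The vm_count argument is normally supplied by pytest parametrization; a possible declaration, with illustrative values only (the code above handles the special value 'all'):

from pytest import mark

@mark.parametrize('vm_count', [1, 2, 'all'])  # assumed values
def test_ping_vms_from_vm_1(vm_count):
    ...  # body as in the excerpt above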
Example #6
    def test_multiports_on_same_network_evacuate_vm(self, vifs,
                                                    check_avs_pattern,
                                                    base_setup):
        """
        Test evacuate vm with multiple ports on same network

        Args:
            vifs (tuple): each item in the tuple is 1 nic to be added to vm with specified vif model
            base_setup (tuple): test fixture to boot base vm

        Setups:
            - create a flavor with dedicated cpu policy (class)
            - choose one tenant network and one internal network to be used by test (class)
            - boot a base vm - vm1 with above flavor and networks, and ping it from NatBox (class)
            - Boot a vm under test - vm2 with above flavor and with multiple ports on same tenant
                network with base vm,
            and ping it from NatBox     (class)
            - Ping vm2's own data network ips       (class)
            - Ping vm2 from vm1 to verify management and data networks connection   (class)

        Test Steps:
            - Reboot vm2 host
            - Wait for vm2 to be evacuated to other host
            - Wait for vm2 pingable from NatBox
            - Verify pci_address preserves
            - Verify ping from vm1 to vm2 over management and data networks still works

        Teardown:
            - Delete created vms and flavor
        """

        base_vm, flavor, mgmt_net_id, tenant_net_id, internal_net_id = base_setup
        vm_under_test, nics = _boot_multiports_vm(flavor=flavor,
                                                  mgmt_net_id=mgmt_net_id,
                                                  vifs=vifs,
                                                  net_id=tenant_net_id,
                                                  net_type='data',
                                                  base_vm=base_vm)

        host = vm_helper.get_vm_host(vm_under_test)

        LOG.tc_step("Reboot vm host {}".format(host))
        vm_helper.evacuate_vms(host=host,
                               vms_to_check=vm_under_test,
                               ping_vms=True)

        LOG.tc_step(
            "Verify ping from base_vm to vm_under_test over management and data networks "
            "still works after evacuation.")
        vm_helper.ping_vms_from_vm(to_vms=vm_under_test,
                                   from_vm=base_vm,
                                   net_types=['mgmt', 'data'])
Example #7
def test_evacuate_dpdk_and_vhost_vms(add_admin_role_func):
    """
    Skip:
        - Less than 2 up hypervisors with same storage config available on system
    Setups:
        - Add admin role to tenant user under test
    Test Steps:
        - Launch 3 vms on same host with following configs:
            - dpdk vm with 2 vcpus
            - vhost vm with 2 vcpus
            - vhost vm with 3 vcpus
        - sudo reboot -f on vm host
        - Check vms are moved to other host, in active state, and are pingable after evacuation
    Teardown:
        - Remove admin role from tenant user
        - Wait for failed host to recover
        - Delete created vms
    """
    hosts = host_helper.get_up_hypervisors()
    if len(hosts) < 2:
        skip(SkipHypervisor.LESS_THAN_TWO_HYPERVISORS)

    LOG.tc_step("Boot an observer VM")
    vm_observer = launch_vm(vm_type='dpdk', num_vcpu=2, host=hosts[1])
    vm_helper.setup_avr_routing(vm_observer)

    LOG.tc_step("Launch dpdk and vhost vms")
    vms = []
    vm_host = hosts[0]
    for vm_info in (('dpdk', 3), ('vhost', 2), ('vhost', 3)):
        vm_type, num_vcpu = vm_info
        vm_id = launch_vm(vm_type=vm_type, num_vcpu=num_vcpu, host=vm_host)
        vm_helper.setup_avr_routing(vm_id, vm_type=vm_type)
        vms.append(vm_id)

    LOG.tc_step(
        "Ensure dpdk and vhost vms interfaces are reachable before evacuate")
    vm_helper.ping_vms_from_vm(vms,
                               vm_observer,
                               net_types=['data', 'internal'],
                               vshell=True)

    LOG.tc_step(
        "Reboot VMs host {} and ensure vms are evacuated to other host".format(
            vm_host))
    vm_helper.evacuate_vms(host=vm_host, vms_to_check=vms, ping_vms=True)
    vm_helper.ping_vms_from_vm(vms,
                               vm_observer,
                               net_types=['data', 'internal'],
                               vshell=True)
Example #8
def test_ping_vms_from_vm_various_images(vm_image):
    image_id = glance_helper.get_image_id_from_name(name=vm_image,
                                                    strict=False)
    if not image_id:
        skip("No image name has substring: {}.".format(vm_image))

    vol_size = 1
    if vm_image in ['ubuntu', 'centos']:
        vol_size = 8
    vol_id = cinder_helper.create_volume(name='vol_' + vm_image,
                                         source_id=image_id,
                                         size=vol_size)[1]
    vm_id = vm_helper.boot_vm(source='volume', source_id=vol_id)[1]

    vm_helper.ping_vms_from_vm(from_vm=vm_id)
Example #9
def _boot_multiports_vm(flavor,
                        mgmt_net_id,
                        vifs,
                        net_id,
                        net_type,
                        base_vm,
                        pcipt_seg_id=None):
    nics = [{'net-id': mgmt_net_id}]

    nics, glance_vif = _append_nics_for_net(vifs, net_id=net_id, nics=nics)
    img_id = None
    if glance_vif:
        img_id = glance_helper.create_image(name=glance_vif,
                                            hw_vif_model=glance_vif,
                                            cleanup='function')[1]

    LOG.tc_step(
        "Boot a test_vm with following nics on same networks as base_vm: {}".
        format(nics))
    vm_under_test = vm_helper.boot_vm(name='multiports',
                                      nics=nics,
                                      flavor=flavor,
                                      cleanup='function',
                                      image_id=img_id)[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_under_test, fail_ok=False)

    if pcipt_seg_id:
        LOG.tc_step("Add vlan to pci-passthrough interface for VM.")
        vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=vm_under_test,
                                                   net_seg_id=pcipt_seg_id,
                                                   init_conf=True)

    LOG.tc_step("Ping test_vm's own {} network ips".format(net_type))
    vm_helper.ping_vms_from_vm(to_vms=vm_under_test,
                               from_vm=vm_under_test,
                               net_types=net_type)

    vm_helper.configure_vm_vifs_on_same_net(vm_id=vm_under_test)

    LOG.tc_step(
        "Ping test_vm from base_vm to verify management and data networks connection"
    )
    vm_helper.ping_vms_from_vm(to_vms=vm_under_test,
                               from_vm=base_vm,
                               net_types=['mgmt', net_type])

    return vm_under_test, nics
Example #10
def check_vm_pci_interface(vms,
                           net_type,
                           seg_id=None,
                           ping_timeout=VMTimeout.PING_VM):
    for vm in vms:
        vm_helper.wait_for_vm_pingable_from_natbox(vm, timeout=ping_timeout)

    LOG.tc_step(
        "Check vms mgmt and {} interfaces reachable from other vm".format(
            net_type))
    if seg_id:
        for vm_id in vms:
            vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=vm_id,
                                                       net_seg_id=seg_id)

    # Ensure pci interface working well
    vm_helper.ping_vms_from_vm(vms, vms[0], net_types=['mgmt', net_type])
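
An illustrative call to the helper above for a pci-passthrough scenario; the vm ids and segmentation id are placeholders, not values from the source:

check_vm_pci_interface(vms=[vm_under_test, base_vm],
                       net_type='internal',
                       seg_id=pcipt_seg_id)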
Example #11
def test_qos_update(setup_qos):
    """
    Tests network QoS update
    Test Setup:
    - create a qos policy
    - get mgmt net id
    - get internal net id
    - record the original qos values for above two networks
    - return qos, mgmt_net, internal_net

    Test Steps:
    - update networks with created qos
    - test ping over networks

    Test teardown:
    - restore the qos settings for both networks
    - delete the qos created by fixture
    """

    internal_net_id, mgmt_net_id, qos_new = setup_qos
    LOG.tc_step("Booting first vm.")
    nics = [{'net-id': mgmt_net_id}, {'net-id': internal_net_id}]

    vm1 = vm_helper.boot_vm(name='vm1', nics=nics, cleanup='function')[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm1)

    LOG.tc_step("Updating mgmt and internal networks to created QoS.")
    network_helper.update_net_qos(net_id=mgmt_net_id, qos_id=qos_new)
    network_helper.update_net_qos(net_id=internal_net_id, qos_id=qos_new)

    LOG.tc_step("Booting second vm.")
    vm2 = vm_helper.boot_vm(name='vm2', nics=nics, cleanup='function')[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm2)

    LOG.tc_step("Pinging vm1 from natbox after updating QoS.")
    vm_helper.wait_for_vm_pingable_from_natbox(vm1)

    LOG.tc_step("Testing ping between vms.")
    vm_helper.ping_vms_from_vm(to_vms=vm2,
                               from_vm=vm1,
                               net_types=['internal', 'mgmt'])
    vm_helper.ping_vms_from_vm(to_vms=vm1,
                               from_vm=vm2,
                               net_types=['internal', 'mgmt'])
Example #12
def test_boot_vms():

    mgmt_net_id = network_helper.get_mgmt_net_id()
    tenant_net_id = network_helper.get_tenant_net_id()
    internal_net_id = network_helper.get_internal_net_id()

    nics = [{
        'net-id': mgmt_net_id
    }, {
        'net-id': tenant_net_id
    }, {
        'net-id': internal_net_id
    }]

    for guest_os in ['ubuntu_14', 'cgcs-guest']:
        glance_helper.get_guest_image(guest_os)
        vm_id = vm_helper.boot_vm(guest_os=guest_os, nics=nics)[1]
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

        time.sleep(30)
        vm_helper.ping_vms_from_vm(vm_id,
                                   vm_id,
                                   net_types=['mgmt', 'data', 'internal'])
Example #13
def test_dpdk_vm_nova_actions(vm_type, num_vcpu):
    """
    DPDK VM with nova operations and evacuation test cases

    Test Steps:
        - Create flavor for dpdk
        - Create a dpdk vm
        - Perform nova actions and verify connectivity
        - Perform evacuation

    Test Teardown:
        - Delete vms, ports, subnets, and networks created

    """
    LOG.tc_step("Boot an observer VM")
    vms, nics = vm_helper.launch_vms(vm_type="dpdk")
    vm_observer = vms[0]
    vm_helper.setup_avr_routing(vm_observer)

    vm_id = launch_vm(vm_type=vm_type, num_vcpu=num_vcpu)
    vm_helper.setup_avr_routing(vm_id, vm_type=vm_type)

    for vm_actions in [['reboot'], ['pause', 'unpause'], ['suspend', 'resume'],
                       ['live_migrate'], ['cold_migrate']]:

        LOG.tc_step("Perform following action(s) on vm {}: {}".format(
            vm_id, vm_actions))
        for action in vm_actions:
            vm_helper.perform_action_on_vm(vm_id, action=action)

        LOG.tc_step("Ping vm")
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
        vm_helper.ping_vms_from_vm(vm_id,
                                   vm_observer,
                                   net_types=['data', 'internal'],
                                   vshell=True)
Example #14
def operation_rebuild(vm_id_):
    code, msg = vm_helper.rebuild_vm(vm_id=vm_id_)
    assert 0 == code, msg
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id_)
    vm_helper.ping_vms_from_vm(vm_id_, vm_id_, net_types=('data', 'internal'))
Example #15
def test_load_balancing_chained_service_function(protocol, nsh_aware,
                                                 same_host, symmetric,
                                                 check_system,
                                                 add_admin_role_module):
    """
        Test Load Balancing Chained Service Function

        Test Steps:
            - Check if the system is compatible
            - Boot the source VM, dest VM
            - Boot 3 SFC VMs
            - Install necessary software and packages inside the guests for the packet forwarding test
            - Create port pair using nsh_aware <True:False> for each SFC VM
            - Create port pair group with 3 port pairs
            - Create SFC flow classifier using protocol <tcp:icmp:udp>
            - Create port Chain with port pair group created
            - Check packet forwarding from source to dest vm via SFC vm

        Test Teardown:
            - Delete port chain, port pair group, port pair, flow classifier, vms, volumes created

    """
    nsh_aware = (nsh_aware == 'nsh_aware')
    same_host = (same_host == 'same_host')
    symmetric = (symmetric == 'symmetric')

    LOG.tc_step("Check if the system is compatible to run this test")
    computes = check_system

    LOG.tc_step("Boot the VM in same host: {}".format(same_host))
    hosts_to_boot = [computes[0]] * 3 if same_host else computes[0:3]
    LOG.info("Boot the VM in following compute host 1:{}, 2:{}, 3:{}".format(
        hosts_to_boot[0], hosts_to_boot[1], hosts_to_boot[2]))

    LOG.tc_step("Boot the source and dest VM")
    vm_ids = []
    vm_ids, source_vm_id, dest_vm_id, internal_net_id, mgmt_net_id, mgmt_nic = _setup_vm(
        vm_ids, hosts_to_boot)
    vm_helper.ping_vms_from_vm(to_vms=source_vm_id,
                               from_vm=dest_vm_id,
                               net_types=['mgmt'],
                               retry=10)

    LOG.tc_step("Boot the 3 SFC VM")
    sfc_vm_ids = []
    sfc_vm_ids, sfc_vm_under_test, ingress_port_id1, egress_port_id1 = \
        _setup_sfc_vm(sfc_vm_ids, hosts_to_boot, mgmt_nic, internal_net_id)

    sfc_vm_ids, sfc_vm_under_test2, ingress_port_id2, egress_port_id2 = \
        _setup_sfc_vm(sfc_vm_ids, hosts_to_boot, mgmt_nic, internal_net_id)
    sfc_vm_ids, sfc_vm_under_test3, ingress_port_id3, egress_port_id3 = \
        _setup_sfc_vm(sfc_vm_ids, hosts_to_boot, mgmt_nic, internal_net_id)

    for sfc_vm_id in sfc_vm_ids:
        vm_helper.ping_vms_from_vm(to_vms=source_vm_id,
                                   from_vm=sfc_vm_id,
                                   net_types=['mgmt'],
                                   retry=10)

    # if protocol != 'icmp':
    LOG.tc_step("Install software package nc in vm {} {}".format(
        source_vm_id, dest_vm_id))

    vm_helper.scp_to_vm_from_natbox(vm_id=source_vm_id,
                                    source_file='/home/cgcs/sfc/tcp_client.py',
                                    dest_file='/root/tcp_client.py')
    vm_helper.scp_to_vm_from_natbox(
        vm_id=source_vm_id,
        source_file='/home/cgcs/sfc/loop_tcp_client.sh',
        dest_file='/root/loop_tcp_client.sh')
    vm_helper.scp_to_vm_from_natbox(
        vm_id=source_vm_id,
        source_file='/home/cgcs/sfc/tcp_server_multi.py',
        dest_file='/root/tcp_server_multi.py')
    vm_helper.scp_to_vm_from_natbox(vm_id=dest_vm_id,
                                    source_file='/home/cgcs/sfc/tcp_client.py',
                                    dest_file='/root/tcp_client.py')
    vm_helper.scp_to_vm_from_natbox(
        vm_id=dest_vm_id,
        source_file='/home/cgcs/sfc/loop_tcp_client.sh',
        dest_file='/root/loop_tcp_client.sh')
    vm_helper.scp_to_vm_from_natbox(
        vm_id=dest_vm_id,
        source_file='/home/cgcs/sfc/tcp_server_multi.py',
        dest_file='/root/tcp_server_multi.py')
    _install_sw_packages_in_vm(source_vm_id)
    _install_sw_packages_in_vm(dest_vm_id)

    for sfc_vm in sfc_vm_ids:
        LOG.tc_step("copy vxlan tool in sfc vm {}".format(sfc_vm))
        vm_helper.scp_to_vm_from_natbox(
            vm_id=sfc_vm,
            source_file='/home/cgcs/sfc/vxlan_tool.py',
            dest_file='/root/vxlan_tool.py')

    LOG.tc_step("Create Port Pair for 3 SFC VM")
    port_pair_ids = []
    port_pair_id1 = _setup_port_pair(nsh_aware, ingress_port_id1,
                                     egress_port_id1)
    port_pair_ids.append(port_pair_id1)
    port_pair_id2 = _setup_port_pair(nsh_aware, ingress_port_id2,
                                     egress_port_id2)
    port_pair_ids.append(port_pair_id2)
    port_pair_id3 = _setup_port_pair(nsh_aware, ingress_port_id3,
                                     egress_port_id3)
    port_pair_ids.append(port_pair_id3)

    LOG.tc_step(
        "Create Port Pair group using 3 port pairs:{}".format(port_pair_ids))
    port_pair_group_ids = []
    port_pair_group_id = _setup_port_pair_groups(port_pair_ids)
    port_pair_group_ids.append(port_pair_group_id)

    LOG.tc_step("Create flow classifier")
    flow_classifier_name = 'sfc_flow_classifier'
    flow_classifier, dest_vm_internal_net_ip = _setup_flow_classifier(
        flow_classifier_name, source_vm_id, dest_vm_id, protocol)

    LOG.tc_step("Create Port Chain")
    _setup_port_chain(port_pair_group_ids, flow_classifier, symmetric)

    LOG.tc_step(
        "Execute vxlan.py tool and verify {} packet received VM1 to VM2".
        format(protocol))
    _check_packets_forwarded_in_sfc_vm(source_vm_id,
                                       dest_vm_id,
                                       sfc_vm_ids,
                                       dest_vm_internal_net_ip,
                                       protocol,
                                       nsh_aware,
                                       symmetric,
                                       load_balancing=True)
Example #16
    def test_multiports_on_same_network_pci_evacuate_vm(self, base_setup_pci,
                                                        vifs):
        """
        Test evacuate vm with multiple ports on same network

        Args:
            base_setup_pci (tuple): base vm id, vm under test id, segment id
                for internal0-net1
            vifs (list): list of vifs to add to same internal net

        Setups:
            - create a flavor with dedicated cpu policy (module)
            - choose one tenant network and one internal network to be used
            by test (module)
            - boot a base vm - vm1 with above flavor and networks, and ping
            it from NatBox (module)
            - Boot a vm under test - vm2 with above flavor and with multiple
            ports on same tenant network with base vm,
            and ping it from NatBox     (class)
            - Ping vm2's own data network ips       (class)
            - Ping vm2 from vm1 to verify management and internal networks
            connection   (class)

        Test Steps:
            - Reboot vm2 host
            - Wait for vm2 to be evacuated to other host
            - Wait for vm2 pingable from NatBox
            - Verify ping from vm1 to vm2 over management and internal
            networks still works

        Teardown:
            - Delete created vms and flavor
        """
        base_vm_pci, flavor, base_nics, avail_sriov_net, avail_pcipt_net, \
            pcipt_seg_ids, extra_pcipt_net = base_setup_pci

        internal_net_id = None
        pcipt_included = False
        nics = copy.deepcopy(base_nics)
        if 'pci-passthrough' in vifs:
            if not avail_pcipt_net:
                skip(SkipHostIf.PCIPT_IF_UNAVAIL)
            pcipt_included = True
            internal_net_id = avail_pcipt_net
            if extra_pcipt_net:
                nics.append(
                    {'net-id': extra_pcipt_net, 'vif-model': 'pci-passthrough'})
        if 'pci-sriov' in vifs:
            if not avail_sriov_net:
                skip(SkipHostIf.SRIOV_IF_UNAVAIL)
            internal_net_id = avail_sriov_net
        assert internal_net_id, "test script error. sriov or pcipt has to be " \
                                "included."

        for vif in vifs:
            nics.append({'net-id': internal_net_id, 'vif-model': vif})

        LOG.tc_step(
            "Boot a vm with following vifs on same network internal0-net1: "
            "{}".format(vifs))
        vm_under_test = vm_helper.boot_vm(name='multiports_pci_evac',
                                          nics=nics, flavor=flavor,
                                          cleanup='function',
                                          reuse_vol=False)[1]
        vm_helper.wait_for_vm_pingable_from_natbox(vm_under_test, fail_ok=False)

        if pcipt_included:
            LOG.tc_step("Add vlan to pci-passthrough interface.")
            vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=vm_under_test,
                                                       net_seg_id=pcipt_seg_ids,
                                                       init_conf=True)

        LOG.tc_step("Ping vm's own data and internal network ips")
        vm_helper.ping_vms_from_vm(to_vms=vm_under_test, from_vm=vm_under_test,
                                   net_types=['data', 'internal'])
        vm_helper.configure_vm_vifs_on_same_net(vm_id=vm_under_test)

        LOG.tc_step(
            "Ping vm_under_test from base_vm over management, data, and "
            "internal networks")
        vm_helper.ping_vms_from_vm(to_vms=vm_under_test, from_vm=base_vm_pci,
                                   net_types=['mgmt', 'data', 'internal'])

        host = vm_helper.get_vm_host(vm_under_test)

        LOG.tc_step("Reboot vm host {}".format(host))
        vm_helper.evacuate_vms(host=host, vms_to_check=vm_under_test,
                               ping_vms=True)

        if pcipt_included:
            LOG.tc_step(
                "Add/Check vlan interface is added to pci-passthrough device "
                "for vm {}.".format(vm_under_test))
            vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=vm_under_test,
                                                       net_seg_id=pcipt_seg_ids)

        LOG.tc_step(
            "Verify ping from base_vm to vm_under_test over management and "
            "internal networks still works after evacuation.")
        vm_helper.ping_vms_from_vm(to_vms=vm_under_test, from_vm=base_vm_pci,
                                   net_types=['mgmt', 'internal'])
Example #17
def test_ping_between_two_vms(stx_openstack_required, guest_os, vm1_vifs, vm2_vifs):
    """
    Ping between two vms with given vif models

    Test Steps:
        - Create a flavor with dedicated cpu policy and proper root disk size
        - Create a volume from guest image under test with proper size
        - Boot two vms with given vif models from above volume and flavor
        - Ping VMs from NatBox and between two vms

    Test Teardown:
        - Delete vms, volumes, flavor, glance image created

    """
    if guest_os == 'default':
        guest_os = GuestImages.DEFAULT['guest']

    reuse = False if 'e1000' in vm1_vifs or 'e1000' in vm2_vifs else True
    cleanup = 'function' if not reuse or 'ubuntu' in guest_os else None
    image_id = glance_helper.get_guest_image(guest_os, cleanup=cleanup,
                                             use_existing=reuse)

    LOG.tc_step("Create a favor dedicated cpu policy")
    flavor_id = nova_helper.create_flavor(name='dedicated', guest_os=guest_os,
                                          cleanup='function')[1]
    nova_helper.set_flavor(flavor_id, **{FlavorSpec.CPU_POLICY: 'dedicated'})

    mgmt_net_id = network_helper.get_mgmt_net_id()
    tenant_net_id = network_helper.get_tenant_net_id()
    internal_net_id = network_helper.get_internal_net_id()
    net_ids = (mgmt_net_id, tenant_net_id, internal_net_id)
    vms = []
    for vifs_for_vm in (vm1_vifs, vm2_vifs):
        # compose vm nics
        nics = _compose_nics(vifs_for_vm, net_ids=net_ids, image_id=image_id,
                             guest_os=guest_os)
        net_types = ['mgmt', 'data', 'internal'][:len(nics)]
        LOG.tc_step("Create a volume from {} image".format(guest_os))
        vol_id = cinder_helper.create_volume(name='vol-{}'.format(guest_os),
                                             source_id=image_id,
                                             guest_image=guest_os,
                                             cleanup='function')[1]

        LOG.tc_step(
            "Boot a {} vm with {} vifs from above flavor and volume".format(
                guest_os, vifs_for_vm))
        vm_id = vm_helper.boot_vm('{}_vifs'.format(guest_os), flavor=flavor_id,
                                  cleanup='function',
                                  source='volume', source_id=vol_id, nics=nics,
                                  guest_os=guest_os)[1]

        LOG.tc_step("Ping VM {} from NatBox(external network)".format(vm_id))
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id, fail_ok=False)

        vms.append(vm_id)

    LOG.tc_step(
        "Ping between two vms over management, data, and internal networks")
    vm_helper.ping_vms_from_vm(to_vms=vms[0], from_vm=vms[1],
                               net_types=net_types)
    vm_helper.ping_vms_from_vm(to_vms=vms[1], from_vm=vms[0],
                               net_types=net_types)
Example #18
    def base_setup_pci(self):
        LOG.fixture_step(
            "(class) Get an internal network that supports both pci-sriov and "
            "pcipt vif to boot vm")
        avail_pcipt_nets, is_cx4 = network_helper.get_pci_vm_network(
            pci_type='pci-passthrough',
            net_name='internal0-net', rtn_all=True)
        avail_sriov_nets, _ = network_helper.get_pci_vm_network(
            pci_type='pci-sriov',
            net_name='internal0-net', rtn_all=True)

        if not avail_pcipt_nets and not avail_sriov_nets:
            skip(SkipHostIf.PCI_IF_UNAVAIL)

        avail_nets = list(set(avail_pcipt_nets) & set(avail_sriov_nets))
        extra_pcipt_net = avail_pcipt_net = avail_sriov_net = None
        pcipt_seg_ids = {}
        if avail_nets:
            avail_net_name = avail_nets[-1]
            avail_net, segment_id = network_helper.get_network_values(
                network=avail_net_name,
                fields=('id', 'provider:segmentation_id'))
            internal_nets = [avail_net]
            pcipt_seg_ids[avail_net_name] = segment_id
            avail_pcipt_net = avail_sriov_net = avail_net
            LOG.info(
                "Internal network(s) selected for pcipt and sriov: {}".format(
                    avail_net_name))
        else:
            LOG.info("No internal network support both sriov and pcipt")
            internal_nets = []
            if avail_pcipt_nets:
                avail_pcipt_net_name = avail_pcipt_nets[-1]
                avail_pcipt_net, segment_id = network_helper.get_network_values(
                    network=avail_pcipt_net_name,
                    fields=('id', 'provider:segmentation_id'))
                internal_nets.append(avail_pcipt_net)
                pcipt_seg_ids[avail_pcipt_net_name] = segment_id
                LOG.info("pci-passthrough net: {}".format(avail_pcipt_net_name))
            if avail_sriov_nets:
                avail_sriov_net_name = avail_sriov_nets[-1]
                avail_sriov_net = network_helper.get_net_id_from_name(
                    avail_sriov_net_name)
                internal_nets.append(avail_sriov_net)
                LOG.info("pci-sriov net: {}".format(avail_sriov_net_name))

        mgmt_net_id = network_helper.get_mgmt_net_id()
        tenant_net_id = network_helper.get_tenant_net_id()
        base_nics = [{'net-id': mgmt_net_id}, {'net-id': tenant_net_id}]
        nics = base_nics + [{'net-id': net_id} for net_id in internal_nets]

        if avail_pcipt_nets and is_cx4:
            extra_pcipt_net_name = avail_nets[0] if avail_nets else \
                avail_pcipt_nets[0]
            extra_pcipt_net, seg_id = network_helper.get_network_values(
                network=extra_pcipt_net_name,
                fields=('id', 'provider:segmentation_id'))
            if extra_pcipt_net not in internal_nets:
                nics.append({'net-id': extra_pcipt_net})
                pcipt_seg_ids[extra_pcipt_net_name] = seg_id

        LOG.fixture_step("(class) Create a flavor with dedicated cpu policy.")
        flavor_id = \
            nova_helper.create_flavor(name='dedicated', vcpus=2, ram=2048,
                                      cleanup='class')[1]
        extra_specs = {FlavorSpec.CPU_POLICY: 'dedicated',
                       FlavorSpec.PCI_NUMA_AFFINITY: 'preferred'}
        nova_helper.set_flavor(flavor=flavor_id, **extra_specs)

        LOG.fixture_step(
            "(class) Boot a base pci vm with following nics: {}".format(nics))
        base_vm_pci = \
            vm_helper.boot_vm(name='multiports_pci_base', flavor=flavor_id,
                              nics=nics, cleanup='class')[1]

        LOG.fixture_step("(class) Ping base PCI vm interfaces")
        vm_helper.wait_for_vm_pingable_from_natbox(base_vm_pci)
        vm_helper.ping_vms_from_vm(to_vms=base_vm_pci, from_vm=base_vm_pci,
                                   net_types=['data', 'internal'])

        return base_vm_pci, flavor_id, base_nics, avail_sriov_net, \
            avail_pcipt_net, pcipt_seg_ids, extra_pcipt_net
Example #19
    def test_multiports_on_same_network_pci_vm_actions(self, base_setup_pci,
                                                       vifs):
        """
        Test vm actions on vm with multiple ports with given vif models on
        the same tenant network

        Args:
            base_setup_pci (tuple): base_vm_pci, flavor, mgmt_net_id,
                tenant_net_id, internal_net_id, seg_id
            vifs (list): list of vifs to add to same internal net

        Setups:
            - Create a flavor with dedicated cpu policy (class)
            - Choose management net, one tenant net, and internal0-net1 to be
            used by test (class)
            - Boot a base pci-sriov vm - vm1 with above flavor and networks,
            ping it from NatBox (class)
            - Ping vm1 from itself over data, and internal networks

        Test Steps:
            - Boot a vm under test - vm2 with above flavor and with multiple
            ports on same tenant network with vm1,
                and ping it from NatBox
            - Ping vm2's own data and internal network ips
            - Ping vm2 from vm1 to verify management and data networks
            connection
            - Perform one of the following actions on vm2
                - set to error/ wait for auto recovery
                - suspend/resume
                - cold migration
                - pause/unpause
            - Update vlan interface to proper eth if pci-passthrough device
            moves to different eth
            - Verify ping from vm1 to vm2 over management and data networks
            still works
            - Repeat last 3 steps with different vm actions

        Teardown:
            - Delete created vms and flavor
        """

        base_vm_pci, flavor, base_nics, avail_sriov_net, avail_pcipt_net, \
            pcipt_seg_ids, extra_pcipt_net = base_setup_pci

        pcipt_included = False
        internal_net_id = None
        for vif in vifs:
            if not isinstance(vif, str):
                vif = vif[0]
            if 'pci-passthrough' in vif:
                if not avail_pcipt_net:
                    skip(SkipHostIf.PCIPT_IF_UNAVAIL)
                internal_net_id = avail_pcipt_net
                pcipt_included = True
                continue
            elif 'pci-sriov' in vif:
                if not avail_sriov_net:
                    skip(SkipHostIf.SRIOV_IF_UNAVAIL)
                internal_net_id = avail_sriov_net

        assert internal_net_id, "test script error. Internal net should have " \
                                "been determined."

        nics, glance_vif = _append_nics_for_net(vifs, net_id=internal_net_id,
                                                nics=base_nics)
        if pcipt_included and extra_pcipt_net:
            nics.append(
                {'net-id': extra_pcipt_net, 'vif-model': 'pci-passthrough'})

        img_id = None
        if glance_vif:
            img_id = glance_helper.create_image(name=glance_vif,
                                                hw_vif_model=glance_vif,
                                                cleanup='function')[1]

        LOG.tc_step("Boot a vm with following vifs on same internal net: "
                    "{}".format(vifs))
        vm_under_test = vm_helper.boot_vm(name='multiports_pci',
                                          nics=nics, flavor=flavor,
                                          cleanup='function',
                                          reuse_vol=False, image_id=img_id)[1]
        vm_helper.wait_for_vm_pingable_from_natbox(vm_under_test, fail_ok=False)

        if pcipt_included:
            LOG.tc_step("Add vlan to pci-passthrough interface for VM.")
            vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=vm_under_test,
                                                       net_seg_id=pcipt_seg_ids,
                                                       init_conf=True)

        LOG.tc_step("Ping vm's own data and internal network ips")
        vm_helper.ping_vms_from_vm(to_vms=vm_under_test, from_vm=vm_under_test,
                                   net_types=['data', 'internal'])

        LOG.tc_step(
            "Ping vm_under_test from base_vm over management, data, "
            "and internal networks")
        vm_helper.ping_vms_from_vm(to_vms=vm_under_test, from_vm=base_vm_pci,
                                   net_types=['mgmt', 'data', 'internal'])

        for vm_actions in [['auto_recover'], ['cold_migrate'],
                           ['pause', 'unpause'], ['suspend', 'resume']]:
            if 'auto_recover' in vm_actions:
                LOG.tc_step(
                    "Set vm to error state and wait for auto recovery "
                    "complete, "
                    "then verify ping from base vm over management and "
                    "internal networks")
                vm_helper.set_vm_state(vm_id=vm_under_test, error_state=True,
                                       fail_ok=False)
                vm_helper.wait_for_vm_values(vm_id=vm_under_test,
                                             status=VMStatus.ACTIVE,
                                             fail_ok=False, timeout=600)
            else:
                LOG.tc_step("Perform following action(s) on vm {}: {}".format(
                    vm_under_test, vm_actions))
                for action in vm_actions:
                    vm_helper.perform_action_on_vm(vm_under_test, action=action)

            vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_under_test)
            if pcipt_included:
                LOG.tc_step(
                    "Bring up vlan interface for pci-passthrough vm {}.".format(
                        vm_under_test))
                vm_helper.add_vlan_for_vm_pcipt_interfaces(
                    vm_id=vm_under_test, net_seg_id=pcipt_seg_ids)

            LOG.tc_step(
                "Verify ping from base_vm to vm_under_test over management "
                "and internal networks still works "
                "after {}".format(vm_actions))
            vm_helper.ping_vms_from_vm(to_vms=vm_under_test,
                                       from_vm=base_vm_pci,
                                       net_types=['mgmt', 'internal'])
Example #20
def test_evacuate_pci_vm(vif_model_check):
    """
    Test evacuate vm with multiple ports on same network

    Args:

    Setups:
        - create a flavor with dedicated cpu policy (module)
        - choose one tenant network and one internal network to be used by test (module)
        - boot a base vm - vm1 with above flavor and networks, and ping it from NatBox (module)
        - Boot a vm under test - vm2 with above flavor and with multiple ports on same tenant network with base vm,
        and ping it from NatBox     (class)
        - Ping vm2's own data network ips       (class)
        - Ping vm2 from vm1 to verify management and data networks connection   (class)

    Test Steps:
        - Reboot vm2 host
        - Wait for vm2 to be evacuated to other host
        - Wait for vm2 pingable from NatBox
        - Verify ping from vm1 to vm2 over management and data networks still works

    Teardown:
        - Delete created vms and flavor
    """
    vif_model, base_vm, flavor_id, nics_to_test, seg_id, net_type, pnet_name, extra_pcipt_net = vif_model_check

    LOG.tc_step("Boot a vm with {} vif model on {} net".format(
        vif_model, net_type))
    res, vm_id, err = vm_helper.boot_vm(name=vif_model,
                                        flavor=flavor_id,
                                        cleanup='function',
                                        nics=nics_to_test)
    assert 0 == res, "VM is not booted successfully. Error: {}".format(err)

    vm_helper.wait_for_vm_pingable_from_natbox(vm_id, fail_ok=False)

    if 'pci-passthrough' == vif_model:
        LOG.tc_step("Add vlan to pci-passthrough interface for VM.")
        vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=vm_id,
                                                   net_seg_id=seg_id,
                                                   init_conf=True)

    LOG.tc_step("Ping vm over mgmt and {} nets from base vm".format(net_type))
    vm_helper.ping_vms_from_vm(to_vms=vm_id,
                               from_vm=base_vm,
                               net_types=['mgmt', net_type])

    host = vm_helper.get_vm_host(vm_id)

    # Remove the following ssh VM to sync code once CGTS-9279 is fixed
    LOG.tc_step("Login in to VM & do sync command")
    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        vm_ssh.exec_sudo_cmd('sync')

    LOG.tc_step("Reboot vm host {}".format(host))
    vm_helper.evacuate_vms(host=host,
                           vms_to_check=vm_id,
                           ping_vms=True,
                           wait_for_host_up=False)

    if 'pci-passthrough' == vif_model:
        LOG.tc_step(
            "Add vlan to pci-passthrough interface for VM again after evacuation due to interface change."
        )
        vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=vm_id,
                                                   net_seg_id=seg_id)

    LOG.tc_step(
        "Check vm still pingable over mgmt, and {} nets after evacuation".
        format(net_type))
    vm_helper.ping_vms_from_vm(to_vms=vm_id,
                               from_vm=base_vm,
                               net_types=['mgmt', net_type])

    LOG.tc_step(
        "Wait for rebooted host {} to recover and ensure vm are still reachable"
        .format(host))
    host_helper.wait_for_hosts_ready(hosts=host)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id)
    vm_helper.ping_vms_from_vm(to_vms=vm_id,
                               from_vm=base_vm,
                               net_types=['mgmt', net_type])
Example #21
    def test_multiports_on_same_network_vm_actions(self, vifs, base_setup):
        """
        Test vm actions on vm with multiple ports with given vif models on
        the same tenant network

        Args:
            vifs (tuple): each item in the tuple is 1 nic to be added to vm
                with specified (vif_model, pci_address)
            base_setup (list): test fixture to boot base vm

        Setups:
            - create a flavor with dedicated cpu policy (class)
            - choose one tenant network and one internal network to be used
            by test (class)
            - boot a base vm - vm1 with above flavor and networks, and ping
            it from NatBox (class)
            - Boot a vm under test - vm2 with above flavor and with multiple
            ports on same tenant network with base vm,
            and ping it from NatBox      (class)
            - Ping vm2's own data network ips        (class)
            - Ping vm2 from vm1 to verify management and data networks
            connection    (class)

        Test Steps:
            - Perform given actions on vm2 (migrate, start/stop, etc)
            - Verify pci_address preserves
            - Verify ping from vm1 to vm2 over management and data networks
            still works

        Teardown:
            - Delete created vms and flavor
        """
        base_vm, flavor, mgmt_net_id, tenant_net_id, internal_net_id = \
            base_setup

        vm_under_test, nics = _boot_multiports_vm(flavor=flavor,
                                                  mgmt_net_id=mgmt_net_id,
                                                  vifs=vifs,
                                                  net_id=tenant_net_id,
                                                  net_type='data',
                                                  base_vm=base_vm)

        for vm_actions in [['auto_recover'],
                           ['cold_migrate'],
                           ['pause', 'unpause'],
                           ['suspend', 'resume'],
                           ['hard_reboot']]:
            if vm_actions[0] == 'auto_recover':
                LOG.tc_step(
                    "Set vm to error state and wait for auto recovery "
                    "complete, then verify ping from "
                    "base vm over management and data networks")
                vm_helper.set_vm_state(vm_id=vm_under_test, error_state=True,
                                       fail_ok=False)
                vm_helper.wait_for_vm_values(vm_id=vm_under_test,
                                             status=VMStatus.ACTIVE,
                                             fail_ok=True, timeout=600)
            else:
                LOG.tc_step("Perform following action(s) on vm {}: {}".format(
                    vm_under_test, vm_actions))
                for action in vm_actions:
                    if 'migrate' in action and system_helper.is_aio_simplex():
                        continue

                    kwargs = {}
                    if action == 'hard_reboot':
                        action = 'reboot'
                        kwargs['hard'] = True
                    kwargs['action'] = action

                    vm_helper.perform_action_on_vm(vm_under_test, **kwargs)

            vm_helper.wait_for_vm_pingable_from_natbox(vm_under_test)

            # LOG.tc_step("Verify vm pci address preserved after {}".format(
            # vm_actions))
            # check_helper.check_vm_pci_addr(vm_under_test, nics)

            LOG.tc_step(
                "Verify ping from base_vm to vm_under_test over management "
                "and data networks still works "
                "after {}".format(vm_actions))
            vm_helper.ping_vms_from_vm(to_vms=vm_under_test, from_vm=base_vm,
                                       net_types=['mgmt', 'data'])
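
For the hard_reboot branch above, the kwargs mapping resolves to a call equivalent to:

vm_helper.perform_action_on_vm(vm_under_test, action='reboot', hard=True)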
Example #22
def test_ping_vms_from_vm_2():
    to_vms = vm_helper.get_any_vms(auth_info=Tenant.get('admin'),
                                   all_tenants=True)
    for vm in vm_helper.get_any_vms():
        vm_helper.ping_vms_from_vm(to_vms=to_vms, from_vm=vm)
Example #23
def test_interface_attach_detach_max_vnics(guest_os, if_attach_arg, vifs,
                                           check_avs_pattern, base_vm):
    """
    Sample test case for interface attach/detach to maximum vnics

    Setups:
        - Boot a base vm with mgmt net and internal0-net1   (module)

    Test Steps:
        - Boot a vm with only mgmt interface
        - Attach vifs to vm with given if_attach_arg and vif_model
        - Bring up the interface from vm
        - ping between base_vm and vm_under_test over mgmt & tenant network
        - Perform VM action - Cold migrate, live migrate, pause resume, suspend resume
        - Verify ping between base_vm and vm_under_test over mgmt & tenant network after vm operation
        - detach all the tenant interfaces
        - Repeat attach/detach after performing each vm action

    Teardown:
        - Delete created vm, volume, port (if any)  (func)
        - Delete base vm, volume    (module)

    """
    if guest_os == 'vxworks' and not system_helper.is_avs():
        skip('e1000 vif unsupported by OVS')

    base_vm_id, mgmt_nic, tenant_nic, internal_net_id, tenant_net_id, mgmt_net_id = base_vm

    glance_vif = None
    if not (if_attach_arg == 'port_id' and system_helper.is_avs()):
        for vif in vifs:
            if vif[0] in ('e1000', 'rtl8139'):
                glance_vif = vif[0]
                break

    LOG.tc_step("Get/Create {} glance image".format(guest_os))
    cleanup = None if (not glance_vif and re.search(
        GuestImages.TIS_GUEST_PATTERN, guest_os)) else 'function'
    image_id = glance_helper.get_guest_image(
        guest_os=guest_os,
        cleanup=cleanup,
        use_existing=False if cleanup else True)

    if glance_vif:
        glance_helper.set_image(image_id,
                                hw_vif_model=glance_vif,
                                new_name='{}_{}'.format(guest_os, glance_vif))

    LOG.tc_step("Create a flavor with 2 vcpus")
    flavor_id = nova_helper.create_flavor(vcpus=1,
                                          guest_os=guest_os,
                                          cleanup='function')[1]

    LOG.tc_step("Create a volume from {} image".format(guest_os))
    code, vol_id = cinder_helper.create_volume(name='vol-' + guest_os,
                                               source_id=image_id,
                                               fail_ok=True,
                                               guest_image=guest_os,
                                               cleanup='function')
    assert 0 == code, "Issue occurred when creating volume"
    source_id = vol_id

    LOG.tc_step("Boot a vm with mgmt nic only")
    vm_under_test = vm_helper.boot_vm(name='if_attach_tenant',
                                      nics=[mgmt_nic],
                                      source_id=source_id,
                                      flavor=flavor_id,
                                      guest_os=guest_os,
                                      cleanup='function')[1]
    prev_port_count = 1
    for vm_actions in [['live_migrate'], ['cold_migrate'],
                       ['pause', 'unpause'], ['suspend', 'resume'],
                       ['stop', 'start']]:
        tenant_port_ids = []
        if 'vxworks' not in guest_os:
            LOG.tc_step(
                "Attach specified vnics to the VM before {} and bring up interfaces"
                .format(vm_actions))
            expt_vnics = 1
            for vif in vifs:
                vif_model, vif_count = vif
                expt_vnics += vif_count
                LOG.info("iter {}".format(vif_count))
                for i in range(vif_count):
                    if if_attach_arg == 'port_id':
                        vif_model = vif_model if system_helper.is_avs() else None
                        port = network_helper.create_port(
                            net_id=tenant_net_id,
                            wrs_vif=vif_model,
                            cleanup='function',
                            name='attach_{}_{}'.format(vif_model, i))[1]
                        kwargs = {'port_id': port}
                    else:
                        kwargs = {'net_id': tenant_net_id}
                    tenant_port_id = vm_helper.attach_interface(
                        vm_under_test, **kwargs)[1]
                    tenant_port_ids.append(tenant_port_id)
                LOG.info(
                    "Attached new vnics to the VM {}".format(tenant_port_ids))

            vm_ports_count = len(
                network_helper.get_ports(server=vm_under_test))
            LOG.info("vnics attached to VM: {}".format(vm_ports_count))
            assert vm_ports_count == expt_vnics, "vnics attached is not equal to max number."

            LOG.info(
                "Bring up all the attached new vifs {} on tenant net from vm".
                format(vifs))
            _bring_up_attached_interface(vm_under_test,
                                         ports=tenant_port_ids,
                                         guest_os=guest_os,
                                         base_vm=base_vm_id)

            if expt_vnics == 16:
                LOG.tc_step(
                    "Verify no more vnic can be attached after reaching upper limit 16"
                )
                res = vm_helper.attach_interface(vm_under_test,
                                                 net_id=tenant_net_id,
                                                 fail_ok=True)[0]
                assert res == 1, "Attaching a vnic beyond the 16-vnic limit should have been rejected"

        if vm_actions[0] == 'auto_recover':
            LOG.tc_step(
                "Set vm to error state and wait for auto recovery complete, then verify ping from "
                "base vm over management and data networks")
            vm_helper.set_vm_state(vm_id=vm_under_test,
                                   error_state=True,
                                   fail_ok=False)
            vm_helper.wait_for_vm_values(vm_id=vm_under_test,
                                         status=VMStatus.ACTIVE,
                                         fail_ok=True,
                                         timeout=600)
            # if 'vxworks' not in guest_os:
            #     _bring_up_attached_interface(vm_under_test, guest_os=guest_os, num=new_vnics)
        else:
            LOG.tc_step("Perform following action(s) on vm {}: {}".format(
                vm_under_test, vm_actions))
            for action in vm_actions:
                vm_helper.perform_action_on_vm(vm_under_test, action=action)
                if action == 'cold_migrate' or action == 'start':
                    LOG.tc_step(
                        "Bring up all the attached tenant interface from vm after {}"
                        .format(vm_actions))
                    # if 'vxworks' not in guest_os:
                    #     _bring_up_attached_interface(vm_under_test, guest_os=guest_os, num=new_vnics)

        vm_helper.wait_for_vm_pingable_from_natbox(vm_under_test)

        if 'vxworks' not in guest_os:
            LOG.tc_step(
                "Verify ping from base_vm to vm_under_test over management networks still works "
                "after {}".format(vm_actions))
            vm_helper.ping_vms_from_vm(to_vms=vm_under_test,
                                       from_vm=base_vm_id,
                                       net_types=['mgmt', 'data'],
                                       retry=10)

            LOG.tc_step("Detach all attached interface {} after {}".format(
                tenant_port_ids, vm_actions))
            for tenant_port_id in tenant_port_ids:
                vm_helper.detach_interface(vm_id=vm_under_test,
                                           port_id=tenant_port_id,
                                           cleanup_route=True)

            vm_ports_count = len(
                network_helper.get_ports(server=vm_under_test))
            assert prev_port_count == vm_ports_count, "VM ports still listed after interface-detach"
            res = vm_helper.ping_vms_from_vm(to_vms=base_vm_id,
                                             from_vm=vm_under_test,
                                             fail_ok=True,
                                             net_types=['data'],
                                             retry=0)[0]
            assert not res, "Detached interface still works"
Example #24
def test_dynamic_vxlan_functional(version, mode):
    """
        Vxlan feature test cases

        Test Steps:
            - Make sure Vxlan provider net is configured only on Internal net
            - Find out an internal network that matches the vxlan mode and IP version
            - Use the mgmt-net and the internal net to create vms for tenant-1 and tenant-2
            - Make sure the vms are placed on separate hosts using host-aggregates
            - ssh to the compute where the vm is hosted to check the vshell stats
            - Ping from the vm and check the stats for known-vtep on the compute
            - Ping from the vm to an unknown IP and check the compute for stats


        Test Teardown:
            - Delete vms, volumes created

    """
    vxlan_provider_name = 'group0-data0b'
    vif_model = 'avp'
    providernets = system_helper.get_data_networks(field='name', network_type='vxlan')
    if not providernets or (len(providernets) > 1) or (vxlan_provider_name not in providernets):
        skip("Vxlan provider-net not configured or Vxlan provider-net configured on more than one provider net\
         or not configurd on internal net")

    # get the id of the provider net
    vxlan_provider_net_id = system_helper.get_data_networks(field='id', network_type='vxlan')
    vm_ids = []

    # get 2 computes so we can create the aggregate and force vm occupancy
    computes = host_helper.get_up_hypervisors()

    if len(computes) < 2:
        skip(" Need at least 2 computes to run the Vxlan test cases")

    aggregate_name = 'vxlan'
    vxlan_computes = computes[0:2]

    # create aggregate with 2 computes
    ret_val = nova_helper.create_aggregate(name=aggregate_name, avail_zone=aggregate_name)[1]
    assert ret_val == aggregate_name, "Aggregate was not created as expected."
    ResourceCleanup.add('aggregate', aggregate_name)

    nova_helper.add_hosts_to_aggregate(aggregate=aggregate_name, hosts=vxlan_computes)

    for compute in computes:
        assert 0 == clear_vxlan_endpoint_stats(compute), "clear stats failed"

    LOG.tc_step("Getting Internal net ids.")
    internal_net_ids = network_helper.get_internal_net_ids_on_vxlan(vxlan_provider_net_id=vxlan_provider_net_id,
                                                                    ip_version=version, mode=mode)
    if not internal_net_ids:
        skip("No networks found for ip version {} on the vxlan provider net".format(version))

    LOG.tc_step("Creating vms for both tenants.")
    primary_tenant = Tenant.get_primary()
    other_tenant = Tenant.get_secondary()

    for auth_info, vm_host in zip([primary_tenant, other_tenant], vxlan_computes):
        mgmt_net_id = network_helper.get_mgmt_net_id(auth_info=auth_info)
        nics = [{'net-id': mgmt_net_id},
                {'net-id': internal_net_ids[0], 'vif-model': vif_model}]
        vm_name = common.get_unique_name(name_str='vxlan')
        vm_ids.append(vm_helper.boot_vm(name=vm_name, vm_host=vm_host, nics=nics, avail_zone=aggregate_name,
                                        auth_info=auth_info, cleanup='function')[1])

    # make sure VMs are not on the same compute; not strictly needed, but double-check:
    if vm_helper.get_vm_host(vm_id=vm_ids[0]) == vm_helper.get_vm_host(vm_id=vm_ids[1]):
        vm_helper.cold_migrate_vm(vm_id=vm_ids[0])

    filter_known_vtep = 'packets-unicast'
    filter_stat_at_boot = 'packets-multicast'
    filter_unknown_vtep = 'packets-multicast'

    if mode == 'static':
        filter_stat_at_boot = 'packets-unicast'
        filter_unknown_vtep = 'packets-unicast'

    LOG.tc_step("Checking stats on computes after vms are launched.")
    for compute in computes:
        stats_after_boot_vm = get_vxlan_endpoint_stats(compute, field=filter_stat_at_boot)
        if len(stats_after_boot_vm) == 3:
            stats = int(stats_after_boot_vm[1]) + int(stats_after_boot_vm[2])
            LOG.info("Stats for {} packets after vm launch: {}".format(filter_stat_at_boot, stats))
        elif len(stats_after_boot_vm) == 2:
            stats = int(stats_after_boot_vm[1])
            LOG.info("Stats for {} packets after vm launch: {}".format(filter_stat_at_boot, stats))
        else:
            assert 0, "Failed to get stats from compute"
        assert 0 < stats, "stats are not incremented as expected"

    # clear stats
    LOG.tc_step("Clearing vxlan-endpoint-stats on computes: {}".format(computes))
    for compute in computes:
        assert 0 == clear_vxlan_endpoint_stats(compute), "clear stats failed"

    # Ping b/w vm over Internal nets and check stats, ping from 2nd vm
    LOG.tc_step("Ping between two vms over internal network")
    vm_helper.ping_vms_from_vm(to_vms=vm_ids[0], from_vm=vm_ids[1], net_types=['internal'])

    stats_after_ping = get_vxlan_endpoint_stats(computes[0], field=filter_known_vtep)
    if not stats_after_ping:
        assert 0, "Compute stats are empty"

    LOG.tc_step("Checking stats on computes after vm ping over the internal net.")
    if len(stats_after_ping) == 3:
        stats_known_vtep = int(stats_after_ping[1]) + int(stats_after_ping[2])
        LOG.info("Stats for {} packets after ping: {}".format(filter_known_vtep, stats_known_vtep))
    elif len(stats_after_ping) == 2:
        stats_known_vtep = int(stats_after_ping[1])
        LOG.info("Stats for {} packets after ping: {}".format(filter_known_vtep, stats_known_vtep))
    else:
        assert 0, "Failed to get stats from compute"
    assert 0 < stats_known_vtep, "stats are not incremented as expected"

    # clear stats
    LOG.tc_step("Clearing vxlan-endpoint-stats on computes: {}".format(computes))
    for compute in computes:
        assert 0 == clear_vxlan_endpoint_stats(compute), "clear stats failed"

    # ping unknown IP over the internal net and check stats
    LOG.tc_step("Ping to an unknown IP from vms over internal network")
    unknown_ip = '10.10.10.30'
    with vm_helper.ssh_to_vm_from_natbox(vm_ids[1]) as vm2_ssh:
        LOG.tc_step("Ping unknown ip from guest")
        cmd = 'ping -I eth1 -c 5 {}'.format(unknown_ip)
        code, output = vm2_ssh.exec_cmd(cmd=cmd, expect_timeout=60)
        assert int(code) > 0, "Expected to see 100% ping failure"

    LOG.tc_step("Checking stats on computes after vm ping on unknown IP.")
    stats_after_ping_unknown_vtep = get_vxlan_endpoint_stats(computes[1], field=filter_unknown_vtep)
    if not stats_after_ping_unknown_vtep:
        assert 0, "Compute stats are empty"

    if len(stats_after_ping_unknown_vtep) == 3:
        stats_unknown_vtep = int(stats_after_ping_unknown_vtep[1]) + int(stats_after_ping_unknown_vtep[2])
        LOG.info("Stats for {} packets after ping to unknown vtep: {}".format(
            filter_unknown_vtep, stats_unknown_vtep))
    elif len(stats_after_ping_unknown_vtep) == 2:
        stats_unknown_vtep = int(stats_after_ping_unknown_vtep[1])
        LOG.info("Stats for {} packets after ping to unknown vtep: {}".format(
            filter_unknown_vtep, stats_unknown_vtep))
    else:
        assert 0, "Failed to get stats from compute"
    assert 0 < stats_unknown_vtep, "stats are not incremented as expected"
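
# The repeated length checks on the vshell stats rows above could be factored into a
# small helper like this hypothetical sketch (not part of the original test module);
# it only restates the summing logic already used in the test:
def _sum_endpoint_stats(stats_row):
    """Sum the packet counters in a row returned by get_vxlan_endpoint_stats."""
    if len(stats_row) == 3:
        return int(stats_row[1]) + int(stats_row[2])
    if len(stats_row) == 2:
        return int(stats_row[1])
    assert 0, "Failed to get stats from compute"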
Exemplo n.º 25
0
def test_port_trunking():
    """
    Port trunking feature test cases

    Test Steps:
        - Create networks
        - Create subnets
        - Create a parent port and subports
        - Create a trunk with parent port and subports
        - Boot the first vm with the trunk
        - Create the second trunk without subport
        - Boot the second vm
        - Add subports to the second trunk
        - Configure vlan interfaces inside guests
        - Verify connectivity via vlan interfaces
        - Remove the subport from trunk and verify connectivity
        - Add the subport back to the trunk and verify connectivity
        - Do vm actions and verify connectivity


    Test Teardown:
        - Delete vms, ports, subnets, and networks created

    """
    vif_model = 'avp' if system_helper.is_avs() else None
    network_names = ['network11', 'network12', 'network13']
    net_ids = []
    sub_nets = ["30.0.0.0/24", "30.0.1.0/24", "30.0.2.0/24"]
    subnet_ids = []
    # parent ports and sub ports for trunk 1 and trunk 2
    trunk1_parent_port = 'vrf10'
    trunk1_subport_1 = 'vrf11'
    trunk1_subport_2 = 'vrf12'

    trunk2_parent_port = 'host10'
    trunk2_subport_1 = 'host11'
    trunk2_subport_2 = 'host12'

    # vlan id for the subports
    segment_1 = 1
    segment_2 = 2

    LOG.tc_step("Create Networks to be used by trunk")
    for net in network_names:
        net_ids.append(
            network_helper.create_network(name=net, cleanup='function')[1])

    LOG.tc_step("Create Subnet on the Network Created")
    for sub, network in zip(sub_nets, net_ids):
        subnet_ids.append(
            network_helper.create_subnet(network=network,
                                         subnet_range=sub,
                                         gateway='none',
                                         cleanup='function')[1])

    # Create Trunks
    LOG.tc_step("Create Parent port for trunk 1")
    t1_parent_port_id = network_helper.create_port(net_ids[0],
                                                   trunk1_parent_port,
                                                   wrs_vif=vif_model,
                                                   cleanup='function')[1]
    t1_parent_port_mac = network_helper.get_ports(
        field='mac address', port_name=trunk1_parent_port)[0]

    LOG.tc_step("Create Subport with parent port mac to be used by trunk 1")
    t1_sub_port1_id = network_helper.create_port(net_ids[1],
                                                 name=trunk1_subport_1,
                                                 mac_addr=t1_parent_port_mac,
                                                 wrs_vif=vif_model,
                                                 cleanup='function')[1]

    LOG.tc_step("Create Subport with parent port mac to be used by trunk 1")
    t1_sub_port2_id = network_helper.create_port(net_ids[2],
                                                 name=trunk1_subport_2,
                                                 mac_addr=t1_parent_port_mac,
                                                 wrs_vif=vif_model,
                                                 cleanup='function')[1]

    t1_sub_ports = [{
        'port': t1_sub_port1_id,
        'segmentation-type': 'vlan',
        'segmentation-id': segment_1
    }, {
        'port': t1_sub_port2_id,
        'segmentation-type': 'vlan',
        'segmentation-id': segment_2
    }]

    LOG.tc_step("Create port trunk 1")
    trunk1_id = network_helper.create_trunk(t1_parent_port_id,
                                            name='trunk-1',
                                            sub_ports=t1_sub_ports,
                                            cleanup='function')[1]

    LOG.tc_step("Boot a VM with mgmt net and trunk port")
    mgmt_net_id = network_helper.get_mgmt_net_id()
    nics = [{'net-id': mgmt_net_id}, {'port-id': t1_parent_port_id}]

    LOG.tc_step("Boot a vm with created ports")
    vm_id = vm_helper.boot_vm(name='vm-with-trunk1-port',
                              nics=nics,
                              cleanup='function')[1]
    LOG.tc_step("Setup vlan interfaces inside guest")
    _bring_up_vlan_interface(vm_id, 'eth1', [segment_1])

    # Create the second trunk without the subports, and boot the second vm
    LOG.tc_step("Create Parent port for trunk 2")
    t2_parent_port_id = network_helper.create_port(net_ids[0],
                                                   trunk2_parent_port,
                                                   wrs_vif=vif_model,
                                                   cleanup='function')[1]
    t2_parent_port_mac = network_helper.get_ports(
        field='mac address', port_name=trunk2_parent_port)[0]
    LOG.tc_step("Create Subport with parent port mac to be used by trunk 2")
    t2_sub_port1_id = network_helper.create_port(net_ids[1],
                                                 name=trunk2_subport_1,
                                                 mac_addr=t2_parent_port_mac,
                                                 wrs_vif=vif_model,
                                                 cleanup='function')[1]
    LOG.tc_step("Create Subport with parent port mac to be used by trunk 2")
    t2_sub_port2_id = network_helper.create_port(net_ids[2],
                                                 name=trunk2_subport_2,
                                                 mac_addr=t2_parent_port_mac,
                                                 wrs_vif=vif_model,
                                                 cleanup='function')[1]

    t2_sub_ports = [{
        'port': t2_sub_port1_id,
        'segmentation-type': 'vlan',
        'segmentation-id': segment_1
    }, {
        'port': t2_sub_port2_id,
        'segmentation-type': 'vlan',
        'segmentation-id': segment_2
    }]

    LOG.tc_step("Create port trunk 2")
    trunk2_id = network_helper.create_trunk(t2_parent_port_id,
                                            name='trunk-2',
                                            cleanup='function')[1]

    LOG.tc_step("Boot a VM with mgmt net and trunk port")
    mgmt_net_id = network_helper.get_mgmt_net_id()
    nics_2 = [{'net-id': mgmt_net_id}, {'port-id': t2_parent_port_id}]

    LOG.tc_step("Boot a vm with created ports")
    vm2_id = vm_helper.boot_vm(name='vm-with-trunk2-port',
                               nics=nics_2,
                               cleanup='function')[1]

    LOG.tc_step("Add the sub ports to the second truck")
    network_helper.set_trunk(trunk2_id, sub_ports=t2_sub_ports)

    LOG.tc_step("Setup VLAN interfaces inside guest")
    _bring_up_vlan_interface(vm2_id, 'eth1', [segment_1])

    # ping b/w 2 vms using the vlan interfaces
    eth_name = 'eth1.1'

    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        ip_addr = network_helper.get_ip_for_eth(eth_name=eth_name,
                                                ssh_client=vm_ssh)

    if ip_addr:
        with vm_helper.ssh_to_vm_from_natbox(vm2_id) as vm2_ssh:
            LOG.tc_step("Ping on vlan interface from guest")
            network_helper.ping_server(ip_addr,
                                       ssh_client=vm2_ssh,
                                       num_pings=20,
                                       fail_ok=False)

    # unset the subport on trunk_1 and try the ping (it will fail)
    LOG.tc_step(
        "Removing a subport from trunk and ping on vlan interface inside guest"
    )
    ret_code_10 = network_helper.unset_trunk(trunk1_id,
                                             sub_ports=[t1_sub_port1_id])[0]
    assert ret_code_10 == 0, "Subports not removed as expected."

    with vm_helper.ssh_to_vm_from_natbox(vm2_id) as vm2_ssh:
        LOG.tc_step("Ping on vlan interface from guest")
        ping = network_helper.ping_server(ip_addr,
                                          ssh_client=vm2_ssh,
                                          num_pings=20,
                                          fail_ok=True)[0]
        assert ping == 100, "Ping did not fail as expected."

    # set the subport on trunk_1 and try the ping (it will work)
    LOG.tc_step(
        " Add back the subport to trunk and ping on vlan interface inside guest"
    )
    t1_sub_port = [{
        'port': t1_sub_port1_id,
        'segmentation-type': 'vlan',
        'segmentation-id': segment_1
    }]
    network_helper.set_trunk(trunk1_id, sub_ports=t1_sub_port)

    with vm_helper.ssh_to_vm_from_natbox(vm2_id) as vm2_ssh:
        LOG.tc_step("Ping on vlan interface from guest")
        network_helper.ping_server(ip_addr,
                                   ssh_client=vm2_ssh,
                                   num_pings=20,
                                   fail_ok=False)

    # VM operation and ping
    for vm_actions in [['pause', 'unpause'], ['suspend', 'resume'],
                       ['live_migrate'], ['cold_migrate']]:

        LOG.tc_step("Perform following action(s) on vm {}: {}".format(
            vm2_id, vm_actions))
        for action in vm_actions:
            vm_helper.perform_action_on_vm(vm2_id, action=action)

        LOG.tc_step("Ping vm from natbox")
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

        LOG.tc_step(
            "Verify ping from base_vm to vm_under_test over management networks still works "
            "after {}".format(vm_actions))
        vm_helper.ping_vms_from_vm(to_vms=vm_id,
                                   from_vm=vm2_id,
                                   net_types=['mgmt'])

        if vm_actions[0] == 'cold_migrate':
            LOG.tc_step("Setup VLAN interfaces inside guest")
            _bring_up_vlan_interface(vm2_id, 'eth1', [segment_1])

        with vm_helper.ssh_to_vm_from_natbox(vm2_id) as vm2_ssh:
            LOG.tc_step(
                "Ping on vlan interface from guest after action {}".format(
                    vm_actions))
            network_helper.ping_server(ip_addr,
                                       ssh_client=vm2_ssh,
                                       num_pings=20,
                                       fail_ok=False)

        vm_host = vm_helper.get_vm_host(vm2_id)

        vm_on_target_host = vm_helper.get_vms_on_host(vm_host)

    LOG.tc_step(
        "Reboot VMs host {} and ensure vms are evacuated to other host".format(
            vm_host))
    vm_helper.evacuate_vms(host=vm_host, vms_to_check=vm2_id, ping_vms=True)

    for vm_id_on_target_host in vm_on_target_host:
        LOG.tc_step("Setup VLAN interfaces inside guest")
        _bring_up_vlan_interface(vm_id_on_target_host, 'eth1', [segment_1])

    with vm_helper.ssh_to_vm_from_natbox(vm2_id) as vm2_ssh:
        LOG.tc_step("Ping on vlan interface from guest after evacuation")
        network_helper.ping_server(ip_addr,
                                   ssh_client=vm2_ssh,
                                   num_pings=20,
                                   fail_ok=False)

    LOG.tc_step(
        "Attempt to delete trunk when in use, expect pass for AVS only")
    code = network_helper.delete_trunks(trunks=trunk1_id, fail_ok=True)[0]

    if system_helper.is_avs():
        assert 0 == code, "Failed to delete port trunk when it's used by a running VM with AVS"
    else:
        assert 1 == code, "Trunk is deleted when it's used by a running VM with OVS"
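
# Hypothetical sketch of the _bring_up_vlan_interface helper called above: it creates a
# VLAN sub-interface (e.g. eth1.1) on the trunk parent NIC inside the guest and requests
# an address via DHCP. The exact guest commands and the use of dhclient are assumptions
# for illustration; only the call signature is taken from the test above.
def _bring_up_vlan_interface(vm_id, eth_name, vlan_ids):
    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        for vlan_id in vlan_ids:
            sub_if = '{}.{}'.format(eth_name, vlan_id)
            vm_ssh.exec_cmd(cmd='sudo ip link add link {} name {} type vlan id {}'.format(
                eth_name, sub_if, vlan_id))
            vm_ssh.exec_cmd(cmd='sudo ip link set {} up'.format(sub_if))
            vm_ssh.exec_cmd(cmd='sudo dhclient {}'.format(sub_if), expect_timeout=60)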
Exemplo n.º 26
0
def test_neutron_port_security(setup_port_security, port_security):
    """
    Test neutron port security enabled/disabled with IP spoofing

    Args:
        n/a

    Pre-requisites:
        - System should have the ml2 driver capability
    Setups:
        - Enable the ml2 extension driver in the system if it's not already enabled
        - Enable port security on the network if it's not already enabled
    Test Steps:
        - Set port_security on existing neutron networks
        - Boot 2 vms where userdata sets a static ip that differs from the one shown in nova show
        - Verify IP spoofing fails when port security is enabled, and vice versa
        - Delete spoofed vms
        - Boot another 2 vms without userdata
        - Verify ping between VMs works when there is no IP spoofing attack
        - Change vm2 mac address and verify IP spoofing fails only when port security is enabled
        - Revert vm2 mac address and verify ping between vms works again
    Teardown:
        - Delete created vms, volumes, etc

    """
    internal_net_id, nics = setup_port_security

    port_security_enabled = (port_security == 'enabled')
    LOG.tc_step(
        "Ensure port security is {} on neutron networks".format(port_security))
    internal_net_port_security = eval(
        network_helper.get_network_values(internal_net_id,
                                          'port_security_enabled')[0])
    if internal_net_port_security != port_security_enabled:
        LOG.info('Set port security to {} on existing neutron networks'.format(
            port_security))
        networks = network_helper.get_networks(auth_info=Tenant.get('admin'))
        for net in networks:
            network_helper.set_network(
                net_id=net, enable_port_security=port_security_enabled)

    # Test IP protection
    LOG.tc_step(
        "Launch two VMs with port security {} with mismatch IP in userdata than neutron port"
        .format(port_security))
    vms = []
    for i in (1, 2):
        user_data = '{}/vm{}-userdata.txt'.format(StxPath.USERDATA, i)
        vm_name = 'vm{}_mismatch_ip_ps_{}'.format(i, port_security)
        vm = vm_helper.boot_vm(name=vm_name,
                               nics=nics,
                               cleanup='function',
                               user_data=user_data)[1]
        vm_helper.wait_for_vm_pingable_from_natbox(vm)
        vms.append(vm)
    vm1, vm2 = vms
    vm_helper.ping_vms_from_vm(to_vms=vm2,
                               from_vm=vm1,
                               net_types=['mgmt'],
                               retry=10)

    vm2_ip = '10.1.0.2'
    expt_res = 'fails' if port_security_enabled else 'succeeds'
    LOG.tc_step(
        "With port security {}, verify ping over internal net {} with mismatch IPs"
        .format(port_security, expt_res))
    packet_loss_rate = _ping_server(vm1,
                                    ip_addr=vm2_ip,
                                    fail_ok=port_security_enabled)
    if port_security_enabled:
        assert packet_loss_rate == 100, "IP spoofing succeeded when port security is enabled"

    LOG.info("Delete VMs with mismatch IPs")
    vm_helper.delete_vms(vms)

    # Test MAC protection
    LOG.tc_step(
        "Launch two VMs without IP Spoofing and check ping between vms works")
    vms = []
    for i in (1, 2):
        vm = vm_helper.boot_vm(name='vm{}_ps_{}'.format(i, port_security),
                               nics=nics,
                               cleanup='function')[1]
        vm_helper.wait_for_vm_pingable_from_natbox(vm)
        vms.append(vm)
    vm1, vm2 = vms
    vm_helper.ping_vms_from_vm(vm2,
                               from_vm=vm1,
                               net_types=['mgmt', 'internal'])

    LOG.tc_step(
        "With port security {}, change VM mac address and ensure ping over internal net {}"
        .format(port_security, expt_res))
    origin_mac_addr = network_helper.get_ports(server=vm2,
                                               network=internal_net_id,
                                               field='MAC Address')[0]
    vm2_ip = network_helper.get_internal_ips_for_vms(vm2)[0]
    new_mac_addr = _change_mac_address(vm2, origin_mac_addr)
    packet_loss_rate = _ping_server(vm1,
                                    ip_addr=vm2_ip,
                                    fail_ok=port_security_enabled)
    if port_security_enabled:
        assert packet_loss_rate == 100, "IP spoofing succeeded when port security is enabled"

    LOG.tc_step(
        "With port security {}, revert VM mac address and ensure ping over internal net succeeds"
        .format(port_security))
    _change_mac_address(vm2, new_mac_addr, origin_mac_addr)
    _ping_server(vm1, ip_addr=vm2_ip, fail_ok=False)
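
# Hypothetical sketch of the _ping_server helper used above: it pings the target IP from
# inside the given VM and returns the packet loss rate, mirroring how
# network_helper.ping_server is used elsewhere in this module (its return layout is
# assumed from that usage).
def _ping_server(vm_id, ip_addr, fail_ok):
    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        packet_loss_rate = network_helper.ping_server(ip_addr, ssh_client=vm_ssh,
                                                      num_pings=20, fail_ok=fail_ok)[0]
    return packet_loss_rate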
Exemplo n.º 27
0
def test_interface_attach_detach_on_paused_vm(guest_os, boot_source, vifs,
                                              check_avs_pattern, base_vm):
    """
    Sample test case for interface attach/detach on paused vm

    Setups:
        - Boot a base vm with mgmt net and tenant_port_id (module)

    Test Steps:
        - Boot a vm with mgmt and avp port interface
        - Pause the vm
        - Attach vifs to vm with given if_attach_arg and vif_model
        - perform live migration on paused vm
        - unpause the vm
        - Bring up the interface from vm
        - ping between base_vm and vm_under_test over mgmt & tenant network
        - detach all the tenant interfaces
        - Verify ping to tenant interfaces fail

    Teardown:
        - Delete created vm, volume, port (if any)  (func)
        - Delete base vm, volume    (module)

    """

    base_vm_id, mgmt_nic, tenant_nic, internal_net_id, tenant_net_id, mgmt_net_id = base_vm
    LOG.tc_step("Create a avp port")
    init_port_id = network_helper.create_port(tenant_net_id,
                                              'tenant_avp_port',
                                              wrs_vif='avp',
                                              cleanup='function')[1]
    tenant_net_nic = {'port-id': init_port_id, 'vif-model': 'avp'}

    LOG.tc_step("Get/Create {} glance image".format(guest_os))
    cleanup = None if re.search(GuestImages.TIS_GUEST_PATTERN,
                                guest_os) else 'module'
    image_id = glance_helper.get_guest_image(guest_os=guest_os,
                                             cleanup=cleanup)

    LOG.tc_step(
        "Boot a {} vm and flavor from {} with a mgmt and a data interface".
        format(guest_os, boot_source))
    vm_under_test = vm_helper.boot_vm('if_attach-{}-{}'.format(
        guest_os, boot_source),
                                      nics=[mgmt_nic, tenant_net_nic],
                                      source=boot_source,
                                      image_id=image_id,
                                      guest_os=guest_os,
                                      cleanup='function')[1]

    _ping_vm_data(vm_under_test=vm_under_test,
                  base_vm_id=base_vm_id,
                  action='boot')

    LOG.tc_step(
        "Pause vm {} before attaching interfaces".format(vm_under_test))
    vm_helper.perform_action_on_vm(vm_under_test, action='pause')

    LOG.tc_step("Create and attach vnics to the VM: {}".format(vifs))
    tenant_port_ids = network_helper.get_ports(server=vm_under_test,
                                               network=tenant_net_id)
    expt_vnics = 2
    new_vnics = 0
    for vif in vifs:
        vif_model, vif_count = vif
        expt_vnics += vif_count
        LOG.info("iter {}".format(vif_count))
        LOG.info("Create and attach {} {} vnics to vm {}".format(
            vif_count, vif_model, vm_under_test))
        for i in range(vif_count):
            name = 'attached_port-{}_{}'.format(vif_model, i)
            port_id = network_helper.create_port(net_id=tenant_net_id,
                                                 name=name,
                                                 wrs_vif=vif_model,
                                                 cleanup='function')[1]
            vm_helper.attach_interface(vm_under_test, port_id=port_id)
            new_vnics += 1
            tenant_port_ids.append(port_id)

    vm_ports_count = len(network_helper.get_ports(server=vm_under_test))
    LOG.info("vnics attached to VM: {}".format(vm_ports_count))
    assert vm_ports_count == expt_vnics, "Number of vnics attached to VM is not as expected."

    if expt_vnics == 16:
        res = vm_helper.attach_interface(vm_under_test,
                                         net_id=tenant_net_id,
                                         fail_ok=True)[0]
        assert res == 1, "vnics attach exceed maximum limit"

    LOG.tc_step("Live migrate paused vm")
    vm_helper.perform_action_on_vm(vm_under_test, action='live_migrate')

    LOG.tc_step(
        "Unpause live-migrated vm, bring up attached interfaces and ping the VM"
    )
    vm_helper.perform_action_on_vm(vm_under_test, action='unpause')
    _bring_up_attached_interface(
        vm_under_test,
        guest_os=guest_os,
        ports=tenant_port_ids,
        base_vm=base_vm_id,
        action='pause, attach interfaces, live migrate and unpause')

    LOG.tc_step("Live migrate again after unpausing the vm")
    vm_helper.perform_action_on_vm(vm_under_test, action='live_migrate')
    _ping_vm_data(vm_under_test, base_vm_id, action='live migrate')

    LOG.tc_step("Detach ports: {}".format(tenant_port_ids))
    for tenant_port_id in tenant_port_ids:
        vm_helper.detach_interface(vm_id=vm_under_test, port_id=tenant_port_id)
        new_vnics -= 1

    res = vm_helper.ping_vms_from_vm(to_vms=base_vm_id,
                                     from_vm=vm_under_test,
                                     fail_ok=True,
                                     net_types=['data'],
                                     retry=0)[0]
    assert not res, "Ping from base_vm to vm via detached interface still works"

    LOG.tc_step(
        "Attach single interface with tenant id {}".format(tenant_net_id))
    port_id = vm_helper.attach_interface(vm_under_test,
                                         net_id=tenant_net_id)[1]
    new_vnics += 1

    LOG.tc_step(
        "Live migrate vm after detach/attach, bring up interfaces and ensure ping works"
    )
    vm_helper.perform_action_on_vm(vm_under_test, action='live_migrate')
    _bring_up_attached_interface(vm_under_test,
                                 guest_os=guest_os,
                                 ports=[port_id],
                                 base_vm=base_vm_id,
                                 action='attach interface and live migrate')
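
# Hypothetical sketch of the _ping_vm_data helper called above: it verifies mgmt and data
# connectivity from the base VM to the VM under test after the given action. The retry
# count and log wording are assumptions; the helper calls mirror usage elsewhere in this
# module.
def _ping_vm_data(vm_under_test, base_vm_id, action):
    LOG.tc_step("Verify ping from base_vm to vm_under_test over mgmt and data networks "
                "after {}".format(action))
    vm_helper.wait_for_vm_pingable_from_natbox(vm_under_test)
    vm_helper.ping_vms_from_vm(to_vms=vm_under_test, from_vm=base_vm_id,
                               net_types=['mgmt', 'data'], retry=10)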
Exemplo n.º 28
0
def _test_pci_resource_usage(vif_model_check):
    """
    Create a vm under test with specified vifs for tenant network

    Returns (str): id of vm under test

    """
    vif_model, base_vm, flavor_id, nics_to_test, seg_id, net_type, pnet_name, extra_pcipt_net_name, extra_pcipt_net = \
        vif_model_check

    LOG.tc_step("Ensure core/vm quota is sufficient")

    if 'sriov' in vif_model:
        vm_type = 'sriov'
        resource_param = 'pci_vfs_used'
        max_resource = 'pci_vfs_configured'
    else:
        vm_type = 'pcipt'
        resource_param = 'pci_pfs_used'
        max_resource = 'pci_pfs_configured'

    LOG.tc_step(
        "Get resource usage for {} interface before booting VM(s)".format(
            vif_model))
    LOG.info("provider net for {} interface: {}".format(vif_model, pnet_name))

    assert pnet_name, "provider network for {} interface is not found".format(
        vif_model)

    total_val, pre_resource_value = nova_helper.get_pci_interface_stats_for_providernet(
        pnet_name, fields=(max_resource, resource_param))
    LOG.info("Resource Usage {} for {}. Resource configured: {}".format(
        pre_resource_value, vif_model, total_val))

    expt_change = 2 if vif_model == 'pci-passthrough' and extra_pcipt_net else 1
    vm_limit = int((total_val - pre_resource_value) /
                   expt_change) if vif_model == 'pci-passthrough' else 5
    vm_helper.ensure_vms_quotas(vm_limit + 5)
    vms_under_test = []
    for i in range(vm_limit):
        LOG.tc_step("Boot a vm with {} vif model on {} net".format(
            vif_model, net_type))
        vm_id = vm_helper.boot_vm(name=vif_model,
                                  flavor=flavor_id,
                                  cleanup='function',
                                  nics=nics_to_test)[1]
        vms_under_test.append(vm_id)

        vm_helper.wait_for_vm_pingable_from_natbox(vm_id, fail_ok=False)

        if vm_type == 'pcipt':
            LOG.tc_step("Add vlan to pci-passthrough interface for VM.")
            vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=vm_id,
                                                       net_seg_id=seg_id)

        LOG.tc_step(
            "Ping vm over mgmt and {} nets from itself".format(net_type))
        vm_helper.ping_vms_from_vm(to_vms=vm_id,
                                   from_vm=vm_id,
                                   net_types=['mgmt', net_type])

        LOG.tc_step(
            "Check resource usage for {} interface increased by 1".format(
                vif_model))
        resource_value = nova_helper.get_provider_net_info(
            pnet_name, field=resource_param)
        assert pre_resource_value + expt_change == resource_value, "Resource usage for {} is not increased by {}". \
            format(vif_model, expt_change)

        pre_resource_value = resource_value

    for vm_to_del in vms_under_test:
        LOG.tc_step(
            "Check resource usage for {} interface reduced by 1 after deleting a vm"
            .format(vif_model))
        vm_helper.delete_vms(vm_to_del, check_first=False, stop_first=False)
        resource_val = common.wait_for_val_from_func(
            expt_val=pre_resource_value - expt_change,
            timeout=30,
            check_interval=3,
            func=nova_helper.get_provider_net_info,
            providernet_id=pnet_name,
            field=resource_param)[1]

        assert pre_resource_value - expt_change == resource_val, "Resource usage for {} is not reduced by {}". \
            format(vif_model, expt_change)
        pre_resource_value = resource_val
Exemplo n.º 29
0
    def test_pci_vm_nova_actions(self, pci_numa_affinity,
                                 pci_irq_affinity_mask, pci_alias,
                                 vif_model_check, pci_dev_numa_nodes):
        """
        Test vm actions on vm with multiple ports with given vif models on the same tenant network

        Args:

        Setups:
            - create a flavor with dedicated cpu policy (module)
            - choose one tenant network and one internal network to be used by test (module)
            - boot a base vm - vm1 with above flavor and networks, and ping it from NatBox (module)
            - Boot a vm under test - vm2 with above flavor and with multiple ports on same tenant network with base vm,
            and ping it from NatBox      (class)
            - Ping vm2's own data network ips        (class)
            - Ping vm2 from vm1 to verify management and data networks connection    (class)

        Test Steps:
            - Perform given actions on vm2 (migrate, start/stop, etc)
            - Verify ping from vm1 to vm2 over management and data networks still works
            - Verify the correct number of PCI devices are created, in correct types,
                    the numa node of the PCI devices aligns with that of CPUs, and affined CPUs for PCI devices
                    are same as specified by 'pci_alias' (if applicable)

        Teardown:
            - Delete created vms and flavor
        """
        pci_irq_affinity_mask, pci_alias = _convert_irqmask_pcialias(
            pci_irq_affinity_mask, pci_alias)
        boot_forbidden = False
        migrate_forbidden = False
        if pci_numa_affinity == 'required' and pci_alias is not None:
            host_count = pci_dev_numa_nodes
            if host_count == 0:
                boot_forbidden = True
            elif host_count == 1:
                migrate_forbidden = True
        LOG.tc_step(
            "Expected result - Disallow boot: {}; Disallow migrate: {}".format(
                boot_forbidden, migrate_forbidden))

        self.pci_numa_affinity = pci_numa_affinity
        self.pci_alias = pci_alias
        self.pci_irq_affinity_mask = pci_irq_affinity_mask

        if pci_alias is not None:
            LOG.info('Check if PCI-Alias devices existing')
            self.is_pci_device_supported(pci_alias)

        self.vif_model, self.base_vm, self.base_flavor_id, self.nics_to_test, self.seg_id, \
            self.pnet_name, self.extra_pcipt_net = vif_model_check

        LOG.tc_step(
            "Create a flavor with specified extra-specs and dedicated cpu policy"
        )
        flavor_id = self.create_flavor_for_pci()

        LOG.tc_step("Boot a vm with {} vif model on internal net".format(
            self.vif_model))
        # TODO: feature unavailable atm. Update required
        # resource_param = 'pci_vfs_used' if 'sriov' in self.vif_model else 'pci_pfs_used'
        # LOG.tc_step("Get resource usage for {} interface before booting VM(s)".format(self.vif_model))
        # pre_resource_value = nova_helper.get_provider_net_info(self.pnet_name, field=resource_param)

        res, vm_id, err = vm_helper.boot_vm(name=self.vif_model,
                                            flavor=flavor_id,
                                            cleanup='function',
                                            nics=self.nics_to_test,
                                            fail_ok=boot_forbidden)
        if boot_forbidden:
            assert res > 0, "VM booted successfully while it numa node for pcipt/sriov and pci alias mismatch"
            return

        self.vm_id = vm_id

        if 'pci-passthrough' == self.vif_model:
            LOG.tc_step("Add vlan to pci-passthrough interface for VM.")
            vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=vm_id,
                                                       net_seg_id=self.seg_id,
                                                       init_conf=True)

        LOG.tc_step("Ping vm over mgmt and internal nets from base vm")
        vm_helper.ping_vms_from_vm(to_vms=self.vm_id,
                                   from_vm=self.vm_id,
                                   net_types=['mgmt', 'internal'])
        vm_helper.ping_vms_from_vm(to_vms=self.vm_id,
                                   from_vm=self.base_vm,
                                   net_types=['mgmt', 'internal'])

        self.vm_topology = vm_helper.get_vm_values(
            vm_id=self.vm_id, fields='wrs-res:topology')[0]
        vnic_type = 'direct' if self.vif_model == 'pci-sriov' else 'direct-physical'
        self.pci_nics = vm_helper.get_vm_nics_info(vm_id=self.vm_id,
                                                   vnic_type=vnic_type)
        assert self.pci_nics

        self.wait_check_vm_states(step='boot')

        # TODO: feature unavailable atm. Update required
        # LOG.tc_step("Check {} usage is incremented by 1".format(resource_param))
        # post_resource_value = nova_helper.get_provider_net_info(self.pnet_name, field=resource_param)
        # expt_change = 2 if self.vif_model == 'pci-passthrough' and self.extra_pcipt_net else 1
        # assert pre_resource_value + expt_change == post_resource_value, "{} usage is not incremented by {} as " \
        #                                                                 "expected".format(resource_param, expt_change)

        LOG.tc_step('Pause/Unpause {} vm'.format(self.vif_model))
        vm_helper.pause_vm(self.vm_id)
        vm_helper.unpause_vm(self.vm_id)

        LOG.tc_step(
            "Check vm still pingable over mgmt and internal nets after pause/unpause"
        )
        self.wait_check_vm_states(step='pause/unpause')

        LOG.tc_step('Suspend/Resume {} vm'.format(self.vif_model))
        vm_helper.suspend_vm(self.vm_id)
        vm_helper.resume_vm(self.vm_id)

        LOG.tc_step(
            "Check vm still pingable over mgmt and internal nets after suspend/resume"
        )
        self.wait_check_vm_states(step='suspend/resume')
        if 'pci-passthrough' == self.vif_model:
            LOG.tc_step(
                "Add/Check vlan interface is added to pci-passthrough device for vm {}."
                .format(self.vm_id))
            vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=self.vm_id,
                                                       net_seg_id=self.seg_id)
        vm_helper.ping_vms_from_vm(to_vms=self.vm_id,
                                   from_vm=self.base_vm,
                                   net_types=['mgmt', 'internal'])

        LOG.tc_step('Cold migrate {} vm'.format(self.vif_model))
        code, msg = vm_helper.cold_migrate_vm(self.vm_id,
                                              fail_ok=migrate_forbidden)
        if migrate_forbidden:
            assert code > 0, "Expect migrate fail due to no other host has pcipt/sriov and pci-alias on same numa. " \
                             "Actual: {}".format(msg)
        self.wait_check_vm_states(step='cold-migrate')
        if 'pci-passthrough' == self.vif_model:
            LOG.tc_step(
                "Add/Check vlan interface is added to pci-passthrough device for vm {}."
                .format(self.vm_id))
            vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=self.vm_id,
                                                       net_seg_id=self.seg_id)

        LOG.tc_step(
            "Check vm still pingable over mgmt and internal nets after cold migration"
        )
        vm_helper.ping_vms_from_vm(to_vms=self.vm_id,
                                   from_vm=self.base_vm,
                                   net_types=['mgmt', 'internal'])

        LOG.tc_step('Set vm to error and wait for it to be auto recovered')
        vm_helper.set_vm_state(vm_id=self.vm_id,
                               error_state=True,
                               fail_ok=False)
        vm_helper.wait_for_vm_values(vm_id=self.vm_id,
                                     status=VMStatus.ACTIVE,
                                     fail_ok=False,
                                     timeout=600)

        LOG.tc_step(
            "Check vm still pingable over mgmt and internal nets after auto recovery"
        )
        self.wait_check_vm_states(step='set-error-state-recover')
        if 'pci-passthrough' == self.vif_model:
            LOG.tc_step(
                "Add/Check vlan interface is added to pci-passthrough device for vm {}."
                .format(self.vm_id))
            vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=self.vm_id,
                                                       net_seg_id=self.seg_id)
        vm_helper.ping_vms_from_vm(to_vms=self.vm_id,
                                   from_vm=self.base_vm,
                                   net_types=['mgmt', 'internal'])

        LOG.tc_step("Hard reboot {} vm".format(self.vif_model))
        vm_helper.reboot_vm(self.vm_id, hard=True)
        LOG.tc_step(
            "Check vm still pingable over mgmt and internal nets after nova reboot hard"
        )
        self.wait_check_vm_states(step='hard-reboot')
        if 'pci-passthrough' == self.vif_model:
            LOG.tc_step(
                "Add/Check vlan interface is added to pci-passthrough device for vm {}."
                .format(self.vm_id))
            vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=self.vm_id,
                                                       net_seg_id=self.seg_id)
        vm_helper.ping_vms_from_vm(to_vms=self.vm_id,
                                   from_vm=self.base_vm,
                                   net_types=['mgmt', 'internal'])

        LOG.tc_step(
            "Create a flavor with dedicated cpu policy and resize vm to new flavor"
        )
        resize_flavor = nova_helper.create_flavor(name='dedicated',
                                                  ram=2048,
                                                  cleanup='function')[1]
        extra_specs = {FlavorSpec.CPU_POLICY: 'dedicated'}
        nova_helper.set_flavor(flavor=resize_flavor, **extra_specs)
        vm_helper.resize_vm(self.vm_id, resize_flavor)

        LOG.tc_step("Check vm still reachable after resize")
        self.wait_check_vm_states(step='resize')
        if 'pci-passthrough' == self.vif_model:
            LOG.tc_step(
                "Add/Check vlan interface is added to pci-passthrough device for vm {}."
                .format(self.vm_id))
            vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=self.vm_id,
                                                       net_seg_id=self.seg_id)
        vm_helper.ping_vms_from_vm(to_vms=self.vm_id,
                                   from_vm=self.base_vm,
                                   net_types=['mgmt', 'internal'])
def test_robustness_service_function_chaining(protocol, nsh_aware, same_host,
                                              add_protocol, symmetric,
                                              check_system,
                                              add_admin_role_module):
    """
        Test Service Function Chaining

        Test Steps:
            - Check if the system is compatible
            - Boot the source VM, dest VM & SFC VM in same host or diff host based on <same_host: True or False>
            - Install necessary software and package inside guest for packet forwarding test
            - Create port pair using nsh_aware <True:False>
            - Create port pair group
            - Create SFC flow classifier using protocol <tcp:icmp:udp>
            - Create port Chain
            - Check packet forwarding from source to dest vm via SFC vm
            - Migrate VM by force_lock compute host
            - Check packet forwarding from source to dest vm via SFC vm
            - Create new flow classifier with new protocol (add_protocol)
            - Update port chain with new flow classifier
            - Check packet forwarding from source to dest vm via SFC vm with new classifier
            - Evacuate VM by rebooting compute host
            - Verify VM evacuated
            - Check packet forwarding from source to dest vm via SFC vm with new classifier

        Test Teardown:
            - Delete port chain, port pair group, port pair, flow classifier, vms, volumes created

    """
    nsh_aware = (nsh_aware == 'nsh_aware')
    same_host = (same_host == 'same_host')
    symmetric = (symmetric == 'symmetric')

    LOG.tc_step("Check if the system is compatible to run this test")
    computes = check_system

    LOG.tc_step("Boot the VM in same host: {}".format(same_host))
    hosts_to_boot = [computes[0]] * 3 if same_host else computes[0:3]
    LOG.info("Boot the VM in following compute host 1:{}, 2:{}, 3:{}".format(
        hosts_to_boot[0], hosts_to_boot[1], hosts_to_boot[2]))

    LOG.tc_step("Boot the source and dest VM")
    vm_ids = []
    vm_ids, source_vm_id, dest_vm_id, internal_net_id, mgmt_net_id, mgmt_nic = _setup_vm(
        vm_ids, hosts_to_boot)
    vm_helper.ping_vms_from_vm(to_vms=source_vm_id,
                               from_vm=dest_vm_id,
                               net_types=['mgmt'],
                               retry=10)

    LOG.tc_step("Boot the SFC VM")
    sfc_vm_ids = []
    sfc_vm_ids, sfc_vm_under_test, ingress_port_id, egress_port_id = _setup_sfc_vm(
        sfc_vm_ids, hosts_to_boot, mgmt_nic, internal_net_id)
    vm_helper.ping_vms_from_vm(to_vms=source_vm_id,
                               from_vm=sfc_vm_under_test,
                               net_types=['mgmt'],
                               retry=10)

    # if protocol != 'icmp':
    LOG.tc_step("Install software package nc in vm {} {}".format(
        source_vm_id, dest_vm_id))
    _install_sw_packages_in_vm(source_vm_id)
    _install_sw_packages_in_vm(dest_vm_id)

    LOG.tc_step("copy vxlan tool in sfc vm {}".format(sfc_vm_under_test))
    vm_helper.scp_to_vm_from_natbox(vm_id=sfc_vm_under_test,
                                    source_file='/home/cgcs/sfc/vxlan_tool.py',
                                    dest_file='/root/vxlan_tool.py')

    LOG.tc_step("Create port pair")
    port_pair_ids = []
    port_pair_id = _setup_port_pair(nsh_aware, ingress_port_id, egress_port_id)
    port_pair_ids.append(port_pair_id)

    LOG.tc_step("Create port pair group")
    port_pair_group_ids = []
    port_pair_group_id = _setup_port_pair_groups(port_pair_id)
    port_pair_group_ids.append(port_pair_group_id)

    name = 'sfc_flow_classifier'
    LOG.tc_step("Create flow classifier:{}".format(name))
    flow_classifier, dest_vm_internal_net_ip = _setup_flow_classifier(
        name, source_vm_id, dest_vm_id, protocol)

    LOG.tc_step("Create port chain")
    port_chain_id = _setup_port_chain(port_pair_group_id, flow_classifier,
                                      symmetric)

    LOG.tc_step(
        "Execute vxlan.py tool and verify {} packet received VM1 to VM2".
        format(protocol))
    _check_packets_forwarded_in_sfc_vm(source_vm_id,
                                       dest_vm_id,
                                       sfc_vm_ids,
                                       dest_vm_internal_net_ip,
                                       protocol,
                                       nsh_aware,
                                       symmetric,
                                       load_balancing=False)

    LOG.tc_step("Force lock {}".format(hosts_to_boot))
    if not same_host:
        for host_to_boot in hosts_to_boot:
            HostsToRecover.add(host_to_boot)
            lock_code, lock_output = host_helper.lock_host(host_to_boot,
                                                           force=True,
                                                           check_first=True)
            assert lock_code == 0, "Failed to force lock {}. Details: {}".format(
                host_to_boot, lock_output)
    else:
        HostsToRecover.add(hosts_to_boot[0])
        lock_code, lock_output = host_helper.lock_host(hosts_to_boot[0],
                                                       force=True,
                                                       check_first=True)
        assert lock_code == 0, "Failed to force lock {}. Details: {}".format(
            hosts_to_boot[0], lock_output)

    # Expect VMs to migrate off force-locked host (non-gracefully)
    LOG.tc_step(
        "Wait for 'Active' status of VMs after host force lock completes")
    vm_helper.wait_for_vms_values(vm_ids, fail_ok=False)

    LOG.tc_step(
        "Execute vxlan.py tool and verify {} packet received VM1 to VM2".
        format(protocol))
    _check_packets_forwarded_in_sfc_vm(source_vm_id,
                                       dest_vm_id,
                                       sfc_vm_ids,
                                       dest_vm_internal_net_ip,
                                       protocol,
                                       nsh_aware,
                                       symmetric,
                                       load_balancing=False)

    LOG.tc_step(
        "Create new flow classifier with protocol {}".format(add_protocol))
    flow_classifier_name = 'new_sfc_flow_classifier'
    new_flow_classifier, dest_vm_internal_net_ip = _setup_flow_classifier(
        flow_classifier_name, source_vm_id, dest_vm_id, add_protocol)

    LOG.tc_step("Update port chain with new flow classifier:".format(
        new_flow_classifier))
    network_helper.set_sfc_port_chain(port_chain_id,
                                      port_pair_groups=port_pair_group_id,
                                      flow_classifiers=new_flow_classifier)

    LOG.tc_step(
        "Execute vxlan.py tool and verify {} packet received VM1 to VM2".
        format(add_protocol))
    _check_packets_forwarded_in_sfc_vm(source_vm_id,
                                       dest_vm_id,
                                       sfc_vm_ids,
                                       dest_vm_internal_net_ip,
                                       add_protocol,
                                       nsh_aware,
                                       symmetric,
                                       load_balancing=False)

    LOG.info("Get the host to reboot where the VMs launched")
    hosts_to_reboot = vm_helper.get_vms_hosts(vm_ids=vm_ids)

    LOG.tc_step(
        "Reboot VMs host {} and ensure vms are evacuated to other host".format(
            hosts_to_reboot))
    vm_helper.evacuate_vms(host=hosts_to_reboot,
                           vms_to_check=vm_ids,
                           ping_vms=True)

    LOG.tc_step(
        "Execute vxlan.py tool and verify {} packet received VM1 to VM2".
        format(add_protocol))
    _check_packets_forwarded_in_sfc_vm(source_vm_id,
                                       dest_vm_id,
                                       sfc_vm_ids,
                                       dest_vm_internal_net_ip,
                                       add_protocol,
                                       nsh_aware,
                                       symmetric,
                                       load_balancing=False)
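
# Hypothetical sketch of the _install_sw_packages_in_vm helper used above: it installs the
# netcat package needed for the packet-forwarding checks inside the guest. The package
# manager command depends on the guest image and is an assumption here.
def _install_sw_packages_in_vm(vm_id):
    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        vm_ssh.exec_cmd(cmd='sudo yum install -y nc', expect_timeout=300)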