Example #1
def test_heat_vm_scale_after_actions(vm_scaling_stack, actions):
    """
    Test VM auto scaling with swact:
        Create heat stack for auto scaling using NestedAutoScale.yaml, swact, and perform vm scale up and down.

    Test Steps:
        - Create a heat stack for auto scaling vms
        - Verify heat stack is created successfully
        - Verify heat resources are created
        - live migrate the vm if not simplex
        - cold migrate the vm if not simplex
        - swact if not simplex
        - reboot -f vm host
        - trigger auto scale by boosting cpu usage in the vm (using dd)
        - verify it scales up to the max number of vms (3)
        - trigger scale down by killing dd in the vm
        - verify the vms scale down to the min number (1)
        - Delete Heat stack and verify resource deletion
    """
    stack_name, vm_id = vm_scaling_stack
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id)

    if not system_helper.is_aio_simplex():
        actions = actions.split('-')
        if "swact" in actions:
            LOG.tc_step("Swact before scale in/out")
            host_helper.swact_host()

        if "live_migrate" in actions:
            LOG.tc_step("live migrate vm before scale in/out")
            vm_helper.live_migrate_vm(vm_id)

        if "cold_migrate" in actions:
            LOG.tc_step("cold migrate vm before scale in/out")
            vm_helper.cold_migrate_vm(vm_id)

    if "host_reboot" in actions:
        if system_helper.is_aio_simplex():
            host_helper.reboot_hosts('controller-0')
            vm_helper.wait_for_vm_status(vm_id,
                                         status=VMStatus.ACTIVE,
                                         timeout=600,
                                         check_interval=10,
                                         fail_ok=False)
            vm_helper.wait_for_vm_pingable_from_natbox(
                vm_id, timeout=VMTimeout.DHCP_RETRY)
        else:
            LOG.tc_step("evacuate vm before scale in/out")
            vm_host = vm_helper.get_vm_host(vm_id=vm_id)
            vm_helper.evacuate_vms(host=vm_host, vms_to_check=vm_id)

    LOG.tc_step(
        "Wait for {} vms to auto scale out to {} after running dd in vm(s)".
        format(stack_name, 3))
    vm_helper.wait_for_auto_vm_scale_out(stack_name, expt_max=3)

    LOG.tc_step(
        "Wait for {} vms to auto scale in to {} after killing dd processes in vms"
        .format(stack_name, 1))
    vm_helper.wait_for_auto_vm_scale_in(stack_name, expt_min=1)
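
# --- Supplementary sketch (not from the original test) ---------------------------------------
# The scale out/in above is driven by cpu load generated with dd inside the guest. The helper
# that does this is not shown in this suite; the sketch below is an assumption of how it could
# look, reusing vm_helper.ssh_to_vm_from_natbox() and exec_sudo_cmd() seen in other tests here.
def _toggle_cpu_load_in_vm(vm_id, start=True):
    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        if start:
            # run dd in the background to push cpu usage above the scale-out threshold
            vm_ssh.exec_sudo_cmd('nohup dd if=/dev/zero of=/dev/null > /dev/null 2>&1 &')
        else:
            # kill dd so cpu usage drops below the scale-in threshold
            vm_ssh.exec_sudo_cmd('pkill -f "dd if=/dev/zero"')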
Example #2
    def test_evacuate_vms(self, vms_):
        """
        Test evacuating vms
        Args:
            vms_: (fixture to create vms)

        Pre-requisites:
            - At least two up hypervisors on system

        Test Steps:
            - Create vms with various options:
                - vm booted from cinder volume,
                - vm booted from glance image,
                - vm booted from glance image, and have an extra cinder
                volume attached after launch,
                - vm booted from cinder volume with ephemeral and swap disks
            - Move vms onto same hypervisor
            - sudo reboot -f on the host
            - Ensure vms are successfully evacuated to other host
            - Live migrate vms back to original host
            - Check vms can move back, and vms are still reachable from natbox
            - Check system services are enabled and neutron agents are alive

        """
        vms, target_host = vms_

        pre_res_sys, pre_msg_sys = system_helper.wait_for_services_enable(
            timeout=20, fail_ok=True)
        up_hypervisors = host_helper.get_up_hypervisors()
        pre_res_neutron, pre_msg_neutron = \
            network_helper.wait_for_agents_healthy(
                up_hypervisors, timeout=20, fail_ok=True)

        LOG.tc_step(
            "reboot -f on vms host, ensure vms are successfully evacuated and "
            "host is recovered after reboot")
        vm_helper.evacuate_vms(host=target_host,
                               vms_to_check=vms,
                               wait_for_host_up=True,
                               ping_vms=True)

        LOG.tc_step("Check rebooted host can still host vm")
        vm_helper.live_migrate_vm(vms[0], destination_host=target_host)
        vm_helper.wait_for_vm_pingable_from_natbox(vms[0])

        LOG.tc_step("Check system services and neutron agents after {} "
                    "reboot".format(target_host))
        post_res_sys, post_msg_sys = system_helper.wait_for_services_enable(
            fail_ok=True)
        post_res_neutron, post_msg_neutron = \
            network_helper.wait_for_agents_healthy(hosts=up_hypervisors,
                                                   fail_ok=True)

        assert post_res_sys, "\nPost-evac system services stats: {}" \
                             "\nPre-evac system services stats: {}". \
            format(post_msg_sys, pre_msg_sys)
        assert post_res_neutron, "\nPost-evac neutron agents stats: {}" \
                                 "\nPre-evac neutron agents stats: {}". \
            format(post_msg_neutron, pre_msg_neutron)
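
# --- Supplementary sketch (not from the original test) ---------------------------------------
# vm_helper.evacuate_vms() is used throughout these examples. A rough sketch of the behaviour
# the tests rely on is shown below as an assumption (the real helper is not part of this file):
# force-reboot the host, then verify each vm recovers ACTIVE on a different hypervisor and is
# reachable from the NatBox again.
def _evacuate_vms_sketch(host, vms_to_check, timeout=600):
    # the real helper issues 'sudo reboot -f' on the host; reboot_hosts is used here as a stand-in
    host_helper.reboot_hosts(host)
    for vm in vms_to_check:
        vm_helper.wait_for_vm_status(vm, status=VMStatus.ACTIVE, timeout=timeout, fail_ok=False)
        assert host != vm_helper.get_vm_host(vm_id=vm), "vm {} was not moved off {}".format(vm, host)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm)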
Example #3
def test_evacuate_vms_stress(add_hosts_to_zone):
    """
    Test evacuate vms with various vm storage configs and host instance backing configs

    Args:
        add_hosts_to_zone: test fixture that returns the storage backing under test and the
            hosts added to the 'cgcsauto' availability zone

    Skip conditions:
        - Less than two hosts configured with storage backing under test

    Setups:
        - Add admin role to primary tenant (module)

    Test Steps:
        - Create flv_rootdisk without ephemeral or swap disks, and set storage backing extra spec
        - Create flv_ephemswap with ephemeral AND swap disks, and set storage backing extra spec
        - Boot following vms on same host and wait for them to be pingable from NatBox:
            - Boot vm1 from volume with flavor flv_rootdisk
            - Boot vm2 from volume with flavor flv_ephemswap
            - Boot vm3 from image with flavor flv_rootdisk
            - Boot vm4 from image with flavor flv_rootdisk, and attach a volume to it
            - Boot vm5 from image with flavor flv_ephemswap
        - power-off host from vlm
        - Ensure evacuation for all 5 vms is successful (vm host changed, active state, pingable from NatBox)
        - Repeat above evacuation steps

    Teardown:
        - Delete created vms, volumes, flavors
        - Remove admin role from primary tenant (module)

    """
    storage_backing, hosts = add_hosts_to_zone
    zone = 'cgcsauto'

    HostsToRecover.add(hosts)

    initial_host = hosts[0]

    vms = vm_helper.boot_vms_various_types(storage_backing=storage_backing, target_host=initial_host, avail_zone=zone)

    target_host = initial_host

    for i in range(100):
        post_host = hosts[0] if target_host != hosts[0] else hosts[1]
        LOG.info("\n===============Iteration {}============".format(i+1))
        vm_helper.evacuate_vms(target_host, vms, wait_for_host_up=True, post_host=post_host, timeout=720, vlm=True,
                               ping_vms=True)

        target_host = post_host
        LOG.info("Rest for 120 seconds before next evacuation")
        time.sleep(120)
Example #4
    def test_multiports_on_same_network_evacuate_vm(self, vifs,
                                                    check_avs_pattern,
                                                    base_setup):
        """
        Test evacuate vm with multiple ports on same network

        Args:
            vifs (tuple): each item in the tuple is 1 nic to be added to vm with specified vif model
            base_setup (tuple): test fixture to boot base vm

        Setups:
            - create a flavor with dedicated cpu policy (class)
            - choose one tenant network and one internal network to be used by test (class)
            - boot a base vm - vm1 with above flavor and networks, and ping it from NatBox (class)
            - Boot a vm under test - vm2 with above flavor and with multiple ports on the same
              tenant network as the base vm, and ping it from NatBox   (class)
            - Ping vm2's own data network ips       (class)
            - Ping vm2 from vm1 to verify management and data networks connection   (class)

        Test Steps:
            - Reboot vm2 host
            - Wait for vm2 to be evacuated to other host
            - Wait for vm2 pingable from NatBox
            - Verify pci_address is preserved
            - Verify ping from vm1 to vm2 over management and data networks still works

        Teardown:
            - Delete created vms and flavor
        """

        base_vm, flavor, mgmt_net_id, tenant_net_id, internal_net_id = base_setup
        vm_under_test, nics = _boot_multiports_vm(flavor=flavor,
                                                  mgmt_net_id=mgmt_net_id,
                                                  vifs=vifs,
                                                  net_id=tenant_net_id,
                                                  net_type='data',
                                                  base_vm=base_vm)

        host = vm_helper.get_vm_host(vm_under_test)

        LOG.tc_step("Reboot vm host {}".format(host))
        vm_helper.evacuate_vms(host=host,
                               vms_to_check=vm_under_test,
                               ping_vms=True)

        LOG.tc_step(
            "Verify ping from base_vm to vm_under_test over management and data networks "
            "still works after evacuation.")
        vm_helper.ping_vms_from_vm(to_vms=vm_under_test,
                                   from_vm=base_vm,
                                   net_types=['mgmt', 'data'])
Example #5
def test_snat_evacuate_vm(snat_setups, snat):
    """
    Test VM external access after evacuation.

    Args:
        snat_setups (tuple): returns vm id and fip. Enable snat, create vm and attach floating ip.
        snat (str): 'snat_enabled' or 'snat_disabled'; whether to enable SNAT on the router

    Test Setups (module):
        - Find a tenant router that is dvr or non-dvr based on the parameter
        - Enable SNAT on tenant router
        - boot a vm and attach a floating ip
        - Ping vm from NatBox

    Test Steps:
        - Ping VM from NatBox
        - Reboot vm host
        - Verify vm is evacuated to other host
        - Verify vm can still ping outside

    Test Teardown:
        - Delete the created vm     (module)
        - Disable snat  (module)

    """
    vm_ = snat_setups[0]

    snat = True if snat == 'snat_enabled' else False
    LOG.tc_step("Update tenant router external gateway to set SNAT to {}".format(snat))
    network_helper.set_router_gateway(enable_snat=snat)

    time.sleep(30)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_, timeout=60, use_fip=True)

    host = vm_helper.get_vm_host(vm_)

    LOG.tc_step("Ping VM from NatBox".format(vm_))
    vm_helper.ping_vms_from_natbox(vm_, use_fip=False)
    # vm_helper.ping_vms_from_natbox(vm_, use_fip=True)

    LOG.tc_step("Evacuate vm")
    vm_helper.evacuate_vms(host=host, vms_to_check=vm_)

    LOG.tc_step("Verify vm can still ping outside")
    vm_helper.wait_for_vm_pingable_from_natbox(vm_, use_fip=snat, timeout=VMTimeout.DHCP_RETRY)
    vm_helper.ping_ext_from_vm(vm_, use_fip=True)

    host_helper.wait_for_hosts_ready(hosts=host)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_, timeout=60, use_fip=False)
    if snat:
        vm_helper.wait_for_vm_pingable_from_natbox(vm_, timeout=60, use_fip=True)
Example #6
def test_evacuate_dpdk_and_vhost_vms(add_admin_role_func):
    """
    Skip:
        - Less than 2 up hypervisors with same storage config available on system
    Setups:
        - Add admin role to tenant user under test
    Test Steps:
        - Launch 3 vms on same host with following configs:
            - dpdk vm with 3 vcpus
            - vhost vm with 2 vcpus
            - vhost vm with 3 vcpus
        - sudo reboot -f on vm host
        - Check vms are moved to other host, in active state, and are pingable after evacuation
    Teardown:
        - Remove admin role from tenant user
        - Wait for failed host to recover
        - Delete created vms
    """
    hosts = host_helper.get_up_hypervisors()
    if len(hosts) < 2:
        skip(SkipHypervisor.LESS_THAN_TWO_HYPERVISORS)

    LOG.tc_step("Boot an observer VM")
    vm_observer = launch_vm(vm_type='dpdk', num_vcpu=2, host=hosts[1])
    vm_helper.setup_avr_routing(vm_observer)

    LOG.tc_step("Launch dpdk and vhost vms")
    vms = []
    vm_host = hosts[0]
    for vm_info in (('dpdk', 3), ('vhost', 2), ('vhost', 3)):
        vm_type, num_vcpu = vm_info
        vm_id = launch_vm(vm_type=vm_type, num_vcpu=num_vcpu, host=vm_host)
        vm_helper.setup_avr_routing(vm_id, vm_type=vm_type)
        vms.append(vm_id)

    LOG.tc_step(
        "Ensure dpdk and vhost vms interfaces are reachable before evacuate")
    vm_helper.ping_vms_from_vm(vms,
                               vm_observer,
                               net_types=['data', 'internal'],
                               vshell=True)

    LOG.tc_step(
        "Reboot VMs host {} and ensure vms are evacuated to other host".format(
            vm_host))
    vm_helper.evacuate_vms(host=vm_host, vms_to_check=vms, ping_vms=True)
    vm_helper.ping_vms_from_vm(vms,
                               vm_observer,
                               net_types=['data', 'internal'],
                               vshell=True)
Example #7
def test_host_reboot_secure_boot_vm():
    """
    Test host evacuation for a secure boot vm

    Test Steps:
        - Create uefi images and bootable volumes from them
        - Boot a vm from the volumes and verify secure boot is enabled in the guest
        - Evacuate the vm by rebooting its compute host
        - Verify secure boot is still enabled in the guest after evacuation
    """
    guests_os = ['trusty_uefi', 'uefi_shell']
    disk_formats = ['qcow2', 'raw']
    image_ids = []
    volume_ids = []
    for guest_os, disk_format in zip(guests_os, disk_formats):
        image_ids.append(
            create_image_with_metadata(
                guest_os=guest_os,
                property_key=ImageMetadata.FIRMWARE_TYPE,
                values=['uefi'],
                disk_format=disk_format,
                container_format='bare'))
    # create a flavor
    flavor_id = nova_helper.create_flavor(vcpus=2, ram=1024, root_disk=5)[1]
    ResourceCleanup.add('flavor', flavor_id)
    # create bootable volumes from the images above
    for image_id in image_ids:
        volume_ids.append(
            cinder_helper.create_volume(source_id=image_id[0],
                                        size=5,
                                        cleanup='function')[1])

    block_device_dic = [{
        'id': volume_ids[1],
        'source': 'volume',
        'bootindex': 0
    }, {
        'id': volume_ids[0],
        'source': 'volume',
        'bootindex': 1
    }]

    vm_id = vm_helper.boot_vm(name='sec-boot-vm',
                              source='block_device',
                              flavor=flavor_id,
                              block_device=block_device_dic,
                              cleanup='function',
                              guest_os=guests_os[0])[1]
    _check_secure_boot_on_vm(vm_id=vm_id)

    compute_host = vm_helper.get_vm_host(vm_id=vm_id)
    vm_helper.evacuate_vms(compute_host, vms_to_check=vm_id, timeout=800)
    _check_secure_boot_on_vm(vm_id=vm_id)
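
# --- Supplementary sketch (not from the original test) ---------------------------------------
# _check_secure_boot_on_vm() above is defined elsewhere. A minimal sketch of what it is assumed
# to do: log into the uefi guest and confirm secure boot is reported as enabled (mokutil is one
# plausible way; the actual check used by the suite is not shown here). It also assumes
# exec_sudo_cmd() returns (rc, output), as is common in this framework.
def _check_secure_boot_on_vm_sketch(vm_id):
    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        output = vm_ssh.exec_sudo_cmd('mokutil --sb-state')[1]
        assert 'SecureBoot enabled' in output, "Secure boot not enabled in vm {}".format(vm_id)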
Example #8
    def trigger_evacuation(self):
        LOG.tc_step('Triggering evacuation on host: {} via action:{}'.format(
            self.current_host, self.operation))
        action = self.operation.lower()

        self.start_time = common.lab_time_now()[1]
        vms = [vm_dict['vm_id'] for vm_dict in self.vms_info.values()]

        if action in VALID_OPERATIONS:
            force_reboot = (action != 'reboot')
            vm_helper.evacuate_vms(host=self.current_host,
                                   force=force_reboot,
                                   vms_to_check=vms)
        else:
            skip('Not supported action:{}'.format(action))
        LOG.info('OK, triggered evacuation by {} host:{}'.format(
            self.operation, self.current_host))
Example #9
    def test_evacuate_vm(self, guest_os, boot_source):
        """
        Test evacuate VM with specified guest and boot source
        Args:
            guest_os (str): guest OS name
            boot_source (str): volume or image

        Setup:
            - Ensure sufficient space on system to create the required guest. Skip otherwise.

        Test Steps:
            - Boot a VM with given guest OS from specified boot source
            - Ensure VM is reachable from NatBox
            - 'sudo reboot -f' on vm host to evacuate it
            - Check vm is successfully evacuated - active state and reachable from NatBox

        Teardown:
            - Delete created vm, volume if any, and glance image

        """
        img_id = check_helper.check_fs_sufficient(guest_os=guest_os,
                                                  boot_source=boot_source)

        source_id = img_id if boot_source == 'image' else None
        LOG.tc_step("Boot a {} VM from {}".format(guest_os, boot_source))
        vm_id = vm_helper.boot_vm(name="{}_{}".format(guest_os, boot_source),
                                  source=boot_source,
                                  source_id=source_id,
                                  guest_os=guest_os,
                                  cleanup='function')[1]

        LOG.tc_step("Wait for VM pingable from NATBox")
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

        vm_host = vm_helper.get_vm_host(vm_id)
        LOG.tc_step("Reboot VM host {}".format(vm_host))
        vm_helper.evacuate_vms(host=vm_host, vms_to_check=vm_id, ping_vms=True)
Example #10
def operation(vm_id_, host_):
    vm_helper.evacuate_vms(host=host_,
                           vms_to_check=vm_id_,
                           ping_vms=True)
Example #11
def test_evacuate_pci_vm(vif_model_check):
    """
    Test evacuate vm with a pci interface (sriov or pci-passthrough)

    Args:
        vif_model_check: test fixture that returns the vif model under test along with the
            base vm, flavor, nics, segmentation id and network info used by the test

    Setups:
        - create a flavor with dedicated cpu policy (module)
        - choose one tenant network and one internal network to be used by test (module)
        - boot a base vm - vm1 with above flavor and networks, and ping it from NatBox (module)
        - Boot a vm under test - vm2 with above flavor and with multiple ports on same tenant network with base vm,
        and ping it from NatBox     (class)
        - Ping vm2's own data network ips       (class)
        - Ping vm2 from vm1 to verify management and data networks connection   (class)

    Test Steps:
        - Reboot vm2 host
        - Wait for vm2 to be evacuated to other host
        - Wait for vm2 pingable from NatBox
        - Verify ping from vm1 to vm2 over management and data networks still works

    Teardown:
        - Delete created vms and flavor
    """
    vif_model, base_vm, flavor_id, nics_to_test, seg_id, net_type, pnet_name, extra_pcipt_net = vif_model_check

    LOG.tc_step("Boot a vm with {} vif model on {} net".format(
        vif_model, net_type))
    res, vm_id, err = vm_helper.boot_vm(name=vif_model,
                                        flavor=flavor_id,
                                        cleanup='function',
                                        nics=nics_to_test)
    assert 0 == res, "VM is not booted successfully. Error: {}".format(err)

    vm_helper.wait_for_vm_pingable_from_natbox(vm_id, fail_ok=False)

    if 'pci-passthrough' == vif_model:
        LOG.tc_step("Add vlan to pci-passthrough interface for VM.")
        vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=vm_id,
                                                   net_seg_id=seg_id,
                                                   init_conf=True)

    LOG.tc_step("Ping vm over mgmt and {} nets from base vm".format(net_type))
    vm_helper.ping_vms_from_vm(to_vms=vm_id,
                               from_vm=base_vm,
                               net_types=['mgmt', net_type])

    host = vm_helper.get_vm_host(vm_id)

    # Remove the following 'ssh to VM and sync' workaround once CGTS-9279 is fixed
    LOG.tc_step("Login in to VM & do sync command")
    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        vm_ssh.exec_sudo_cmd('sync')

    LOG.tc_step("Reboot vm host {}".format(host))
    vm_helper.evacuate_vms(host=host,
                           vms_to_check=vm_id,
                           ping_vms=True,
                           wait_for_host_up=False)

    if 'pci-passthrough' == vif_model:
        LOG.tc_step(
            "Add vlan to pci-passthrough interface for VM again after evacuation due to interface change."
        )
        vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=vm_id,
                                                   net_seg_id=seg_id)

    LOG.tc_step(
        "Check vm still pingable over mgmt, and {} nets after evacuation".
        format(net_type))
    vm_helper.ping_vms_from_vm(to_vms=vm_id,
                               from_vm=base_vm,
                               net_types=['mgmt', net_type])

    LOG.tc_step(
        "Wait for rebooted host {} to recover and ensure vm are still reachable"
        .format(host))
    host_helper.wait_for_hosts_ready(hosts=host)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id)
    vm_helper.ping_vms_from_vm(to_vms=vm_id,
                               from_vm=base_vm,
                               net_types=['mgmt', net_type])
Example #12
def test_port_trunking():
    """
    Port trunking feature test cases

    Test Steps:
        - Create networks
        - Create subnets
        - Create a parent port and subports
        - Create a trunk with the parent port and subports
        - Boot the first vm with the trunk
        - Create the second trunk without subports
        - Boot the second vm
        - Add subports to the second trunk
        - Configure vlan interfaces inside guests
        - Verify connectivity via vlan interfaces
        - Remove a subport from the first trunk and verify connectivity
        - Add the subport back to the trunk and verify connectivity
        - Do vm actions and verify connectivity


    Test Teardown:
        - Delete vms, ports, subnets, and networks created

    """
    vif_model = 'avp' if system_helper.is_avs() else None
    network_names = ['network11', 'network12', 'network13']
    net_ids = []
    sub_nets = ["30.0.0.0/24", "30.0.1.0/24", "30.0.2.0/24"]
    subnet_ids = []
    # parent ports and sub ports for trunk 1 and trunk 2
    trunk1_parent_port = 'vrf10'
    trunk1_subport_1 = 'vrf11'
    trunk1_subport_2 = 'vrf12'

    trunk2_parent_port = 'host10'
    trunk2_subport_1 = 'host11'
    trunk2_subport_2 = 'host12'

    # vlan id for the subports
    segment_1 = 1
    segment_2 = 2

    LOG.tc_step("Create Networks to be used by trunk")
    for net in network_names:
        net_ids.append(
            network_helper.create_network(name=net, cleanup='function')[1])

    LOG.tc_step("Create Subnet on the Network Created")
    for sub, network in zip(sub_nets, net_ids):
        subnet_ids.append(
            network_helper.create_subnet(network=network,
                                         subnet_range=sub,
                                         gateway='none',
                                         cleanup='function')[1])

    # Create Trunks
    LOG.tc_step("Create Parent port for trunk 1")
    t1_parent_port_id = network_helper.create_port(net_ids[0],
                                                   trunk1_parent_port,
                                                   wrs_vif=vif_model,
                                                   cleanup='function')[1]
    t1_parent_port_mac = network_helper.get_ports(
        field='mac address', port_name=trunk1_parent_port)[0]

    LOG.tc_step("Create Subport with parent port mac to be used by trunk 1")
    t1_sub_port1_id = network_helper.create_port(net_ids[1],
                                                 name=trunk1_subport_1,
                                                 mac_addr=t1_parent_port_mac,
                                                 wrs_vif=vif_model,
                                                 cleanup='function')[1]

    LOG.tc_step("Create Subport with parent port mac to be used by trunk 1")
    t1_sub_port2_id = network_helper.create_port(net_ids[2],
                                                 name=trunk1_subport_2,
                                                 mac_addr=t1_parent_port_mac,
                                                 wrs_vif=vif_model,
                                                 cleanup='function')[1]

    t1_sub_ports = [{
        'port': t1_sub_port1_id,
        'segmentation-type': 'vlan',
        'segmentation-id': segment_1
    }, {
        'port': t1_sub_port2_id,
        'segmentation-type': 'vlan',
        'segmentation-id': segment_2
    }]

    LOG.tc_step("Create port trunk 1")
    trunk1_id = network_helper.create_trunk(t1_parent_port_id,
                                            name='trunk-1',
                                            sub_ports=t1_sub_ports,
                                            cleanup='function')[1]

    LOG.tc_step("Boot a VM with mgmt net and trunk port")
    mgmt_net_id = network_helper.get_mgmt_net_id()
    nics = [{'net-id': mgmt_net_id}, {'port-id': t1_parent_port_id}]

    LOG.tc_step("Boot a vm with created ports")
    vm_id = vm_helper.boot_vm(name='vm-with-trunk1-port',
                              nics=nics,
                              cleanup='function')[1]
    LOG.tc_step("Setup vlan interfaces inside guest")
    _bring_up_vlan_interface(vm_id, 'eth1', [segment_1])

    # Create the second trunk without the subports, and boot a second vm
    LOG.tc_step("Create Parent port for trunk 2")
    t2_parent_port_id = network_helper.create_port(net_ids[0],
                                                   trunk2_parent_port,
                                                   wrs_vif=vif_model,
                                                   cleanup='function')[1]
    t2_parent_port_mac = network_helper.get_ports(
        field='mac address', port_name=trunk2_parent_port)[0]
    LOG.tc_step("Create Subport with parent port mac to be used by trunk 2")
    t2_sub_port1_id = network_helper.create_port(net_ids[1],
                                                 name=trunk2_subport_1,
                                                 mac_addr=t2_parent_port_mac,
                                                 wrs_vif=vif_model,
                                                 cleanup='function')[1]
    LOG.tc_step("Create Subport with parent port mac to be used by trunk 2")
    t2_sub_port2_id = network_helper.create_port(net_ids[2],
                                                 name=trunk2_subport_2,
                                                 mac_addr=t2_parent_port_mac,
                                                 wrs_vif=vif_model,
                                                 cleanup='function')[1]

    t2_sub_ports = [{
        'port': t2_sub_port1_id,
        'segmentation-type': 'vlan',
        'segmentation-id': segment_1
    }, {
        'port': t2_sub_port2_id,
        'segmentation-type': 'vlan',
        'segmentation-id': segment_2
    }]

    LOG.tc_step("Create port trunk 2")
    trunk2_id = network_helper.create_trunk(t2_parent_port_id,
                                            name='trunk-2',
                                            cleanup='function')[1]

    LOG.tc_step("Boot a VM with mgmt net and trunk port")
    mgmt_net_id = network_helper.get_mgmt_net_id()
    nics_2 = [{'net-id': mgmt_net_id}, {'port-id': t2_parent_port_id}]

    LOG.tc_step("Boot a vm with created ports")
    vm2_id = vm_helper.boot_vm(name='vm-with-trunk2-port',
                               nics=nics_2,
                               cleanup='function')[1]

    LOG.tc_step("Add the sub ports to the second truck")
    network_helper.set_trunk(trunk2_id, sub_ports=t2_sub_ports)

    LOG.tc_step("Setup VLAN interfaces inside guest")
    _bring_up_vlan_interface(vm2_id, 'eth1', [segment_1])

    # ping b/w 2 vms using the vlan interfaces
    eth_name = 'eth1.1'

    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        ip_addr = network_helper.get_ip_for_eth(eth_name=eth_name,
                                                ssh_client=vm_ssh)

    if ip_addr:
        with vm_helper.ssh_to_vm_from_natbox(vm2_id) as vm2_ssh:
            LOG.tc_step("Ping on vlan interface from guest")
            network_helper.ping_server(ip_addr,
                                       ssh_client=vm2_ssh,
                                       num_pings=20,
                                       fail_ok=False)

    # unset the subport on trunk_1 and try the ping (it will fail)
    LOG.tc_step(
        "Removing a subport from trunk and ping on vlan interface inside guest"
    )
    ret_code_10 = network_helper.unset_trunk(trunk1_id,
                                             sub_ports=[t1_sub_port1_id])[0]
    assert ret_code_10 == 0, "Subports not removed as expected."

    with vm_helper.ssh_to_vm_from_natbox(vm2_id) as vm2_ssh:
        LOG.tc_step("Ping on vlan interface from guest")
        ping = network_helper.ping_server(ip_addr,
                                          ssh_client=vm2_ssh,
                                          num_pings=20,
                                          fail_ok=True)[0]
        assert ping == 100, "Ping did not fail as expected."

    # set the subport on trunk_1 and try the ping (it will work)
    LOG.tc_step(
        "Add back the subport to trunk and ping on vlan interface inside guest"
    )
    t1_sub_port = [{
        'port': t1_sub_port1_id,
        'segmentation-type': 'vlan',
        'segmentation-id': segment_1
    }]
    network_helper.set_trunk(trunk1_id, sub_ports=t1_sub_port)

    with vm_helper.ssh_to_vm_from_natbox(vm2_id) as vm2_ssh:
        LOG.tc_step("Ping on vlan interface from guest")
        network_helper.ping_server(ip_addr,
                                   ssh_client=vm2_ssh,
                                   num_pings=20,
                                   fail_ok=False)

    # VM operation and ping
    for vm_actions in [['pause', 'unpause'], ['suspend', 'resume'],
                       ['live_migrate'], ['cold_migrate']]:

        LOG.tc_step("Perform following action(s) on vm {}: {}".format(
            vm2_id, vm_actions))
        for action in vm_actions:
            vm_helper.perform_action_on_vm(vm2_id, action=action)

        LOG.tc_step("Ping vm from natbox")
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

        LOG.tc_step(
            "Verify ping from base_vm to vm_under_test over management networks still works "
            "after {}".format(vm_actions))
        vm_helper.ping_vms_from_vm(to_vms=vm_id,
                                   from_vm=vm2_id,
                                   net_types=['mgmt'])

        if vm_actions[0] == 'cold_migrate':
            LOG.tc_step("Setup VLAN interfaces inside guest")
            _bring_up_vlan_interface(vm2_id, 'eth1', [segment_1])

        with vm_helper.ssh_to_vm_from_natbox(vm2_id) as vm2_ssh:
            LOG.tc_step(
                "Ping on vlan interface from guest after action {}".format(
                    vm_actions))
            network_helper.ping_server(ip_addr,
                                       ssh_client=vm2_ssh,
                                       num_pings=20,
                                       fail_ok=False)

        vm_host = vm_helper.get_vm_host(vm2_id)

        vm_on_target_host = vm_helper.get_vms_on_host(vm_host)

    LOG.tc_step(
        "Reboot VMs host {} and ensure vms are evacuated to other host".format(
            vm_host))
    vm_helper.evacuate_vms(host=vm_host, vms_to_check=vm2_id, ping_vms=True)

    for vm_id_on_target_host in vm_on_target_host:
        LOG.tc_step("Setup VLAN interfaces inside guest")
        _bring_up_vlan_interface(vm_id_on_target_host, 'eth1', [segment_1])

    with vm_helper.ssh_to_vm_from_natbox(vm2_id) as vm2_ssh:
        LOG.tc_step("Ping on vlan interface from guest after evacuation")
        network_helper.ping_server(ip_addr,
                                   ssh_client=vm2_ssh,
                                   num_pings=20,
                                   fail_ok=False)

    LOG.tc_step(
        "Attempt to delete trunk when in use, expect pass for AVS only")
    code = network_helper.delete_trunks(trunks=trunk1_id, fail_ok=True)[0]

    if system_helper.is_avs():
        assert 0 == code, "Failed to delete port trunk when it's used by a running VM with AVS"
    else:
        assert 1 == code, "Trunk is deleted when it's used by a running VM with OVS"
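
# --- Supplementary sketch (not from the original test) ---------------------------------------
# _bring_up_vlan_interface() used above is defined elsewhere. A minimal sketch of what it is
# assumed to do: create and bring up a vlan sub-interface (e.g. eth1.1) inside the guest for
# each segmentation id, reusing helpers seen elsewhere in this suite.
def _bring_up_vlan_interface_sketch(vm_id, eth_name, vlan_ids):
    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        for vlan in vlan_ids:
            sub_if = '{}.{}'.format(eth_name, vlan)
            vm_ssh.exec_sudo_cmd('ip link add link {} name {} type vlan id {}'.format(
                eth_name, sub_if, vlan))
            vm_ssh.exec_sudo_cmd('ip link set dev {} up'.format(sub_if))
            # the real helper presumably also assigns an ip to the sub-interface (dhcp or static)
            vm_ssh.exec_sudo_cmd('dhclient {} || true'.format(sub_if))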
Example #13
def test_instantiate_a_vm_with_multiple_volumes_and_migrate():
    """
    Test a vm with multiple volumes: live migration, cold migration and evacuation

    Test Setups:
    - get guest image_id
    - get or create 'small' flavor_id
    - get tenant and management network ids

    Test Steps:
    - create a boot volume and an extra 8 GB volume
    - boot a vm from the created boot volume
    - Validate that VMs boot, and that no timeouts or error status occur.
    - Verify VM status is ACTIVE
    - Attach the second volume to VM
    - Attempt to live migrate the VM
    - Login to VM and verify the filesystem is rw mode on both volumes
    - Attempt to cold migrate the VM
    - Login to VM and verify the filesystem is rw mode on both volumes
    - Reboot the compute host to initiate evacuation
    - Login to VM and verify the filesystem is rw mode on both volumes
    - Terminate VMs

    Skip conditions:
    - less than two computes
    - no storage node

    """
    # skip("Currently not working. Centos image doesn't see both volumes")
    LOG.tc_step("Creating a volume size=8GB.....")
    vol_id_0 = cinder_helper.create_volume(size=8)[1]
    ResourceCleanup.add('volume', vol_id_0, scope='function')

    LOG.tc_step("Creating a second volume size=8GB.....")
    vol_id_1 = cinder_helper.create_volume(size=8, bootable=False)[1]
    LOG.tc_step("Volume id is: {}".format(vol_id_1))
    ResourceCleanup.add('volume', vol_id_1, scope='function')

    LOG.tc_step("Booting instance vm_0...")

    vm_id = vm_helper.boot_vm(name='vm_0',
                              source='volume',
                              source_id=vol_id_0,
                              cleanup='function')[1]
    time.sleep(5)

    LOG.tc_step("Verify  VM can be pinged from NAT box...")
    rc, boot_time = check_vm_boot_time(vm_id)
    assert rc, "VM is not pingable after {} seconds ".format(boot_time)

    LOG.tc_step("Login to VM and to check filesystem is rw mode....")
    assert is_vm_filesystem_rw(
        vm_id), 'vol_0 rootfs filesystem is not RW as expected.'

    LOG.tc_step("Attemping to attach a second volume to VM...")
    vm_helper.attach_vol_to_vm(vm_id, vol_id_1)

    LOG.tc_step(
        "Login to VM and to check filesystem is rw mode for both volumes....")
    assert is_vm_filesystem_rw(vm_id, rootfs=['vda',
                                              'vdb']), 'volumes rootfs ' \
                                                       'filesystem is not RW ' \
                                                       'as expected.'

    LOG.tc_step("Attemping live migrate VM...")
    vm_helper.live_migrate_vm(vm_id=vm_id)

    LOG.tc_step("Login to VM and to check filesystem is rw mode after live "
                "migration....")
    assert is_vm_filesystem_rw(vm_id, rootfs=['vda',
                                              'vdb']), 'After live migration ' \
                                                       'rootfs filesystem is ' \
                                                       'not RW'

    LOG.tc_step("Attempting  cold migrate VM...")
    vm_helper.cold_migrate_vm(vm_id)

    LOG.tc_step("Login to VM and to check filesystem is rw mode after live "
                "migration....")
    assert is_vm_filesystem_rw(vm_id, rootfs=['vda',
                                              'vdb']), 'After cold migration ' \
                                                       'rootfs filesystem is ' \
                                                       'not RW'
    LOG.tc_step("Testing VM evacuation.....")
    before_host_0 = vm_helper.get_vm_host(vm_id)

    LOG.tc_step("Rebooting compute {} to initiate vm evacuation .....".format(
        before_host_0))
    vm_helper.evacuate_vms(host=before_host_0,
                           vms_to_check=vm_id,
                           ping_vms=True)

    LOG.tc_step("Login to VM and to check filesystem is rw mode after live "
                "migration....")
    assert is_vm_filesystem_rw(vm_id, rootfs=['vda',
                                              'vdb']), 'After evacuation ' \
                                                       'filesystem is not RW'
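
# --- Supplementary sketch (not from the original test) ---------------------------------------
# is_vm_filesystem_rw() used above is defined elsewhere. A minimal sketch of the assumed check:
# confirm each given disk is mounted read-write inside the guest via /proc/mounts. It assumes
# exec_sudo_cmd() returns (rc, output), as is common in this framework.
def _is_vm_filesystem_rw_sketch(vm_id, rootfs='vda'):
    disks = [rootfs] if isinstance(rootfs, str) else rootfs
    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        for disk in disks:
            # a healthy mount line looks like "/dev/vda1 / ext4 rw,relatime ..."
            output = vm_ssh.exec_sudo_cmd('grep /dev/{} /proc/mounts'.format(disk))[1]
            if not output or ' rw' not in output:
                return False
    return True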
Example #14
def test_vm_with_large_volume_and_evacuation(vms_, pre_alarm_):
    """
    Test instantiating two vms with large volumes (20 GB and 40 GB) and evacuating them:

    Args:
        vms_ (dict): vms created by vms_ fixture
        pre_alarm_ (list): alarm lists obtained by pre_alarm_ fixture

    Test Setups:
    - get tenant1 and management networks which are already created for lab
    setup
    - get or create a "small" flavor
    - get the guest image id
    - create two large volumes (20 GB and 40 GB) in cinder
    - boot two vms (test_inst1, test_inst2) using the 20 GB and 40 GB volumes
    respectively


    Test Steps:
    - Verify VM status is ACTIVE
    - Validate that VMs boot, and that no timeouts or error status occur.
    - Verify the VM can be pinged from NATBOX
    - Verify login to VM and rootfs (dev/vda) filesystem is rw mode
    - live migrate, if required, to bring both VMs to the same compute
    - Validate  migrated VM and no errors or alarms are present
    - Reboot compute host to initiate evacuation
    - Verify VMs are evacuated
    - Check for any system alarms
    - Verify login to VM and rootfs (dev/vda) filesystem is still rw mode
    after evacuation
    - Terminate VMs

    Skip conditions:
    - less than two computes
    - no storage node

    """
    vm_ids = []
    for vm in vms_:
        vm_id = vm['id']
        vm_ids.append(vm_id)
        LOG.tc_step(
            "Checking VM status; VM Instance id is: {}......".format(vm_id))
        vm_state = vm_helper.get_vm_status(vm_id)
        assert vm_state == VMStatus.ACTIVE, 'VM {} state is {}; Not in ' \
                                            'ACTIVE state as expected' \
            .format(vm_id, vm_state)

        LOG.tc_step("Verify  VM can be pinged from NAT box...")
        rc, boot_time = check_vm_boot_time(vm_id)
        assert rc, "VM is not pingable after {} seconds ".format(boot_time)

        LOG.tc_step("Verify Login to VM and check filesystem is rw mode....")
        assert is_vm_filesystem_rw(
            vm_id), 'rootfs filesystem is not RW as expected for VM {}' \
            .format(vm['display_name'])

    LOG.tc_step(
        "Checking if live migration is required to put the vms to a single "
        "compute....")
    host_0 = vm_helper.get_vm_host(vm_ids[0])
    host_1 = vm_helper.get_vm_host(vm_ids[1])

    if host_0 != host_1:
        LOG.tc_step("Attempting to live migrate  vm {} to host {} ....".format(
            (vms_[1])['display_name'], host_0))
        code, msg = vm_helper.live_migrate_vm(vm_ids[1],
                                              destination_host=host_0)
        LOG.tc_step("Verify live migration succeeded...")
        assert code == 0, "Live migration of vm {} to host {} did not " \
                          "success".format((vms_[1])['display_name'], host_0)

    LOG.tc_step("Verify both VMs are in same host....")
    assert host_0 == vm_helper.get_vm_host(
        vm_ids[1]), "VMs are not in the same compute host"

    LOG.tc_step(
        "Rebooting compute {} to initiate vm evacuation .....".format(host_0))
    vm_helper.evacuate_vms(host=host_0, vms_to_check=vm_ids, ping_vms=True)

    LOG.tc_step("Login to VM and to check filesystem is rw mode....")
    assert is_vm_filesystem_rw((vms_[0])[
                                   'id']), 'After evacuation the rootfs ' \
                                           'filesystem is not RW as expected ' \
                                           'for VM {}'.format(
        (vms_[0])['display_name'])

    LOG.tc_step("Login to VM and to check filesystem is rw mode....")
    assert is_vm_filesystem_rw((vms_[1])['id']), \
        'After evacuation the rootfs filesystem is not RW as expected ' \
        'for VM {}'.format((vms_[1])['display_name'])
Example #15
    def _test_evacuate_numa_setting(self, check_hosts):
        """
        Test evacuate vms with various vm numa node settings

        Skip conditions:
            - Less than two hosts with common storage backing with 2 numa nodes

        Setups:
            - Check if there are enough hosts with a common backing and 2 numa nodes to execute test
            - Add admin role to primary tenant (module)

        Test Steps:
            - Create three flavors:
                - First flavor has a dedicated cpu policy, 1 vcpu set on 1 numa node and the vm's numa_node0 is set
                  to host's numa_node0
                - Second flavor has a dedicated cpu policy, 1 vcpu set on 1 numa node and the vm's numa_node0 is set
                  to host's numa_node1
                - Third flavor has a dedicated cpu policy, 2 vcpus split between 2 different numa nodes and the vm's
                  numa_node0 is set to host's numa_node0 and vm's numa_node1 is set to host's numa_node1
            - Boot vms from each flavor on same host and wait for them to be pingable from NatBox
            - Check that the vm's topology is correct
            - sudo reboot -f on vms host
            - Ensure evacuation for all 3 vms is successful (vm host changed, active state, pingable from NatBox)
            - Check that the vm's topology is still correct following the evacuation

        Teardown:
            - Delete created vms, volumes, flavors
            - Remove admin role from primary tenant (module)

        """
        target_host = check_hosts

        LOG.tc_step("Create flavor with 1 vcpu, set on host numa node 0")
        flavor1 = nova_helper.create_flavor('numa_vm', vcpus=1)[1]
        ResourceCleanup.add('flavor', flavor1, scope='function')
        extra_specs1 = {
            FlavorSpec.CPU_POLICY: 'dedicated',
            FlavorSpec.NUMA_NODES: 1,
            FlavorSpec.NUMA_0: 0
        }
        nova_helper.set_flavor(flavor1, **extra_specs1)

        LOG.tc_step("Create flavor with 1 vcpu, set on host numa node 1")
        flavor2 = nova_helper.create_flavor('numa_vm', vcpus=1)[1]
        ResourceCleanup.add('flavor', flavor2, scope='function')
        extra_specs2 = {
            FlavorSpec.CPU_POLICY: 'dedicated',
            FlavorSpec.NUMA_NODES: 1,
            FlavorSpec.NUMA_0: 1
        }
        nova_helper.set_flavor(flavor2, **extra_specs2)

        LOG.tc_step("Create flavor with 1 vcpu, set on host numa node 1")
        flavor3 = nova_helper.create_flavor('numa_vm', vcpus=2)[1]
        ResourceCleanup.add('flavor', flavor3, scope='function')
        extra_specs3 = {
            FlavorSpec.CPU_POLICY: 'dedicated',
            FlavorSpec.NUMA_NODES: 2,
            FlavorSpec.NUMA_0: 1,
            FlavorSpec.NUMA_1: 0
        }
        nova_helper.set_flavor(flavor3, **extra_specs3)

        LOG.tc_step("Boot vm with cpu on host node 0")
        vm1 = vm_helper.boot_vm(flavor=flavor1,
                                avail_zone='nova',
                                vm_host=target_host,
                                cleanup='function')[1]
        vm_helper.wait_for_vm_pingable_from_natbox(vm1)
        check_vm_numa_topology(vm1, 1, 0, None)

        LOG.tc_step("Boot vm with cpu on host node 1")
        vm2 = vm_helper.boot_vm(flavor=flavor2,
                                avail_zone='nova',
                                vm_host=target_host,
                                cleanup='function')[1]
        vm_helper.wait_for_vm_pingable_from_natbox(vm2)
        check_vm_numa_topology(vm2, 1, 1, None)

        LOG.tc_step(
            "Boot vm with cpus on host nodes 0 and 1, (virtual nodes are switched here)"
        )
        vm3 = vm_helper.boot_vm(flavor=flavor3,
                                avail_zone='nova',
                                vm_host=target_host,
                                cleanup='function')[1]
        vm_helper.wait_for_vm_pingable_from_natbox(vm3)
        check_vm_numa_topology(vm3, 2, 1, 0)

        LOG.tc_step("Check all VMs are booted on {}".format(target_host))
        vms_on_host = vm_helper.get_vms_on_host(hostname=target_host)
        vms = [vm1, vm2, vm3]
        assert set(vms) <= set(
            vms_on_host
        ), "VMs booted on host: {}. Current vms on host: {}".format(
            vms, vms_on_host)

        vm_helper.evacuate_vms(target_host, vms, ping_vms=True)

        check_vm_numa_topology(vm1, 1, 0, None)
        check_vm_numa_topology(vm2, 1, 1, None)
        check_vm_numa_topology(vm3, 2, 1, 0)
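
    # --- Supplementary sketch (not from the original test) ------------------------------------
    # check_vm_numa_topology() used above is defined elsewhere. A minimal sketch of the assumed
    # comparison, operating on an already-retrieved topology (hypothetical structure: a list of
    # {'vm_node': .., 'host_node': ..} dicts) rather than on live vm-topology output, which is
    # not shown in this suite.
    @staticmethod
    def _check_vm_numa_topology_sketch(vm_topology, expt_nodes, expt_node0_host, expt_node1_host):
        assert len(vm_topology) == expt_nodes, \
            "Unexpected number of vm numa nodes: {}".format(vm_topology)
        assert vm_topology[0]['host_node'] == expt_node0_host, "vm numa_node0 not on expected host node"
        if expt_node1_host is not None:
            assert vm_topology[1]['host_node'] == expt_node1_host, "vm numa_node1 not on expected host node"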
Example #16
def test_robustness_service_function_chaining(protocol, nsh_aware, same_host,
                                              add_protocol, symmetric,
                                              check_system,
                                              add_admin_role_module):
    """
        Test Service Function Chaining

        Test Steps:
            - Check if the system is compatible
            - Boot the source VM, dest VM & SFC VM in same host or diff host based on <same_host: True or False>
            - Install necessary software and package inside guest for packet forwarding test
            - Create port pair using nsh_ware <True:False>
            - Create port pair group
            - Create SFC flow classifier using protocol <tcp:icmp:udp>
            - Create port Chain
            - Check packet forwarding from source to dest vm via SFC vm
            - Migrate VM by force_lock compute host
            - Check packet forwarding from source to dest vm via SFC vm
            - Create new flow classifier with new protocol (add_protocol)
            - Update port chain with new flow classifier
            - Check packet forwarding from source to dest vm via SFC vm with new classifier
            - Evacuate VM by rebooting compute host
            - Verify VM evacuated
            - Check packet forwarding from source to dest vm via SFC vm with new classifier

        Test Teardown:
            - Delete port chain, port pair group, port pair, flow classifier, vms, volumes created

    """
    nsh_aware = True if nsh_aware == 'nsh_aware' else False
    same_host = True if same_host == 'same_host' else False
    symmetric = True if symmetric == 'symmetric' else False

    LOG.tc_step("Check if the system is compatible to run this test")
    computes = check_system

    LOG.tc_step("Boot the VM in same host: {}".format(same_host))
    hosts_to_boot = [computes[0]] * 3 if same_host else computes[0:3]
    LOG.info("Boot the VM in following compute host 1:{}, 2:{}, 3:{}".format(
        hosts_to_boot[0], hosts_to_boot[1], hosts_to_boot[2]))

    LOG.tc_step("Boot the source and dest VM")
    vm_ids = []
    vm_ids, source_vm_id, dest_vm_id, internal_net_id, mgmt_net_id, mgmt_nic = _setup_vm(
        vm_ids, hosts_to_boot)
    vm_helper.ping_vms_from_vm(to_vms=source_vm_id,
                               from_vm=dest_vm_id,
                               net_types=['mgmt'],
                               retry=10)

    LOG.tc_step("Boot the SFC VM")
    sfc_vm_ids = []
    sfc_vm_ids, sfc_vm_under_test, ingress_port_id, egress_port_id = _setup_sfc_vm(
        sfc_vm_ids, hosts_to_boot, mgmt_nic, internal_net_id)
    vm_helper.ping_vms_from_vm(to_vms=source_vm_id,
                               from_vm=sfc_vm_under_test,
                               net_types=['mgmt'],
                               retry=10)

    # if protocol != 'icmp':
    LOG.tc_step("Install software package nc in vm {} {}".format(
        source_vm_id, dest_vm_id))
    _install_sw_packages_in_vm(source_vm_id)
    _install_sw_packages_in_vm(dest_vm_id)

    LOG.tc_step("copy vxlan tool in sfc vm {}".format(sfc_vm_under_test))
    vm_helper.scp_to_vm_from_natbox(vm_id=sfc_vm_under_test,
                                    source_file='/home/cgcs/sfc/vxlan_tool.py',
                                    dest_file='/root/vxlan_tool.py')

    LOG.tc_step("Create port pair")
    port_pair_ids = []
    port_pair_id = _setup_port_pair(nsh_aware, ingress_port_id, egress_port_id)
    port_pair_ids.append(port_pair_id)

    LOG.tc_step("Create port pair group")
    port_pair_group_ids = []
    port_pair_group_id = _setup_port_pair_groups(port_pair_id)
    port_pair_group_ids.append(port_pair_group_id)

    name = 'sfc_flow_classifier'
    LOG.tc_step("Create flow classifier:{}".format(name))
    flow_classifier, dest_vm_internal_net_ip = _setup_flow_classifier(
        name, source_vm_id, dest_vm_id, protocol)

    LOG.tc_step("Create port chain")
    port_chain_id = _setup_port_chain(port_pair_group_id, flow_classifier,
                                      symmetric)

    LOG.tc_step(
        "Execute vxlan.py tool and verify {} packet received VM1 to VM2".
        format(protocol))
    _check_packets_forwarded_in_sfc_vm(source_vm_id,
                                       dest_vm_id,
                                       sfc_vm_ids,
                                       dest_vm_internal_net_ip,
                                       protocol,
                                       nsh_aware,
                                       symmetric,
                                       load_balancing=False)

    LOG.tc_step("Force lock {}".format(hosts_to_boot))
    if not same_host:
        for host_to_boot in hosts_to_boot:
            HostsToRecover.add(host_to_boot)
            lock_code, lock_output = host_helper.lock_host(host_to_boot,
                                                           force=True,
                                                           check_first=True)
            assert lock_code == 0, "Failed to force lock {}. Details: {}".format(
                host_to_boot, lock_output)
    else:
        HostsToRecover.add(hosts_to_boot[0])
        lock_code, lock_output = host_helper.lock_host(hosts_to_boot[0],
                                                       force=True,
                                                       check_first=True)
        assert lock_code == 0, "Failed to force lock {}. Details: {}".format(
            hosts_to_boot[0], lock_output)

    # Expect VMs to migrate off force-locked host (non-gracefully)
    LOG.tc_step(
        "Wait for 'Active' status of VMs after host force lock completes")
    vm_helper.wait_for_vms_values(vm_ids, fail_ok=False)

    LOG.tc_step(
        "Execute vxlan.py tool and verify {} packet received VM1 to VM2".
        format(protocol))
    _check_packets_forwarded_in_sfc_vm(source_vm_id,
                                       dest_vm_id,
                                       sfc_vm_ids,
                                       dest_vm_internal_net_ip,
                                       protocol,
                                       nsh_aware,
                                       symmetric,
                                       load_balancing=False)

    LOG.tc_step(
        "Create new flow classifier with protocol {}".format(add_protocol))
    flow_classifier_name = 'new_sfc_flow_classifier'
    new_flow_classifier, dest_vm_internal_net_ip = _setup_flow_classifier(
        flow_classifier_name, source_vm_id, dest_vm_id, add_protocol)

    LOG.tc_step("Update port chain with new flow classifier:".format(
        new_flow_classifier))
    network_helper.set_sfc_port_chain(port_chain_id,
                                      port_pair_groups=port_pair_group_id,
                                      flow_classifiers=new_flow_classifier)

    LOG.tc_step(
        "Execute vxlan.py tool and verify {} packet received VM1 to VM2".
        format(add_protocol))
    _check_packets_forwarded_in_sfc_vm(source_vm_id,
                                       dest_vm_id,
                                       sfc_vm_ids,
                                       dest_vm_internal_net_ip,
                                       add_protocol,
                                       nsh_aware,
                                       symmetric,
                                       load_balancing=False)

    LOG.info("Get the host to reboot where the VMs launched")
    hosts_to_reboot = vm_helper.get_vms_hosts(vm_ids=vm_ids)

    LOG.tc_step(
        "Reboot VMs host {} and ensure vms are evacuated to other host".format(
            hosts_to_reboot))
    vm_helper.evacuate_vms(host=hosts_to_reboot,
                           vms_to_check=vm_ids,
                           ping_vms=True)

    LOG.tc_step(
        "Execute vxlan.py tool and verify {} packet received VM1 to VM2".
        format(add_protocol))
    _check_packets_forwarded_in_sfc_vm(source_vm_id,
                                       dest_vm_id,
                                       sfc_vm_ids,
                                       dest_vm_internal_net_ip,
                                       add_protocol,
                                       nsh_aware,
                                       symmetric,
                                       load_balancing=False)
Example #17
    def test_evacuate_shared_cpu_vm(self, target_hosts, add_shared_cpu, add_admin_role_func):
        """
        Test that instance with shared vcpu can be evacuated and that the vm still has shared vcpu after evacuation

        Setup:
            - Configure at least two computes to have shared cpus via
                'system host-cpu-modify -f shared p0=1,p1=1 <hostname>' (module)

        Test Steps:
            - Create a flavor with 2 vcpus, 1 of which is a shared vcpu
            - Boot two vms with the created flavor
            - Ensure all vms are booted successfully and validate the shared vcpus
            - Evacuate the vms
            - Ensure evacuation is successful and validate the shared vcpus

        Teardown:
            - Delete created vms and flavors
            - Set shared cpus to 0 (default setting) on the compute node under test (module)

        """
        storage_backing, shared_cpu_hosts, max_vcpus_per_proc = add_shared_cpu
        vm_helper.delete_vms()
        prev_total_vcpus = host_helper.get_vcpus_for_computes()

        target_host = shared_cpu_hosts[0]
        vms = []
        vcpus = 2
        shared_vcpu = 1
        pcpus = vcpus - shared_vcpu
        expt_increase = 0
        LOG.tc_step("Create two 2 vcpu VMs each with 1 shared vcpu")
        flv_id = create_shared_flavor(vcpus=vcpus, shared_vcpu=shared_vcpu, storage_backing=storage_backing)
        for _ in range(2):

            vm_id = vm_helper.boot_vm(name='shared_cpu', flavor=flv_id, fail_ok=False, avail_zone='nova',
                                      vm_host=target_host, cleanup='function')[1]
            vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

            expt_increase += pcpus
            LOG.tc_step("Check vm {} numa node setting via vm-topology".format(vm_id))
            check_shared_vcpu(vm=vm_id, shared_vcpu=shared_vcpu, vcpus=vcpus, prev_total_vcpus=prev_total_vcpus,
                              expt_increase=expt_increase)
            vms.append(vm_id)

        LOG.tc_step("Evacuate vms")
        vm_helper.evacuate_vms(target_host, vms_to_check=vms, ping_vms=True)

        vm_hosts = []
        LOG.tc_step("Check shared vcpus and numa settings for vms after evacuation")
        for vm_ in vms:
            vm_host = vm_helper.get_vm_host(vm_id=vm_)
            vm_hosts.append(vm_host)

        if len(list(set(vm_hosts))) == 1:
            post_evac_expt_increase = pcpus * 2
        else:
            post_evac_expt_increase = pcpus

        for vm_ in vms:
            check_shared_vcpu(vm=vm_, expt_increase=post_evac_expt_increase,
                              prev_total_vcpus=prev_total_vcpus, shared_vcpu=shared_vcpu, vcpus=vcpus)
Example #18
def test_vcpu_model_evacuation(add_admin_role_func, cpu_models_supported):
    """
    Launch a set of vms with different cpu models and evacuate.

    Skip if:
        - Less than two hypervisors available for evacuation

    Setups:
        - Add admin role to tenant under test (in order to launch vm onto specific host)

    Test Steps:
        - Boot 4 vms from image or volume onto the same host with different cpu models set in flavor or image metadata
            - 3 of them will have the 3 latest supported vcpu models
            - 1 of them with Passthrough model
        - 'sudo reboot -f' the vms' host to trigger an evacuation
        - Ensure evacuation for all vms is successful (vm host changed, active state, pingable from NatBox)
        - Check VMs retained their correct cpu models

    Teardown:
        - Delete created vms
        - Remove admin role from primary tenant (module)
    """

    cpu_models_multi_host, all_cpu_models_supported = cpu_models_supported
    if not cpu_models_multi_host:
        skip("Less than two hypervisors available for evacuation")

    vm_dict = {}

    LOG.info("Create 3 vms with top 3 vcpu models from: {}".format(
        cpu_models_multi_host))
    target_host = None
    boot_source = 'image'
    flv_model = None
    for i in range(3):
        for vcpu_model in cpu_models_multi_host:
            if flv_model:
                img_model = vcpu_model
                flv_model = None
            else:
                img_model = None
                flv_model = vcpu_model
            code, vm, msg = _boot_vm_vcpu_model(flv_model=flv_model,
                                                img_model=img_model,
                                                boot_source=boot_source,
                                                avail_zone='nova',
                                                vm_host=target_host)
            assert 0 == code, "Failed to launch vm with {} cpu model. Details: {}".format(
                vcpu_model, msg)

            vm_helper.wait_for_vm_pingable_from_natbox(vm)
            check_vm_cpu_model(vm_id=vm, vcpu_model=vcpu_model)
            vm_dict[vm] = vcpu_model

            boot_source = 'image' if boot_source == 'volume' else 'volume'
            if len(vm_dict) == 3:
                break
            if not target_host:
                target_host = vm_helper.get_vm_host(vm_id=vm)

        if len(vm_dict) == 3:
            break

    # Create a Passthrough VM
    code, vm, msg = _boot_vm_vcpu_model('Passthrough',
                                        None,
                                        boot_source,
                                        avail_zone='nova',
                                        vm_host=target_host)
    assert 0 == code, "Failed to launch vm with Passthrough cpu model. Details: {}".format(msg)
    vm_helper.wait_for_vm_pingable_from_natbox(vm)
    expt_arch = host_helper.get_host_cpu_model(target_host)
    check_vm_cpu_model(vm_id=vm, vcpu_model='Passthrough', expt_arch=expt_arch)
    vm_dict[vm] = 'Passthrough'

    LOG.tc_step(
        "Reboot target host {} to start evacuation".format(target_host))
    vm_helper.evacuate_vms(target_host, list(vm_dict.keys()), ping_vms=True)

    LOG.tc_step("Check vcpu models unchanged after evacuation")
    for vm_, cpu_ in vm_dict.items():
        post_evac_expt_arch = None
        LOG.info("Check vm {} has cpu model {} after evac".format(vm_, cpu_))

        if cpu_ == 'Passthrough':
            post_evac_expt_arch = expt_arch
        check_vm_cpu_model(vm_id=vm_,
                           vcpu_model=cpu_,
                           expt_arch=post_evac_expt_arch)
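Note: _boot_vm_vcpu_model() and check_vm_cpu_model() are module helpers that are not shown above. A rough sketch of how the boot helper could apply the requested model via the flavor follows, assuming a 'hw:cpu_model' extra-spec key; the real helper also supports the image-metadata path (img_model), which is omitted here. Names, keys and return shape are assumptions.

from keywords import nova_helper, vm_helper


def _boot_vm_vcpu_model(flv_model=None, img_model=None, boot_source='volume',
                        avail_zone=None, vm_host=None):
    # Hypothetical sketch: request the vcpu model through a flavor extra spec,
    # then boot from the chosen source. Assumed to return (code, vm_id, msg)
    # like vm_helper.boot_vm with fail_ok=True.
    flavor_id = nova_helper.create_flavor(name='vcpu_model', vcpus=2, ram=1024)[1]
    if flv_model:
        nova_helper.set_flavor(flavor=flavor_id, **{'hw:cpu_model': flv_model})
    return vm_helper.boot_vm(name='vcpu_model_vm', flavor=flavor_id,
                             source=boot_source, avail_zone=avail_zone,
                             vm_host=vm_host, fail_ok=True, cleanup='function')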
Example #19
def test_snat_computes_lock_reboot(snat_setups):
    """
    Test vm external access after compute host reboot with all other computes locked

    Args:
        snat_setups (tuple): vm id and floating ip. The fixture enables SNAT on the router, creates a vm and attaches a floating ip.

    Test Setups (module):
        - Find a tenant router that is dvr or non-dvr based on the parameter
        - Enable SNAT on tenant router
        - boot a vm and attach a floating ip
        - Ping vm from NatBox

    Steps:
        - Ping VM from NatBox
        - Lock all nova hosts except the vm host
        - Ping external from vm
        - Reboot VM host
        - Wait for vm host to complete reboot
        - Verify vm is recovered after host reboot complete and can still ping outside

    Test Teardown:
        - Unlock all hosts
        - Delete the created vm     (module)
        - Disable SNAT on router    (module)

    """
    hypervisors = host_helper.get_hypervisors(state='up')
    if len(hypervisors) > 3:
        skip("More than 3 hypervisors on system. Skip to reduce run time.")

    vm_ = snat_setups[0]
    LOG.tc_step("Ping VM {} from NatBox".format(vm_))
    vm_helper.wait_for_vm_pingable_from_natbox(vm_, timeout=60, use_fip=True)

    vm_host = vm_helper.get_vm_host(vm_)
    LOG.info("VM host is {}".format(vm_host))
    assert vm_host in hypervisors, "vm host is not in nova hypervisor-list"

    hosts_should_lock = set(hypervisors) - {vm_host}
    hosts_already_locked = set(system_helper.get_hosts(administrative='locked'))
    hosts_to_lock = list(hosts_should_lock - hosts_already_locked)
    LOG.tc_step("Lock all compute hosts {} except vm host {}".format(hosts_to_lock, vm_host))
    for host_ in hosts_to_lock:
        HostsToRecover.add(host_, scope='function')
        host_helper.lock_host(host_, swact=True)

    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_, timeout=120)
    LOG.tc_step("Ping external from vm {}".format(vm_))
    vm_helper.ping_ext_from_vm(vm_, use_fip=True)

    LOG.tc_step("Evacuate vm and expect VM to stay on same host")
    code, output = vm_helper.evacuate_vms(host=vm_host, vms_to_check=vm_, fail_ok=True)
    assert code > 0, "Actual: {}".format(output)

    LOG.tc_step("Verify vm is recovered and can still ping outside")
    host_helper.wait_for_hosts_ready(hosts=vm_host)
    vm_helper.wait_for_vm_status(vm_id=vm_)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_, use_fip=True, timeout=60)
    vm_helper.ping_ext_from_vm(vm_, use_fip=True)
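Note: vm_helper.ping_ext_from_vm() encapsulates the external-connectivity check used above. A rough, hypothetical sketch of the idea follows, assuming the guest is reached through the NatBox ssh path and pings a public address; the ssh helper, command and address are assumptions.

from keywords import vm_helper


def ping_ext_from_vm_sketch(vm_id, ext_ip='8.8.8.8'):
    # Hypothetical sketch: log into the guest via the NatBox and ping an external
    # address to confirm SNAT (or floating ip) egress still works.
    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        code, output = vm_ssh.exec_cmd('ping -c 3 {}'.format(ext_ip), fail_ok=True)
        assert code == 0, "VM {} cannot reach {}: {}".format(vm_id, ext_ip, output)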
Example #20
    def test_multiports_on_same_network_pci_evacuate_vm(self, base_setup_pci,
                                                        vifs):
        """
        Test evacuate vm with multiple ports on same network

        Args:
            base_setup_pci (tuple): base vm id, flavor, base nics, available
                sriov and pcipt networks, pcipt segment ids, and extra pcipt
                network
            vifs (list): list of vifs to add to same internal net

        Setups:
            - Create a flavor with dedicated cpu policy (module)
            - Choose one tenant network and one internal network to be used
                by the test (module)
            - Boot a base vm - vm1 with above flavor and networks, and ping
                it from NatBox (module)

        Test Steps:
            - Boot a vm under test - vm2 with above flavor, the base nics,
                and multiple vifs on the same internal network, then ping it
                from NatBox
            - Ping vm2's own data and internal network ips
            - Ping vm2 from vm1 to verify management, data and internal
                network connections
            - Reboot vm2 host
            - Wait for vm2 to be evacuated to other host
            - Wait for vm2 pingable from NatBox
            - Verify ping from vm1 to vm2 over management and internal
                networks still works

        Teardown:
            - Delete created vms and flavor
        """
        base_vm_pci, flavor, base_nics, avail_sriov_net, avail_pcipt_net, \
            pcipt_seg_ids, extra_pcipt_net = base_setup_pci

        internal_net_id = None
        pcipt_included = False
        nics = copy.deepcopy(base_nics)
        if 'pci-passthrough' in vifs:
            if not avail_pcipt_net:
                skip(SkipHostIf.PCIPT_IF_UNAVAIL)
            pcipt_included = True
            internal_net_id = avail_pcipt_net
            if extra_pcipt_net:
                nics.append(
                    {'net-id': extra_pcipt_net, 'vif-model': 'pci-passthrough'})
        if 'pci-sriov' in vifs:
            if not avail_sriov_net:
                skip(SkipHostIf.SRIOV_IF_UNAVAIL)
            internal_net_id = avail_sriov_net
        assert internal_net_id, "test script error. sriov or pcipt has to be " \
                                "included."

        for vif in vifs:
            nics.append({'net-id': internal_net_id, 'vif-model': vif})

        LOG.tc_step(
            "Boot a vm with following vifs on same network internal0-net1: "
            "{}".format(vifs))
        vm_under_test = vm_helper.boot_vm(name='multiports_pci_evac',
                                          nics=nics, flavor=flavor,
                                          cleanup='function',
                                          reuse_vol=False)[1]
        vm_helper.wait_for_vm_pingable_from_natbox(vm_under_test, fail_ok=False)

        if pcipt_included:
            LOG.tc_step("Add vlan to pci-passthrough interface.")
            vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=vm_under_test,
                                                       net_seg_id=pcipt_seg_ids,
                                                       init_conf=True)

        LOG.tc_step("Ping vm's own data and internal network ips")
        vm_helper.ping_vms_from_vm(to_vms=vm_under_test, from_vm=vm_under_test,
                                   net_types=['data', 'internal'])
        vm_helper.configure_vm_vifs_on_same_net(vm_id=vm_under_test)

        LOG.tc_step(
            "Ping vm_under_test from base_vm over management, data, and "
            "internal networks")
        vm_helper.ping_vms_from_vm(to_vms=vm_under_test, from_vm=base_vm_pci,
                                   net_types=['mgmt', 'data', 'internal'])

        host = vm_helper.get_vm_host(vm_under_test)

        LOG.tc_step("Reboot vm host {}".format(host))
        vm_helper.evacuate_vms(host=host, vms_to_check=vm_under_test,
                               ping_vms=True)

        if pcipt_included:
            LOG.tc_step(
                "Add/Check vlan interface is added to pci-passthrough device "
                "for vm {}.".format(vm_under_test))
            vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=vm_under_test,
                                                       net_seg_id=pcipt_seg_ids)

        LOG.tc_step(
            "Verify ping from base_vm to vm_under_test over management and "
            "internal networks still works after evacuation.")
        vm_helper.ping_vms_from_vm(to_vms=vm_under_test, from_vm=base_vm_pci,
                                   net_types=['mgmt', 'internal'])
Example #21
    def test_sriov_robustness(self, sriov_prep, add_admin_role_func):
        """
        Exhaust all CPUs on one compute by spawning VMs with 2 SR-IOV interfaces each

        Args:
            sriov_prep: test fixture to set up test environment and get proper pci nets/hosts

        Setups:
            - select two hosts configured with same pci-sriov providernet
            - add the two hosts to cgcsauto aggregate to limit the vms host to the selected hosts
            - Select one network under above providernet

        Test Steps:
            - Boot 2+ pci-sriov vms with pci-sriov vif over selected network onto same host
            - Verify resource usage for providernet is increased as expected
            - Lock vms host and ensure vms are all migrated to other host
            - Verify vms' pci-sriov interfaces reachable and resource usage for pnet unchanged
            - 'sudo reboot -f' new vms host, and ensure vms are evacuated to initial host
            - Verify vms' pci-sriov interfaces reachable and resource usage for pnet unchanged

        Teardown:
            - Delete vms, volumes, flavor created
            - Remove admin role from tenant
            - Recover hosts if applicable
            - Remove cgcsauto aggregate (class)

        """
        net_type, pci_net, pci_hosts, pnet_id, nics, initial_host, other_host, vfs_use_init, vm_num, vm_vcpus = \
            sriov_prep
        vif_model = 'pci-sriov'

        # proc0_vm, proc1_vm = host_helper.get_logcores_counts(initial_host, functions='VMs')
        # if system_helper.is_hyperthreading_enabled(initial_host):
        #     proc0_vm *= 2
        #     proc1_vm *= 2
        # vm_vcpus = int(min(proc1_vm, proc0_vm) / (vm_num/2))

        # Create flavor with calculated vcpu number
        LOG.tc_step(
            "Create a flavor with dedicated cpu policy and {} vcpus".format(
                vm_vcpus))
        flavor_id = nova_helper.create_flavor(
            name='dedicated_{}vcpu'.format(vm_vcpus), ram=1024,
            vcpus=vm_vcpus)[1]
        ResourceCleanup.add('flavor', flavor_id, scope='module')
        extra_specs = {
            FlavorSpec.CPU_POLICY: 'dedicated',
        }
        # FlavorSpec.PCI_NUMA_AFFINITY: 'preferred'}    # LP1854516
        nova_helper.set_flavor(flavor=flavor_id, **extra_specs)

        # Boot vms with 2 pci-sriov vifs each, and wait until pingable from NatBox
        LOG.tc_step("Boot {} vms with 2 {} vifs each".format(
            vm_num, vif_model))
        vms = []
        for i in range(vm_num):
            sriov_nics = nics.copy()
            sriov_nic2 = sriov_nics[-1].copy()
            sriov_nic2['port-id'] = network_helper.create_port(
                net_id=sriov_nic2.pop('net-id'),
                vnic_type='direct',
                name='sriov_port')[1]
            sriov_nics.append(sriov_nic2)
            LOG.info("Booting vm{}...".format(i + 1))
            vm_id = vm_helper.boot_vm(flavor=flavor_id,
                                      nics=sriov_nics,
                                      cleanup='function',
                                      vm_host=initial_host,
                                      avail_zone='cgcsauto')[1]
            vms.append(vm_id)
            vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

        check_vm_pci_interface(vms=vms, net_type=net_type)
        # TODO: feature unavailable atm. Update required
        # vfs_use_post_boot = nova_helper.get_provider_net_info(pnet_id, field='pci_vfs_used')
        # assert vfs_use_post_boot - vfs_use_init == vm_num * 2, "Number of PCI vfs used is not as expected"

        HostsToRecover.add(pci_hosts)

        LOG.tc_step("Lock host of {} vms: {}".format(vif_model, initial_host))
        host_helper.lock_host(host=initial_host, check_first=False, swact=True)

        LOG.tc_step(
            "Check vms are migrated to other host: {}".format(other_host))
        for vm in vms:
            vm_host = vm_helper.get_vm_host(vm_id=vm)
            assert other_host == vm_host, "VM did not move to {} after locking {}".format(
                other_host, initial_host)

        check_vm_pci_interface(vms,
                               net_type=net_type,
                               ping_timeout=VMTimeout.DHCP_RETRY)
        # TODO: feature unavailable atm. Update required
        # vfs_use_post_lock = nova_helper.get_provider_net_info(pnet_id, field='pci_vfs_used')
        # assert vfs_use_post_boot == vfs_use_post_lock, "Number of PCI vfs used after locking host is not as expected"

        LOG.tc_step("Unlock {}".format(initial_host))
        host_helper.unlock_host(initial_host)

        LOG.tc_step("Reboot {} and ensure vms are evacuated to {}".format(
            other_host, initial_host))
        vm_helper.evacuate_vms(other_host,
                               vms,
                               post_host=initial_host,
                               wait_for_host_up=True)
        check_vm_pci_interface(vms, net_type=net_type)
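Note: check_vm_pci_interface() is a shared helper for the pci tests and is not shown here. A minimal sketch under the assumption that it only verifies the vms are reachable and that traffic passes over the pci (sriov/pcipt) network; the checks in the real helper may differ.

from keywords import vm_helper


def check_vm_pci_interface_sketch(vms, net_type, ping_timeout=120):
    # Hypothetical sketch: confirm each vm is reachable from the NatBox, then
    # ping between the vms over the pci network type under test.
    for vm in vms:
        vm_helper.wait_for_vm_pingable_from_natbox(vm, timeout=ping_timeout)
    vm_helper.ping_vms_from_vm(to_vms=vms, from_vm=vms[0], net_types=[net_type])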
Example #22
    def test_evacuate_vms_with_inst_backing(self, hosts_per_backing,
                                            storage_backing):
        """
        Test evacuate vms with various vm storage configs and host instance
        backing configs

        Args:
            hosts_per_backing: hosts grouped by instance backing (fixture)
            storage_backing: storage backing under test

        Skip conditions:
            - Less than two hosts configured with storage backing under test

        Setups:
            - Add admin role to primary tenant (module)

        Test Steps:
            - Create flv_rootdisk without ephemeral or swap disks, and set
            storage backing extra spec
            - Create flv_ephemswap with ephemeral AND swap disks, and set
            storage backing extra spec
            - Boot following vms on same host and wait for them to be
            pingable from NatBox:
                - Boot vm1 from volume with flavor flv_rootdisk
                - Boot vm2 from volume with flavor flv_ephemswap
                - Boot vm3 from image with flavor flv_rootdisk
                - Boot vm4 from image with flavor flv_rootdisk, and attach a
                volume to it
                - Boot vm5 from image with flavor flv_ephemswap
            - sudo reboot -f on vms host
            - Ensure evacuation for all 5 vms are successful (vm host
            changed, active state, pingable from NatBox)

        Teardown:
            - Delete created vms, volumes, flavors
            - Remove admin role from primary tenant (module)

        """
        hosts = hosts_per_backing.get(storage_backing, [])
        if len(hosts) < 2:
            skip(
                SkipStorageBacking.LESS_THAN_TWO_HOSTS_WITH_BACKING.format(
                    storage_backing))

        target_host = hosts[0]

        LOG.tc_step("Create a flavor without ephemeral or swap disks")
        flavor_1 = nova_helper.create_flavor(
            'flv_rootdisk', storage_backing=storage_backing)[1]
        ResourceCleanup.add('flavor', flavor_1, scope='function')

        LOG.tc_step("Create another flavor with ephemeral and swap disks")
        flavor_2 = nova_helper.create_flavor(
            'flv_ephemswap',
            ephemeral=1,
            swap=512,
            storage_backing=storage_backing)[1]
        ResourceCleanup.add('flavor', flavor_2, scope='function')

        LOG.tc_step("Boot vm1 from volume with flavor flv_rootdisk and wait "
                    "for it pingable from NatBox")
        vm1_name = "vol_root"
        vm1 = vm_helper.boot_vm(vm1_name,
                                flavor=flavor_1,
                                source='volume',
                                avail_zone='nova',
                                vm_host=target_host,
                                cleanup='function')[1]

        vms_info = {
            vm1: {
                'ephemeral': 0,
                'swap': 0,
                'vm_type': 'volume',
                'disks': vm_helper.get_vm_devices_via_virsh(vm1)
            }
        }
        vm_helper.wait_for_vm_pingable_from_natbox(vm1)

        LOG.tc_step("Boot vm2 from volume with flavor flv_localdisk and wait "
                    "for it pingable from NatBox")
        vm2_name = "vol_ephemswap"
        vm2 = vm_helper.boot_vm(vm2_name,
                                flavor=flavor_2,
                                source='volume',
                                avail_zone='nova',
                                vm_host=target_host,
                                cleanup='function')[1]

        vm_helper.wait_for_vm_pingable_from_natbox(vm2)
        vms_info[vm2] = {
            'ephemeral': 1,
            'swap': 512,
            'vm_type': 'volume',
            'disks': vm_helper.get_vm_devices_via_virsh(vm2)
        }

        LOG.tc_step(
            "Boot vm3 from image with flavor flv_rootdisk and wait for "
            "it pingable from NatBox")
        vm3_name = "image_root"
        vm3 = vm_helper.boot_vm(vm3_name,
                                flavor=flavor_1,
                                source='image',
                                avail_zone='nova',
                                vm_host=target_host,
                                cleanup='function')[1]

        vm_helper.wait_for_vm_pingable_from_natbox(vm3)
        vms_info[vm3] = {
            'ephemeral': 0,
            'swap': 0,
            'vm_type': 'image',
            'disks': vm_helper.get_vm_devices_via_virsh(vm3)
        }

        LOG.tc_step("Boot vm4 from image with flavor flv_rootdisk, attach a "
                    "volume to it and wait for it "
                    "pingable from NatBox")
        vm4_name = 'image_root_attachvol'
        vm4 = vm_helper.boot_vm(vm4_name,
                                flavor_1,
                                source='image',
                                avail_zone='nova',
                                vm_host=target_host,
                                cleanup='function')[1]

        vol = cinder_helper.create_volume(bootable=False)[1]
        ResourceCleanup.add('volume', vol, scope='function')
        vm_helper.attach_vol_to_vm(vm4, vol_id=vol, mount=False)

        vm_helper.wait_for_vm_pingable_from_natbox(vm4)
        vms_info[vm4] = {
            'ephemeral': 0,
            'swap': 0,
            'vm_type': 'image_with_vol',
            'disks': vm_helper.get_vm_devices_via_virsh(vm4)
        }

        LOG.tc_step("Boot vm5 from image with flavor flv_localdisk and wait "
                    "for it pingable from NatBox")
        vm5_name = 'image_ephemswap'
        vm5 = vm_helper.boot_vm(vm5_name,
                                flavor_2,
                                source='image',
                                avail_zone='nova',
                                vm_host=target_host,
                                cleanup='function')[1]
        vm_helper.wait_for_vm_pingable_from_natbox(vm5)
        vms_info[vm5] = {
            'ephemeral': 1,
            'swap': 512,
            'vm_type': 'image',
            'disks': vm_helper.get_vm_devices_via_virsh(vm5)
        }

        LOG.tc_step("Check all VMs are booted on {}".format(target_host))
        vms_on_host = vm_helper.get_vms_on_host(hostname=target_host)
        vms = [vm1, vm2, vm3, vm4, vm5]
        assert set(vms) <= set(vms_on_host), "VMs booted on host: {}. " \
                                             "Current vms on host: {}". \
            format(vms, vms_on_host)

        for vm_ in vms:
            LOG.tc_step("Touch files under vm disks {}: "
                        "{}".format(vm_, vms_info[vm_]))
            file_paths, content = touch_files_under_vm_disks(
                vm_, **vms_info[vm_])
            vms_info[vm_]['file_paths'] = file_paths
            vms_info[vm_]['content'] = content

        LOG.tc_step("Reboot target host {}".format(target_host))
        vm_helper.evacuate_vms(host=target_host,
                               vms_to_check=vms,
                               ping_vms=True)

        LOG.tc_step("Check files after evacuation")
        for vm_ in vms:
            LOG.info("--------------------Check files for vm {}".format(vm_))
            check_helper.check_vm_files(vm_id=vm_,
                                        vm_action='evacuate',
                                        storage_backing=storage_backing,
                                        prev_host=target_host,
                                        **vms_info[vm_])
        vm_helper.ping_vms_from_natbox(vms)
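Note: touch_files_under_vm_disks() and check_helper.check_vm_files() carry the data-integrity portion of this test. A rough, hypothetical sketch of the 'touch' side follows, assuming it simply writes a marker file in the guest through the NatBox ssh session and returns the paths and content for later comparison; the ssh helper, path and returned structure are assumptions.

from keywords import vm_helper


def touch_files_under_vm_disks_sketch(vm_id, ephemeral=0, swap=0, vm_type='volume',
                                      disks=None):
    # Hypothetical sketch: write a small marker file in the guest so its content
    # can be re-read after evacuation to prove the disks survived intact.
    content = 'evacuation marker for {}'.format(vm_id)
    file_path = '/tmp/evac_check.txt'
    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        vm_ssh.exec_cmd('echo "{}" > {}; sync'.format(content, file_path),
                        fail_ok=False)
    return [file_path], content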
Example #23
    def test_pcipt_robustness(self, pcipt_prep):
        """
        TC3_robustness: PCI-passthrough by locking and rebooting pci_vm host

        Args:
            pcipt_prep: test fixture to set up test environment and get proper pci nets/hosts/seg_id

        Setups:
            - select a providernet with pcipt interfaces configured
            - get pci hosts configured with same above providernet
            - get one network under above providernet (or two for CX4 nic)

        Test Steps:
            - Boot 2 pcipt vms with pci-passthrough vif over selected network
            - Verify resource usage for providernet is increased as expected
            - Lock pci_vm host and ensure vm migrated to other host (or fail to lock if no other pcipt host available)
            - (Delete above tested pcipt vm if only two pcipt hosts available)
            - Lock host for another pcipt vm, and lock is successful
            - Verify vms' pci-pt interfaces reachable and resource usage for pnet as expected
            - 'sudo reboot -f' pci_vm host, and ensure vm evacuated or up on same host if no other pcipt host available
            - Repeat above step for another pcipt vm
            - Verify vms' pci-pt interfaces reachable and resource usage for pnet unchanged

        Teardown:
            - Delete vms, volumes, flavor created
            - Recover hosts if applicable

        """
        net_type, pci_net_name, pci_hosts, pnet_id, nics, min_vcpu_host, seg_id, vm_num, vm_vcpus, pfs_use_init = \
            pcipt_prep
        vif_model = 'pci-passthrough'

        # Create flavor with calculated vcpu number
        LOG.tc_step(
            "Create a flavor with dedicated cpu policy and {} vcpus".format(
                vm_vcpus))
        flavor_id = nova_helper.create_flavor(
            name='dedicated_{}vcpu'.format(vm_vcpus), ram=1024,
            vcpus=vm_vcpus)[1]
        ResourceCleanup.add('flavor', flavor_id, scope='module')
        extra_specs = {
            FlavorSpec.CPU_POLICY: 'dedicated',
        }
        # FlavorSpec.PCI_NUMA_AFFINITY: 'preferred'}    # LP1854516
        nova_helper.set_flavor(flavor=flavor_id, **extra_specs)

        # Boot pci-passthrough vms and wait until pingable from NatBox
        LOG.tc_step("Boot {} vms with 2 {} vifs each".format(
            vm_num, vif_model))
        vms = []
        for i in range(vm_num):
            LOG.info("Booting pci-passthrough vm{}".format(i + 1))
            vm_id = vm_helper.boot_vm(flavor=flavor_id,
                                      nics=nics,
                                      cleanup='function')[1]
            vms.append(vm_id)
            vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
            vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id,
                                                       seg_id,
                                                       init_conf=True)

        # TODO: feature unavailable atm. Update required
        # pfs_use_post_boot = nova_helper.get_provider_net_info(pnet_id, field='pci_pfs_used')
        # resource_change = 2 if isinstance(seg_id, dict) else 1
        # assert pfs_use_post_boot - pfs_use_init == vm_num * resource_change, "Number of PCI pfs used is not as expected"

        check_vm_pci_interface(vms=vms, net_type=net_type)
        HostsToRecover.add(pci_hosts)

        # pfs_use_pre_action = pfs_use_post_boot
        iter_count = 2 if len(pci_hosts) < 3 else 1
        for i in range(iter_count):
            if i == 1:
                LOG.tc_step(
                    "Delete a pcipt vm, then test lock and reboot of the pcipt host again for a successful pass"
                )
                vm_helper.delete_vms(vms=vms[1])
                vms.pop()
                # TODO: feature unavailable atm. Update required
                # pfs_use_pre_action -= resource_change
                # common.wait_for_val_from_func(expt_val=pfs_use_pre_action, timeout=30, check_interval=3,
                #                               func=nova_helper.get_provider_net_info,
                #                               providernet_id=pnet_id, field='pci_pfs_used')

            LOG.tc_step("Test lock {} vms hosts started - iter{}".format(
                vif_model, i + 1))
            for vm in vms:
                pre_lock_host = vm_helper.get_vm_host(vm)
                assert pre_lock_host in pci_hosts, "VM is not booted on pci_host"

                LOG.tc_step("Lock host of {} vms: {}".format(
                    vif_model, pre_lock_host))
                code, output = host_helper.lock_host(host=pre_lock_host,
                                                     check_first=False,
                                                     swact=True,
                                                     fail_ok=True)
                post_lock_host = vm_helper.get_vm_host(vm)
                assert post_lock_host in pci_hosts, "VM is not on pci host after migrating"

                if len(pci_hosts) < 3 and i == 0:
                    assert 5 == code, "Expect host-lock to fail due to vm migration failure. Actual: {}".format(
                        output)
                    assert pre_lock_host == post_lock_host, "VM host should not change when no other host to migrate to"
                else:
                    assert 0 == code, "Expect host-lock to succeed. Actual: {}".format(
                        output)
                    assert pre_lock_host != post_lock_host, "VM host did not change"

                check_vm_pci_interface(vms, net_type=net_type)
                LOG.tc_step("Unlock {}".format(pre_lock_host))
                host_helper.unlock_host(pre_lock_host, available_only=True)
            # TODO: feature unavailable atm. Update required
            # pfs_use_post_lock = nova_helper.get_provider_net_info(pnet_id, field='pci_pfs_used')
            # assert pfs_use_pre_action == pfs_use_post_lock, "Number of PCI pfs used after host-lock is not as expected"

            LOG.tc_step("Test evacuate {} vms started - iter{}".format(
                vif_model, i + 1))
            for vm in vms:
                pre_evac_host = vm_helper.get_vm_host(vm)

                LOG.tc_step(
                    "Reboot {} and ensure {} vm are evacuated when applicable".
                    format(pre_evac_host, vif_model))
                code, output = vm_helper.evacuate_vms(pre_evac_host,
                                                      vm,
                                                      fail_ok=True,
                                                      wait_for_host_up=True)

                if len(pci_hosts) < 3 and i == 0:
                    assert 1 == code, "Expect vm to stay on same host due to evacuation failure. Actual: {}".format(
                        output)
                    vm_helper.wait_for_vm_status(vm_id=vm)
                else:
                    assert 0 == code, "Expect vm evacuated to other host. Actual: {}".format(
                        output)
                    post_evac_host = vm_helper.get_vm_host(vm)
                    assert post_evac_host in pci_hosts, "VM is not on pci host after evacuation"

                check_vm_pci_interface(vms, net_type=net_type)