Code Example #1
def test_send_acpi_signal_on_shutdown(nova_action, hard):
    """
    Sample test case: boot an instance and send an ACPI signal on shutdown
    Test Steps:
        - Boot a vm with only mgmt interface & tenant interface
        - If hard is set, reboot the vm with the --hard option (stop/start has no --hard option)
        - SSH to the vm and modify /etc/acpi/actions/power.sh to log a message
        - Perform the nova action using the 'hard' arg
        - After the nova action, verify the message was logged in /var/log/messages

    Teardown:
        - Delete created vm, volume

    """
    nova_action = nova_action.split('_')
    hard = 1 if 'hard' == hard else 0

    LOG.info("hard option: {}".format(hard))
    LOG.tc_step("Boot a vm")
    vm_under_test = vm_helper.boot_vm(name='send_acpi_signal_to_vm',
                                      cleanup='function')[1]

    LOG.tc_step("Modify gyest acpi file file")
    _modify_guest_acpi_file(vm_id=vm_under_test)

    kwargs = {}
    if hard == 1:
        kwargs = {'hard': True}
    for action in nova_action:
        LOG.tc_step("Perform nova action: {}".format(action))
        vm_helper.perform_action_on_vm(vm_under_test, action=action, **kwargs)

    LOG.tc_step("Verify /var/log/messages file")
    _check_log_messages(vm_id=vm_under_test, hard=hard)
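
The underscore-prefixed helpers above are defined elsewhere in the test module. A minimal sketch of what they might look like, reusing the suite's vm_helper.ssh_to_vm_from_natbox context manager (seen in later examples); the exec_cmd/exec_sudo_cmd methods, the log marker, and the hard-action expectation are illustrative assumptions, not the project's actual implementation:

ACPI_LOG_MARKER = 'acpi power button pressed'    # illustrative marker string


def _modify_guest_acpi_file(vm_id):
    """Insert a logger call near the top of the guest's ACPI power-button script."""
    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        # exec_sudo_cmd is assumed to run a command with sudo and return (rc, output)
        vm_ssh.exec_sudo_cmd(
            "sed -i '2i logger {}' /etc/acpi/actions/power.sh".format(ACPI_LOG_MARKER))


def _check_log_messages(vm_id, hard=0):
    """Check /var/log/messages for the marker; a hard action is expected to skip the ACPI path."""
    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        code = vm_ssh.exec_cmd('grep "{}" /var/log/messages'.format(ACPI_LOG_MARKER),
                               fail_ok=True)[0]
    if hard == 1:
        assert code != 0, "ACPI handler ran even though the hard option was used"
    else:
        assert code == 0, "ACPI shutdown message not found in /var/log/messages"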
Code Example #2
def test_migration_auto_converge(no_simplex):
    """
    Auto converge a VM with stress-ng running

    Test Steps:
        - Create flavor
        - Create a heat stack (launch a vm with stress-ng)
        - Perform live-migration and verify connectivity

    Test Teardown:
        - Delete stacks, vms, flavors created

    """

    LOG.tc_step("Create a flavor with 2 vcpus")
    flavor_id = nova_helper.create_flavor(vcpus=2, ram=1024, root_disk=3)[1]
    ResourceCleanup.add('flavor', flavor_id)

    # add live migration timeout
    extra_specs = {FlavorSpec.LIVE_MIG_TIME_OUT: 300}
    nova_helper.set_flavor(flavor=flavor_id, **extra_specs)

    LOG.tc_step("Get the heat file name to use")
    heat_template = _get_stress_ng_heat()

    stack_name = vm_name = 'stress_ng'
    LOG.tc_step("Creating heat stack")
    code, msg = heat_helper.create_stack(stack_name=stack_name,
                                         template=heat_template,
                                         parameters={
                                             'flavor': flavor_id,
                                             'name': vm_name
                                         },
                                         cleanup='function')
    assert code == 0, "Failed to create heat stack"

    LOG.info("Verifying server creation via heat")
    vm_id = vm_helper.get_vm_id_from_name(vm_name='stress_ng', strict=False)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id)

    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        LOG.tc_step("Check for Stress-ng inside vm")
        assert 0 == wait_for_stress_ng(vm_ssh), " Stress-ng is not running"

    for vm_actions in [['live_migrate']]:

        LOG.tc_step("Perform following action(s) on vm {}: {}".format(
            vm_id, vm_actions))
        for action in vm_actions:
            vm_helper.perform_action_on_vm(vm_id, action=action)

        LOG.tc_step("Ping vm from natbox")
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
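
wait_for_stress_ng is a local helper not shown on this page. A plausible sketch that polls the guest for a stress-ng process; the exec_cmd signature on the ssh client and the timeout are assumptions:

import time


def wait_for_stress_ng(vm_ssh, timeout=120, check_interval=5):
    """Return 0 once a stress-ng process is seen inside the guest, 1 on timeout."""
    end_time = time.time() + timeout
    while time.time() < end_time:
        # exec_cmd is assumed to return (rc, output); pgrep exits 0 when a match is found
        if vm_ssh.exec_cmd('pgrep stress-ng', fail_ok=True)[0] == 0:
            return 0
        time.sleep(check_interval)
    return 1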
Code Example #3
def _force_live_migrate(vm_id):

    destination_host = vm_helper.get_dest_host_for_live_migrate(vm_id=vm_id)
    args_dict = {
        'force': True,
        'destination_host': destination_host,
    }
    kwargs = {}
    for key, value in args_dict.items():
        if value:
            kwargs[key] = value

    LOG.tc_step("Perform following action(s) on vm {}: {}".format(
        vm_id, 'live-migrate --force'))
    vm_helper.perform_action_on_vm(vm_id, action='live_migrate', **kwargs)
    return 0
Code Example #4
File: test_dpdk_vm.py  Project: pvaduva/auto_test
def test_dpdk_vm_nova_actions(vm_type, num_vcpu):
    """
    DPDK VM with nova operations and evacuation test cases

    Test Steps:
        - Create flavor for dpdk
        - Create a dpdk vm
        - Perform nova actions and verify connectivity
        - Perform evacuation

    Test Teardown:
        - Delete vms, ports, subnets, and networks created

    """
    LOG.tc_step("Boot an observer VM")
    vms, nics = vm_helper.launch_vms(vm_type="dpdk")
    vm_observer = vms[0]
    vm_helper.setup_avr_routing(vm_observer)

    vm_id = launch_vm(vm_type=vm_type, num_vcpu=num_vcpu)
    vm_helper.setup_avr_routing(vm_id, vm_type=vm_type)

    for vm_actions in [['reboot'], ['pause', 'unpause'], ['suspend', 'resume'],
                       ['live_migrate'], ['cold_migrate']]:

        LOG.tc_step("Perform following action(s) on vm {}: {}".format(
            vm_id, vm_actions))
        for action in vm_actions:
            vm_helper.perform_action_on_vm(vm_id, action=action)

        LOG.tc_step("Ping vm")
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
        vm_helper.ping_vms_from_vm(vm_id,
                                   vm_observer,
                                   net_types=['data', 'internal'],
                                   vshell=True)
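
launch_vm (for the vm under test) is defined in the same test module. A rough sketch of what it could do, built only from helpers already visible in these examples; the extra specs ('hw:cpu_policy', 'hw:mem_page_size') are standard Nova keys, but the exact flavor settings and NIC layout the project uses are assumptions:

def launch_vm(vm_type, num_vcpu):
    """Boot a guest sized for DPDK-style workloads (dedicated vCPUs, hugepage-backed memory)."""
    flavor_id = nova_helper.create_flavor(vcpus=num_vcpu, ram=1024,
                                          cleanup='function')[1]
    nova_helper.set_flavor(flavor=flavor_id,
                           **{'hw:cpu_policy': 'dedicated',
                              'hw:mem_page_size': 'large'})

    mgmt_net_id = network_helper.get_mgmt_net_id()
    nics = [{'net-id': mgmt_net_id}]    # data/internal nics for vm_type omitted in this sketch

    vm_id = vm_helper.boot_vm(name='dpdk-{}'.format(vm_type),
                              flavor=flavor_id,
                              nics=nics,
                              cleanup='function')[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    return vm_id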
Code Example #5
def test_interface_attach_detach_max_vnics(guest_os, if_attach_arg, vifs,
                                           check_avs_pattern, base_vm):
    """
    Sample test case for interface attach/detach to maximum vnics

    Setups:
        - Boot a base vm with mgmt net and internal0-net1   (module)

    Test Steps:
        - Boot a vm with only mgmt interface
        - Attach vifs to the vm with given if_attach_arg and vif_model
        - Bring up the interface from vm
        - ping between base_vm and vm_under_test over mgmt & tenant network
        - Perform VM action - Cold migrate, live migrate, pause resume, suspend resume
        - Verify ping between base_vm and vm_under_test over mgmt & tenant network after vm operation
        - Detach all the tenant interfaces
        - Repeat attach/detach after performing each vm action

    Teardown:
        - Delete created vm, volume, port (if any)  (func)
        - Delete base vm, volume    (module)

    """
    if guest_os == 'vxworks' and not system_helper.is_avs():
        skip('e1000 vif unsupported by OVS')

    base_vm_id, mgmt_nic, tenant_nic, internal_net_id, tenant_net_id, mgmt_net_id = base_vm

    glance_vif = None
    if not (if_attach_arg == 'port_id' and system_helper.is_avs()):
        for vif in vifs:
            if vif[0] in ('e1000', 'rtl8139'):
                glance_vif = vif[0]
                break

    LOG.tc_step("Get/Create {} glance image".format(guest_os))
    cleanup = None if (not glance_vif and re.search(
        GuestImages.TIS_GUEST_PATTERN, guest_os)) else 'function'
    image_id = glance_helper.get_guest_image(
        guest_os=guest_os,
        cleanup=cleanup,
        use_existing=False if cleanup else True)

    if glance_vif:
        glance_helper.set_image(image_id,
                                hw_vif_model=glance_vif,
                                new_name='{}_{}'.format(guest_os, glance_vif))

    LOG.tc_step("Create a flavor with 2 vcpus")
    flavor_id = nova_helper.create_flavor(vcpus=1,
                                          guest_os=guest_os,
                                          cleanup='function')[1]

    LOG.tc_step("Create a volume from {} image".format(guest_os))
    code, vol_id = cinder_helper.create_volume(name='vol-' + guest_os,
                                               source_id=image_id,
                                               fail_ok=True,
                                               guest_image=guest_os,
                                               cleanup='function')
    assert 0 == code, "Issue occurred when creating volume"
    source_id = vol_id

    LOG.tc_step("Boot a vm with mgmt nic only")
    vm_under_test = vm_helper.boot_vm(name='if_attach_tenant',
                                      nics=[mgmt_nic],
                                      source_id=source_id,
                                      flavor=flavor_id,
                                      guest_os=guest_os,
                                      cleanup='function')[1]
    prev_port_count = 1
    for vm_actions in [['live_migrate'], ['cold_migrate'],
                       ['pause', 'unpause'], ['suspend', 'resume'],
                       ['stop', 'start']]:
        tenant_port_ids = []
        if 'vxworks' not in guest_os:
            LOG.tc_step(
                "Attach specified vnics to the VM before {} and bring up interfaces"
                .format(vm_actions))
            expt_vnics = 1
            for vif in vifs:
                vif_model, vif_count = vif
                expt_vnics += vif_count
                LOG.info("iter {}".format(vif_count))
                for i in range(vif_count):
                    if if_attach_arg == 'port_id':
                        vif_model = vif_model if system_helper.is_avs(
                        ) else None
                        port = network_helper.create_port(
                            net_id=tenant_net_id,
                            wrs_vif=vif_model,
                            cleanup='function',
                            name='attach_{}_{}'.format(vif_model, i))[1]
                        kwargs = {'port_id': port}
                    else:
                        kwargs = {'net_id': tenant_net_id}
                    tenant_port_id = vm_helper.attach_interface(
                        vm_under_test, **kwargs)[1]
                    tenant_port_ids.append(tenant_port_id)
                LOG.info(
                    "Attached new vnics to the VM {}".format(tenant_port_ids))

            vm_ports_count = len(
                network_helper.get_ports(server=vm_under_test))
            LOG.info("vnics attached to VM: {}".format(vm_ports_count))
            assert vm_ports_count == expt_vnics, "Number of vnics attached to the VM is not as expected."

            LOG.info(
                "Bring up all the attached new vifs {} on tenant net from vm".
                format(vifs))
            _bring_up_attached_interface(vm_under_test,
                                         ports=tenant_port_ids,
                                         guest_os=guest_os,
                                         base_vm=base_vm_id)

            if expt_vnics == 16:
                LOG.tc_step(
                    "Verify no more vnic can be attached after reaching upper limit 16"
                )
                res = vm_helper.attach_interface(vm_under_test,
                                                 net_id=tenant_net_id,
                                                 fail_ok=True)[0]
                assert res == 1, "Attaching a vnic beyond the 16-vnic limit was not rejected"

        if vm_actions[0] == 'auto_recover':
            LOG.tc_step(
                "Set vm to error state and wait for auto recovery complete, then verify ping from "
                "base vm over management and data networks")
            vm_helper.set_vm_state(vm_id=vm_under_test,
                                   error_state=True,
                                   fail_ok=False)
            vm_helper.wait_for_vm_values(vm_id=vm_under_test,
                                         status=VMStatus.ACTIVE,
                                         fail_ok=True,
                                         timeout=600)
            # if 'vxworks' not in guest_os:
            #     _bring_up_attached_interface(vm_under_test, guest_os=guest_os, num=new_vnics)
        else:
            LOG.tc_step("Perform following action(s) on vm {}: {}".format(
                vm_under_test, vm_actions))
            for action in vm_actions:
                vm_helper.perform_action_on_vm(vm_under_test, action=action)
                if action == 'cold_migrate' or action == 'start':
                    LOG.tc_step(
                        "Bring up all the attached tenant interface from vm after {}"
                        .format(vm_actions))
                    # if 'vxworks' not in guest_os:
                    #     _bring_up_attached_interface(vm_under_test, guest_os=guest_os, num=new_vnics)

        vm_helper.wait_for_vm_pingable_from_natbox(vm_under_test)

        if 'vxworks' not in guest_os:
            LOG.tc_step(
                "Verify ping from base_vm to vm_under_test over management networks still works "
                "after {}".format(vm_actions))
            vm_helper.ping_vms_from_vm(to_vms=vm_under_test,
                                       from_vm=base_vm_id,
                                       net_types=['mgmt', 'data'],
                                       retry=10)

            LOG.tc_step("Detach all attached interface {} after {}".format(
                tenant_port_ids, vm_actions))
            for tenant_port_id in tenant_port_ids:
                vm_helper.detach_interface(vm_id=vm_under_test,
                                           port_id=tenant_port_id,
                                           cleanup_route=True)

            vm_ports_count = len(
                network_helper.get_ports(server=vm_under_test))
            assert prev_port_count == vm_ports_count, "VM ports still listed after interface-detach"
            res = vm_helper.ping_vms_from_vm(to_vms=base_vm_id,
                                             from_vm=vm_under_test,
                                             fail_ok=True,
                                             net_types=['data'],
                                             retry=0)[0]
            assert not res, "Detached interface still works"
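
_bring_up_attached_interface is shared by the attach/detach tests but not shown here. A simplified sketch, assuming a Linux guest where hot-attached vNICs show up as eth1, eth2, ... in attach order and DHCP is available on the tenant network; the exec_sudo_cmd calls and the naming scheme are illustrative:

def _bring_up_attached_interface(vm_id, ports, guest_os=None, base_vm=None, action=None):
    """Bring up hot-attached interfaces inside the guest, then verify data connectivity."""
    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        for i in range(len(ports)):
            eth = 'eth{}'.format(i + 1)     # eth0 is assumed to be the mgmt nic
            vm_ssh.exec_sudo_cmd('ip link set {} up'.format(eth))
            vm_ssh.exec_sudo_cmd('dhclient {}'.format(eth), fail_ok=True)

    if base_vm:
        vm_helper.ping_vms_from_vm(to_vms=vm_id, from_vm=base_vm,
                                   net_types=['data'], retry=10)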
Code Example #6
def test_interface_attach_detach_on_paused_vm(guest_os, boot_source, vifs,
                                              check_avs_pattern, base_vm):
    """
    Sample test case for interface attach/detach on a paused vm

    Setups:
        - Boot a base vm with mgmt net and tenant_port_id (module)

    Test Steps:
        - Boot a vm with mgmt and avp port interface
        - Pause the vm
        - Attach vifs with given vif_model to the vm
        - perform live migration on paused vm
        - unpause the vm
        - Bring up the interface from vm
        - ping between base_vm and vm_under_test over mgmt & tenant network
        - Detach all the tenant interfaces
        - Verify ping to tenant interfaces fail

    Teardown:
        - Delete created vm, volume, port (if any)  (func)
        - Delete base vm, volume    (module)

    """

    base_vm_id, mgmt_nic, tenant_nic, internal_net_id, tenant_net_id, mgmt_net_id = base_vm
    LOG.tc_step("Create a avp port")
    init_port_id = network_helper.create_port(tenant_net_id,
                                              'tenant_avp_port',
                                              wrs_vif='avp',
                                              cleanup='function')[1]
    tenant_net_nic = {'port-id': init_port_id, 'vif-model': 'avp'}

    LOG.tc_step("Get/Create {} glance image".format(guest_os))
    cleanup = None if re.search(GuestImages.TIS_GUEST_PATTERN,
                                guest_os) else 'module'
    image_id = glance_helper.get_guest_image(guest_os=guest_os,
                                             cleanup=cleanup)

    LOG.tc_step(
        "Boot a {} vm and flavor from {} with a mgmt and a data interface".
        format(guest_os, boot_source))
    vm_under_test = vm_helper.boot_vm('if_attach-{}-{}'.format(
        guest_os, boot_source),
                                      nics=[mgmt_nic, tenant_net_nic],
                                      source=boot_source,
                                      image_id=image_id,
                                      guest_os=guest_os,
                                      cleanup='function')[1]

    _ping_vm_data(vm_under_test=vm_under_test,
                  base_vm_id=base_vm_id,
                  action='boot')

    LOG.tc_step(
        "Pause vm {} before attaching interfaces".format(vm_under_test))
    vm_helper.perform_action_on_vm(vm_under_test, action='pause')

    LOG.tc_step("Create and attach vnics to the VM: {}".format(vifs))
    tenant_port_ids = network_helper.get_ports(server=vm_under_test,
                                               network=tenant_net_id)
    expt_vnics = 2
    new_vnics = 0
    for vif in vifs:
        vif_model, vif_count = vif
        expt_vnics += vif_count
        LOG.info("iter {}".format(vif_count))
        LOG.info("Create and attach {} {} vnics to vm {}".format(
            vif_count, vif_model, vm_under_test))
        for i in range(vif_count):
            name = 'attached_port-{}_{}'.format(vif_model, i)
            port_id = network_helper.create_port(net_id=tenant_net_id,
                                                 name=name,
                                                 wrs_vif=vif_model,
                                                 cleanup='function')[1]
            vm_helper.attach_interface(vm_under_test, port_id=port_id)
            new_vnics += 1
            tenant_port_ids.append(port_id)

    vm_ports_count = len(network_helper.get_ports(server=vm_under_test))
    LOG.info("vnics attached to VM: {}".format(vm_ports_count))
    assert vm_ports_count == expt_vnics, "Number of vnics attached to the VM is not as expected."

    if expt_vnics == 16:
        res = vm_helper.attach_interface(vm_under_test,
                                         net_id=tenant_net_id,
                                         fail_ok=True)[0]
        assert res == 1, "Attaching a vnic beyond the 16-vnic limit was not rejected"

    LOG.tc_step("Live migrate paused vm")
    vm_helper.perform_action_on_vm(vm_under_test, action='live_migrate')

    LOG.tc_step(
        "Unpause live-migrated vm, bring up attached interfaces and ping the VM"
    )
    vm_helper.perform_action_on_vm(vm_under_test, action='unpause')
    _bring_up_attached_interface(
        vm_under_test,
        guest_os=guest_os,
        ports=tenant_port_ids,
        base_vm=base_vm_id,
        action='pause, attach interfaces, live migrate and unpause')

    LOG.tc_step("Live migrate again after unpausing the vm")
    vm_helper.perform_action_on_vm(vm_under_test, action='live_migrate')
    _ping_vm_data(vm_under_test, base_vm_id, action='live migrate')

    LOG.tc_step("Detach ports: {}".format(tenant_port_ids))
    for tenant_port_id in tenant_port_ids:
        vm_helper.detach_interface(vm_id=vm_under_test, port_id=tenant_port_id)
        new_vnics -= 1

    res = vm_helper.ping_vms_from_vm(to_vms=base_vm_id,
                                     from_vm=vm_under_test,
                                     fail_ok=True,
                                     net_types=['data'],
                                     retry=0)[0]
    assert not res, "Ping from base_vm to vm via detached interface still works"

    LOG.tc_step(
        "Attach single interface with tenant id {}".format(tenant_net_id))
    port_id = vm_helper.attach_interface(vm_under_test,
                                         net_id=tenant_net_id)[1]
    new_vnics += 1

    LOG.tc_step(
        "Live migrate vm after detach/attach, bring up interfaces and ensure ping works"
    )
    vm_helper.perform_action_on_vm(vm_under_test, action='live_migrate')
    _bring_up_attached_interface(vm_under_test,
                                 guest_os=guest_os,
                                 ports=[port_id],
                                 base_vm=base_vm_id,
                                 action='attach interface and live migrate')
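
_ping_vm_data is another module-local helper; a likely sketch using only the vm_helper call already used throughout these examples (the retry count is an assumption):

def _ping_vm_data(vm_under_test, base_vm_id, action):
    """Verify data-network connectivity between the base vm and the vm under test."""
    LOG.tc_step("Verify ping over data network after {}".format(action))
    vm_helper.ping_vms_from_vm(to_vms=vm_under_test, from_vm=base_vm_id,
                               net_types=['data'], retry=10)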
Code Example #7
def test_cpu_realtime_vm_actions(vcpus, cpu_rt, rt_mask, rt_source, shared_vcpu, numa_nodes, cpu_thread, check_hosts):
    """
    Test vm with realtime cpu policy specified in flavor
    Args:
        vcpus (int):
        cpu_rt (str|None):
        rt_source (str): flavor or image
        rt_mask (str):
        shared_vcpu (int|None):
        numa_nodes (int|None): number of numa_nodes to boot vm on
        cpu_thread (str|None): cpu thread policy
        check_hosts (tuple): test fixture

    Setups:
        - check storage backing and whether system has shared cpu configured

    Test Steps:
        - Create a flavor with given cpu realtime, realtime mask and shared vcpu extra spec settings
        - Create a vm with above flavor
        - Verify cpu scheduler policies via virsh dumpxml and ps
        - Perform following nova actions and repeat above step after each action:
            ['suspend', 'resume'],
            ['live_migrate'],
            ['cold_migrate'],
            ['rebuild']

    """
    storage_backing, hosts_with_shared_cpu, ht_hosts = check_hosts

    if cpu_thread == 'require' and len(ht_hosts) < 2:
        skip("Less than two hyperthreaded hosts")

    if shared_vcpu is not None and len(hosts_with_shared_cpu) < 2:
        skip("Less than two up hypervisors configured with shared cpu")

    cpu_rt_flv = cpu_rt
    if rt_source == 'image':
        # rt_mask_flv = cpu_rt_flv = None
        rt_mask_flv = '^0'
        rt_mask_img = rt_mask
    else:
        rt_mask_flv = rt_mask
        rt_mask_img = None

    image_id = None
    if rt_mask_img is not None:
        image_metadata = {ImageMetadata.CPU_RT_MASK: rt_mask_img}
        image_id = glance_helper.create_image(name='rt_mask', cleanup='function', **image_metadata)[1]

    vol_id = cinder_helper.create_volume(source_id=image_id)[1]
    ResourceCleanup.add('volume', vol_id)

    name = 'rt-{}_mask-{}_{}vcpu'.format(cpu_rt, rt_mask_flv, vcpus)
    flv_id = create_rt_flavor(vcpus, cpu_pol='dedicated', cpu_rt=cpu_rt_flv, rt_mask=rt_mask_flv,
                              shared_vcpu=shared_vcpu, numa_nodes=numa_nodes, cpu_thread=cpu_thread,
                              storage_backing=storage_backing)[0]

    LOG.tc_step("Boot a vm with above flavor")
    vm_id = vm_helper.boot_vm(name=name, flavor=flv_id, cleanup='function', source='volume', source_id=vol_id)[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

    expt_rt_cpus, expt_ord_cpus = parse_rt_and_ord_cpus(vcpus=vcpus, cpu_rt=cpu_rt, cpu_rt_mask=rt_mask)

    check_rt_and_ord_cpus_via_virsh_and_ps(vm_id, vcpus, expt_rt_cpus, expt_ord_cpus, shared_vcpu=shared_vcpu)
    vm_host = vm_helper.get_vm_host(vm_id)
    if shared_vcpu:
        assert vm_host in hosts_with_shared_cpu

    numa_num = 1 if numa_nodes is None else numa_nodes
    check_helper.check_topology_of_vm(vm_id, vcpus, cpu_pol='dedicated', cpu_thr_pol=cpu_thread, vm_host=vm_host)

    expt_current_cpu = vcpus
    # if min_vcpus is not None:
    #     GuestLogs.add(vm_id)
    #     LOG.tc_step("Scale down cpu once")
    #     vm_helper.scale_vm(vm_id, direction='down', resource='cpu')
    #     vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    #
    #     LOG.tc_step("Check current vcpus in nova show is reduced after scale down")
    #     expt_current_cpu -= 1
    #     check_helper.check_vm_vcpus_via_nova_show(vm_id, min_vcpus, expt_current_cpu, vcpus)

    for actions in [['suspend', 'resume'], ['stop', 'start'], ['live_migrate'], ['cold_migrate'], ['rebuild']]:
        LOG.tc_step("Perform {} on vm and check realtime cpu policy".format(actions))
        for action in actions:
            kwargs = {}
            if action == 'rebuild':
                kwargs = {'image_id': image_id}
            vm_helper.perform_action_on_vm(vm_id, action=action, **kwargs)

        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
        vm_host_post_action = vm_helper.get_vm_host(vm_id)
        if shared_vcpu:
            assert vm_host_post_action in hosts_with_shared_cpu

        LOG.tc_step("Check cpu thread policy in vm topology and vcpus in nova show after {}".format(actions))
        check_helper.check_topology_of_vm(vm_id, vcpus, cpu_pol='dedicated', cpu_thr_pol=cpu_thread, numa_num=numa_num,
                                          vm_host=vm_host_post_action, current_vcpus=expt_current_cpu)

        check_virsh = True
        offline_cpu = None

        check_rt_and_ord_cpus_via_virsh_and_ps(vm_id, vcpus, expt_rt_cpus, expt_ord_cpus, shared_vcpu=shared_vcpu,
                                               offline_cpus=offline_cpu, check_virsh_vcpusched=check_virsh)
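
parse_rt_and_ord_cpus converts the realtime mask into the expected realtime and ordinary vCPU sets. A self-contained sketch that handles the common '^N' / '^N-M' exclusion syntax; the 'yes' flag value and the exclusion-only grammar are assumptions (the suite may accept a richer mask format):

def parse_rt_and_ord_cpus(vcpus, cpu_rt, cpu_rt_mask):
    """Return (realtime_cpus, ordinary_cpus) for a vm with the given number of vcpus.

    With realtime enabled, a mask such as '^0' marks vcpu 0 as ordinary and the
    remaining vcpus as realtime; with realtime disabled, every vcpu is ordinary.
    """
    all_cpus = set(range(vcpus))
    if cpu_rt != 'yes':
        return [], sorted(all_cpus)

    excluded = set()
    for part in cpu_rt_mask.split(','):
        part = part.strip().lstrip('^')      # only the exclusion form is handled here
        if '-' in part:
            start, end = part.split('-')
            excluded.update(range(int(start), int(end) + 1))
        elif part:
            excluded.add(int(part))

    rt_cpus = sorted(all_cpus - excluded)
    ord_cpus = sorted(excluded & all_cpus)
    return rt_cpus, ord_cpus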
Code Example #8
def test_nova_actions(guest_os, cpu_pol, actions):
    """

    Args:
        guest_os:
        cpu_pol:
        actions:

    Test Steps:
        - Create a glance image from given guest type
        - Create a vm from cinder volume using above image with specified cpu
        policy
        - Perform given nova actions on vm
        - Ensure nova operation succeeded and vm still in good state (active
        and reachable from NatBox)

    """
    if guest_os == 'opensuse_12':
        if not cinder_helper.is_volumes_pool_sufficient(min_size=40):
            skip(SkipStorageSpace.SMALL_CINDER_VOLUMES_POOL)

    img_id = glance_helper.get_guest_image(guest_os=guest_os)

    LOG.tc_step("Create a flavor with 1 vcpu")
    flavor_id = nova_helper.create_flavor(name=cpu_pol, vcpus=1,
                                          root_disk=9)[1]
    ResourceCleanup.add('flavor', flavor_id)

    if cpu_pol is not None:
        specs = {FlavorSpec.CPU_POLICY: cpu_pol}
        LOG.tc_step("Add following extra specs: {}".format(specs))
        nova_helper.set_flavor(flavor=flavor_id, **specs)

    LOG.tc_step("Create a volume from {} image".format(guest_os))
    vol_id = \
        cinder_helper.create_volume(name='vol-' + guest_os, source_id=img_id,
                                    guest_image=guest_os)[1]
    ResourceCleanup.add('volume', vol_id)

    LOG.tc_step("Boot a vm from above flavor and volume")
    vm_id = vm_helper.boot_vm('nova_actions',
                              flavor=flavor_id,
                              source='volume',
                              source_id=vol_id,
                              cleanup='function')[1]

    LOG.tc_step("Wait for VM pingable from NATBOX")
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

    for action in actions:
        if action == 'auto_recover':
            LOG.tc_step(
                "Set vm to error state and wait for auto recovery complete, "
                "then verify ping from base vm over "
                "management and data networks")
            vm_helper.set_vm_state(vm_id=vm_id,
                                   error_state=True,
                                   fail_ok=False)
            vm_helper.wait_for_vm_values(vm_id=vm_id,
                                         status=VMStatus.ACTIVE,
                                         fail_ok=True,
                                         timeout=600)
        else:
            LOG.tc_step("Perform following action on vm {}: {}".format(
                vm_id, action))
            vm_helper.perform_action_on_vm(vm_id, action=action)

    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
Code Example #9
    def test_multiports_on_same_network_pci_vm_actions(self, base_setup_pci,
                                                       vifs):
        """
        Test vm actions on vm with multiple ports with given vif models on
        the same tenant network

        Args:
            base_setup_pci (tuple): base_vm_pci, flavor, mgmt_net_id,
                tenant_net_id, internal_net_id, seg_id
            vifs (list): list of vifs to add to same internal net

        Setups:
            - Create a flavor with dedicated cpu policy (class)
            - Choose management net, one tenant net, and internal0-net1 to be
            used by test (class)
            - Boot a base pci-sriov vm - vm1 with above flavor and networks,
            ping it from NatBox (class)
            - Ping vm1 from itself over data, and internal networks

        Test Steps:
            - Boot a vm under test - vm2 with above flavor and with multiple
            ports on same tenant network with vm1,
                and ping it from NatBox
            - Ping vm2's own data and internal network ips
            - Ping vm2 from vm1 to verify management and data networks
            connection
            - Perform one of the following actions on vm2
                - set to error/ wait for auto recovery
                - suspend/resume
                - cold migration
                - pause/unpause
            - Update vlan interface to proper eth if pci-passthrough device
            moves to different eth
            - Verify ping from vm1 to vm2 over management and data networks
            still works
            - Repeat last 3 steps with different vm actions

        Teardown:
            - Delete created vms and flavor
        """

        base_vm_pci, flavor, base_nics, avail_sriov_net, avail_pcipt_net, \
            pcipt_seg_ids, extra_pcipt_net = base_setup_pci

        pcipt_included = False
        internal_net_id = None
        for vif in vifs:
            if not isinstance(vif, str):
                vif = vif[0]
            if 'pci-passthrough' in vif:
                if not avail_pcipt_net:
                    skip(SkipHostIf.PCIPT_IF_UNAVAIL)
                internal_net_id = avail_pcipt_net
                pcipt_included = True
                continue
            elif 'pci-sriov' in vif:
                if not avail_sriov_net:
                    skip(SkipHostIf.SRIOV_IF_UNAVAIL)
                internal_net_id = avail_sriov_net

        assert internal_net_id, "test script error. Internal net should have " \
                                "been determined."

        nics, glance_vif = _append_nics_for_net(vifs, net_id=internal_net_id,
                                                nics=base_nics)
        if pcipt_included and extra_pcipt_net:
            nics.append(
                {'net-id': extra_pcipt_net, 'vif-model': 'pci-passthrough'})

        img_id = None
        if glance_vif:
            img_id = glance_helper.create_image(name=glance_vif,
                                                hw_vif_model=glance_vif,
                                                cleanup='function')[1]

        LOG.tc_step("Boot a vm with following vifs on same internal net: "
                    "{}".format(vifs))
        vm_under_test = vm_helper.boot_vm(name='multiports_pci',
                                          nics=nics, flavor=flavor,
                                          cleanup='function',
                                          reuse_vol=False, image_id=img_id)[1]
        vm_helper.wait_for_vm_pingable_from_natbox(vm_under_test, fail_ok=False)

        if pcipt_included:
            LOG.tc_step("Add vlan to pci-passthrough interface for VM.")
            vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=vm_under_test,
                                                       net_seg_id=pcipt_seg_ids,
                                                       init_conf=True)

        LOG.tc_step("Ping vm's own data and internal network ips")
        vm_helper.ping_vms_from_vm(to_vms=vm_under_test, from_vm=vm_under_test,
                                   net_types=['data', 'internal'])

        LOG.tc_step(
            "Ping vm_under_test from base_vm over management, data, "
            "and internal networks")
        vm_helper.ping_vms_from_vm(to_vms=vm_under_test, from_vm=base_vm_pci,
                                   net_types=['mgmt', 'data', 'internal'])

        for vm_actions in [['auto_recover'], ['cold_migrate'],
                           ['pause', 'unpause'], ['suspend', 'resume']]:
            if 'auto_recover' in vm_actions:
                LOG.tc_step(
                    "Set vm to error state and wait for auto recovery "
                    "complete, "
                    "then verify ping from base vm over management and "
                    "internal networks")
                vm_helper.set_vm_state(vm_id=vm_under_test, error_state=True,
                                       fail_ok=False)
                vm_helper.wait_for_vm_values(vm_id=vm_under_test,
                                             status=VMStatus.ACTIVE,
                                             fail_ok=False, timeout=600)
            else:
                LOG.tc_step("Perform following action(s) on vm {}: {}".format(
                    vm_under_test, vm_actions))
                for action in vm_actions:
                    vm_helper.perform_action_on_vm(vm_under_test, action=action)

            vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_under_test)
            if pcipt_included:
                LOG.tc_step(
                    "Bring up vlan interface for pci-passthrough vm {}.".format(
                        vm_under_test))
                vm_helper.add_vlan_for_vm_pcipt_interfaces(
                    vm_id=vm_under_test, net_seg_id=pcipt_seg_ids)

            LOG.tc_step(
                "Verify ping from base_vm to vm_under_test over management "
                "and internal networks still works "
                "after {}".format(vm_actions))
            vm_helper.ping_vms_from_vm(to_vms=vm_under_test,
                                       from_vm=base_vm_pci,
                                       net_types=['mgmt', 'internal'])
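
_append_nics_for_net builds the extra nic dictionaries for the internal network. A sketch consistent with how example #5 treats e1000/rtl8139 (those vif models are applied through a glance image property rather than per port); the exact handling in the project may differ:

def _append_nics_for_net(vifs, net_id, nics):
    """Append one nic per requested vif on the given network; return (nics, glance_vif)."""
    nics = list(nics)                        # don't mutate the fixture's nic list
    glance_vif = None
    for vif in vifs:
        vif_model = vif if isinstance(vif, str) else vif[0]
        if vif_model in ('e1000', 'rtl8139'):
            glance_vif = vif_model           # applied later via the image's hw_vif_model property
            nics.append({'net-id': net_id})
        else:
            nics.append({'net-id': net_id, 'vif-model': vif_model})
    return nics, glance_vif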
Code Example #10
    def test_multiports_on_same_network_vm_actions(self, vifs, base_setup):
        """
        Test vm actions on vm with multiple ports with given vif models on
        the same tenant network

        Args:
            vifs (tuple): each item in the tuple is 1 nic to be added to vm
                with specified (vif_mode, pci_address)
            base_setup (list): test fixture to boot base vm

        Setups:
            - create a flavor with dedicated cpu policy (class)
            - choose one tenant network and one internal network to be used
            by test (class)
            - boot a base vm - vm1 with above flavor and networks, and ping
            it from NatBox (class)
            - Boot a vm under test - vm2 with above flavor and with multiple
            ports on same tenant network with base vm,
            and ping it from NatBox      (class)
            - Ping vm2's own data network ips        (class)
            - Ping vm2 from vm1 to verify management and data networks
            connection    (class)

        Test Steps:
            - Perform given actions on vm2 (migrate, start/stop, etc)
            - Verify pci_address preserves
            - Verify ping from vm1 to vm2 over management and data networks
            still works

        Teardown:
            - Delete created vms and flavor
        """
        base_vm, flavor, mgmt_net_id, tenant_net_id, internal_net_id = \
            base_setup

        vm_under_test, nics = _boot_multiports_vm(flavor=flavor,
                                                  mgmt_net_id=mgmt_net_id,
                                                  vifs=vifs,
                                                  net_id=tenant_net_id,
                                                  net_type='data',
                                                  base_vm=base_vm)

        for vm_actions in [['auto_recover'],
                           ['cold_migrate'],
                           ['pause', 'unpause'],
                           ['suspend', 'resume'],
                           ['hard_reboot']]:
            if vm_actions[0] == 'auto_recover':
                LOG.tc_step(
                    "Set vm to error state and wait for auto recovery "
                    "complete, then verify ping from "
                    "base vm over management and data networks")
                vm_helper.set_vm_state(vm_id=vm_under_test, error_state=True,
                                       fail_ok=False)
                vm_helper.wait_for_vm_values(vm_id=vm_under_test,
                                             status=VMStatus.ACTIVE,
                                             fail_ok=True, timeout=600)
            else:
                LOG.tc_step("Perform following action(s) on vm {}: {}".format(
                    vm_under_test, vm_actions))
                for action in vm_actions:
                    if 'migrate' in action and system_helper.is_aio_simplex():
                        continue

                    kwargs = {}
                    if action == 'hard_reboot':
                        action = 'reboot'
                        kwargs['hard'] = True
                    kwargs['action'] = action

                    vm_helper.perform_action_on_vm(vm_under_test, **kwargs)

            vm_helper.wait_for_vm_pingable_from_natbox(vm_under_test)

            # LOG.tc_step("Verify vm pci address preserved after {}".format(
            # vm_actions))
            # check_helper.check_vm_pci_addr(vm_under_test, nics)

            LOG.tc_step(
                "Verify ping from base_vm to vm_under_test over management "
                "and data networks still works "
                "after {}".format(vm_actions))
            vm_helper.ping_vms_from_vm(to_vms=vm_under_test, from_vm=base_vm,
                                       net_types=['mgmt', 'data'])
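
_boot_multiports_vm is a shared helper covering the Setups portion of the docstring. A condensed sketch using only calls visible in these examples; the vif handling mirrors _append_nics_for_net above and is an assumption:

def _boot_multiports_vm(flavor, mgmt_net_id, vifs, net_id, net_type, base_vm):
    """Boot a vm with multiple nics on the same network and verify basic connectivity."""
    nics = [{'net-id': mgmt_net_id}]
    for vif in vifs:
        vif_model = vif if isinstance(vif, str) else vif[0]
        nics.append({'net-id': net_id, 'vif-model': vif_model})

    vm_under_test = vm_helper.boot_vm(name='multiports', nics=nics, flavor=flavor,
                                      cleanup='function')[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_under_test)

    vm_helper.ping_vms_from_vm(to_vms=vm_under_test, from_vm=vm_under_test,
                               net_types=[net_type])
    vm_helper.ping_vms_from_vm(to_vms=vm_under_test, from_vm=base_vm,
                               net_types=['mgmt', net_type])
    return vm_under_test, nics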
Code Example #11
def test_port_trunking():
    """
    Port trunking feature test cases

    Test Steps:
        - Create networks
        - Create subnets
        - Create a parent port and subports
        - Create a trunk with parent port and subports
        - Boot the first vm with the trunk
        - Create the second trunk without subport
        - Boot the second vm
        - Add subport to trunk
        - Configure vlan interfaces inside guests
        - Verify connectivity via vlan interfaces
        - Remove the subport from trunk and verify connectivity
        - Add the subport to trunk and verify connectivity
        - Do vm actions and verify connectivity


    Test Teardown:
        - Delete vms, ports, subnets, and networks created

    """
    vif_model = 'avp' if system_helper.is_avs() else None
    network_names = ['network11', 'network12', 'network13']
    net_ids = []
    sub_nets = ["30.0.0.0/24", "30.0.1.0/24", "30.0.2.0/24"]
    subnet_ids = []
    # parent ports and sub ports for trunk 1 and trunk 2
    trunk1_parent_port = 'vrf10'
    trunk1_subport_1 = 'vrf11'
    trunk1_subport_2 = 'vrf12'

    trunk2_parent_port = 'host10'
    trunk2_subport_1 = 'host11'
    trunk2_subport_2 = 'host12'

    # vlan id for the subports
    segment_1 = 1
    segment_2 = 2

    LOG.tc_step("Create Networks to be used by trunk")
    for net in network_names:
        net_ids.append(
            network_helper.create_network(name=net, cleanup='function')[1])

    LOG.tc_step("Create Subnet on the Network Created")
    for sub, network in zip(sub_nets, net_ids):
        subnet_ids.append(
            network_helper.create_subnet(network=network,
                                         subnet_range=sub,
                                         gateway='none',
                                         cleanup='function')[1])

    # Create Trunks
    LOG.tc_step("Create Parent port for trunk 1")
    t1_parent_port_id = network_helper.create_port(net_ids[0],
                                                   trunk1_parent_port,
                                                   wrs_vif=vif_model,
                                                   cleanup='function')[1]
    t1_parent_port_mac = network_helper.get_ports(
        field='mac address', port_name=trunk1_parent_port)[0]

    LOG.tc_step("Create Subport with parent port mac to be used by trunk 1")
    t1_sub_port1_id = network_helper.create_port(net_ids[1],
                                                 name=trunk1_subport_1,
                                                 mac_addr=t1_parent_port_mac,
                                                 wrs_vif=vif_model,
                                                 cleanup='function')[1]

    LOG.tc_step("Create Subport with parent port mac to be used by trunk 1")
    t1_sub_port2_id = network_helper.create_port(net_ids[2],
                                                 name=trunk1_subport_2,
                                                 mac_addr=t1_parent_port_mac,
                                                 wrs_vif=vif_model,
                                                 cleanup='function')[1]

    t1_sub_ports = [{
        'port': t1_sub_port1_id,
        'segmentation-type': 'vlan',
        'segmentation-id': segment_1
    }, {
        'port': t1_sub_port2_id,
        'segmentation-type': 'vlan',
        'segmentation-id': segment_2
    }]

    LOG.tc_step("Create port trunk 1")
    trunk1_id = network_helper.create_trunk(t1_parent_port_id,
                                            name='trunk-1',
                                            sub_ports=t1_sub_ports,
                                            cleanup='function')[1]

    LOG.tc_step("Boot a VM with mgmt net and trunk port")
    mgmt_net_id = network_helper.get_mgmt_net_id()
    nics = [{'net-id': mgmt_net_id}, {'port-id': t1_parent_port_id}]

    LOG.tc_step("Boot a vm with created ports")
    vm_id = vm_helper.boot_vm(name='vm-with-trunk1-port',
                              nics=nics,
                              cleanup='function')[1]
    LOG.tc_step("Setup vlan interfaces inside guest")
    _bring_up_vlan_interface(vm_id, 'eth1', [segment_1])

    # Create the second trunk without subports and boot the second vm
    LOG.tc_step("Create Parent port for trunk 2")
    t2_parent_port_id = network_helper.create_port(net_ids[0],
                                                   trunk2_parent_port,
                                                   wrs_vif=vif_model,
                                                   cleanup='function')[1]
    t2_parent_port_mac = network_helper.get_ports(
        field='mac address', port_name=trunk2_parent_port)[0]
    LOG.tc_step("Create Subport with parent port mac to be used by trunk 2")
    t2_sub_port1_id = network_helper.create_port(net_ids[1],
                                                 name=trunk2_subport_1,
                                                 mac_addr=t2_parent_port_mac,
                                                 wrs_vif=vif_model,
                                                 cleanup='function')[1]
    LOG.tc_step("Create Subport with parent port mac to be used by trunk 2")
    t2_sub_port2_id = network_helper.create_port(net_ids[2],
                                                 name=trunk2_subport_2,
                                                 mac_addr=t2_parent_port_mac,
                                                 wrs_vif=vif_model,
                                                 cleanup='function')[1]

    t2_sub_ports = [{
        'port': t2_sub_port1_id,
        'segmentation-type': 'vlan',
        'segmentation-id': segment_1
    }, {
        'port': t2_sub_port2_id,
        'segmentation-type': 'vlan',
        'segmentation-id': segment_2
    }]

    LOG.tc_step("Create port trunk 2")
    trunk2_id = network_helper.create_trunk(t2_parent_port_id,
                                            name='trunk-2',
                                            cleanup='function')[1]

    LOG.tc_step("Boot a VM with mgmt net and trunk port")
    mgmt_net_id = network_helper.get_mgmt_net_id()
    nics_2 = [{'net-id': mgmt_net_id}, {'port-id': t2_parent_port_id}]

    LOG.tc_step("Boot a vm with created ports")
    vm2_id = vm_helper.boot_vm(name='vm-with-trunk2-port',
                               nics=nics_2,
                               cleanup='function')[1]

    LOG.tc_step("Add the sub ports to the second truck")
    network_helper.set_trunk(trunk2_id, sub_ports=t2_sub_ports)

    LOG.tc_step("Setup VLAN interfaces inside guest")
    _bring_up_vlan_interface(vm2_id, 'eth1', [segment_1])

    # ping b/w 2 vms using the vlan interfaces
    eth_name = 'eth1.1'

    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        ip_addr = network_helper.get_ip_for_eth(eth_name=eth_name,
                                                ssh_client=vm_ssh)

    if ip_addr:
        with vm_helper.ssh_to_vm_from_natbox(vm2_id) as vm2_ssh:
            LOG.tc_step("Ping on vlan interface from guest")
            network_helper.ping_server(ip_addr,
                                       ssh_client=vm2_ssh,
                                       num_pings=20,
                                       fail_ok=False)

    # unset the subport on trunk_1 and try the ping (it will fail)
    LOG.tc_step(
        "Removing a subport from trunk and ping on vlan interface inside guest"
    )
    ret_code_10 = network_helper.unset_trunk(trunk1_id,
                                             sub_ports=[t1_sub_port1_id])[0]
    assert ret_code_10 == 0, "Subports not removed as expected."

    with vm_helper.ssh_to_vm_from_natbox(vm2_id) as vm2_ssh:
        LOG.tc_step("Ping on vlan interface from guest")
        ping = network_helper.ping_server(ip_addr,
                                          ssh_client=vm2_ssh,
                                          num_pings=20,
                                          fail_ok=True)[0]
        assert ping == 100, "Ping did not fail as expected."

    # set the subport on trunk_1 and try the ping (it will work)
    LOG.tc_step(
        " Add back the subport to trunk and ping on vlan interface inside guest"
    )
    t1_sub_port = [{
        'port': t1_sub_port1_id,
        'segmentation-type': 'vlan',
        'segmentation-id': segment_1
    }]
    network_helper.set_trunk(trunk1_id, sub_ports=t1_sub_port)

    with vm_helper.ssh_to_vm_from_natbox(vm2_id) as vm2_ssh:
        LOG.tc_step("Ping on vlan interface from guest")
        network_helper.ping_server(ip_addr,
                                   ssh_client=vm2_ssh,
                                   num_pings=20,
                                   fail_ok=False)

    # VM operation and ping
    for vm_actions in [['pause', 'unpause'], ['suspend', 'resume'],
                       ['live_migrate'], ['cold_migrate']]:

        LOG.tc_step("Perform following action(s) on vm {}: {}".format(
            vm2_id, vm_actions))
        for action in vm_actions:
            vm_helper.perform_action_on_vm(vm2_id, action=action)

        LOG.tc_step("Ping vm from natbox")
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

        LOG.tc_step(
            "Verify ping from base_vm to vm_under_test over management networks still works "
            "after {}".format(vm_actions))
        vm_helper.ping_vms_from_vm(to_vms=vm_id,
                                   from_vm=vm2_id,
                                   net_types=['mgmt'])

        if vm_actions[0] == 'cold_migrate':
            LOG.tc_step("Setup VLAN interfaces inside guest")
            _bring_up_vlan_interface(vm2_id, 'eth1', [segment_1])

        with vm_helper.ssh_to_vm_from_natbox(vm2_id) as vm2_ssh:
            LOG.tc_step(
                "Ping on vlan interface from guest after action {}".format(
                    vm_actions))
            network_helper.ping_server(ip_addr,
                                       ssh_client=vm2_ssh,
                                       num_pings=20,
                                       fail_ok=False)

        vm_host = vm_helper.get_vm_host(vm2_id)

        vm_on_target_host = vm_helper.get_vms_on_host(vm_host)

    LOG.tc_step(
        "Reboot VMs host {} and ensure vms are evacuated to other host".format(
            vm_host))
    vm_helper.evacuate_vms(host=vm_host, vms_to_check=vm2_id, ping_vms=True)

    for vm_id_on_target_host in vm_on_target_host:
        LOG.tc_step("Setup VLAN interfaces inside guest")
        _bring_up_vlan_interface(vm_id_on_target_host, 'eth1', [segment_1])

    with vm_helper.ssh_to_vm_from_natbox(vm2_id) as vm2_ssh:
        LOG.tc_step("Ping on vlan interface from guest after evacuation")
        network_helper.ping_server(ip_addr,
                                   ssh_client=vm2_ssh,
                                   num_pings=20,
                                   fail_ok=False)

    LOG.tc_step(
        "Attempt to delete trunk when in use, expect pass for AVS only")
    code = network_helper.delete_trunks(trunks=trunk1_id, fail_ok=True)[0]

    if system_helper.is_avs():
        assert 0 == code, "Failed to delete port trunk when it's used by a running VM with AVS"
    else:
        assert 1 == code, "Trunk is deleted when it's used by a running VM with OVS"
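
_bring_up_vlan_interface configures the VLAN sub-interfaces inside the guest (eth1.1 above). A minimal sketch with standard iproute2 commands; the exec_sudo_cmd method and the use of DHCP for addressing are assumptions:

def _bring_up_vlan_interface(vm_id, eth_name, vlan_ids):
    """Create and bring up VLAN sub-interfaces (e.g. eth1.1) inside the guest."""
    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        for vlan_id in vlan_ids:
            sub_if = '{}.{}'.format(eth_name, vlan_id)
            vm_ssh.exec_sudo_cmd('ip link add link {} name {} type vlan id {}'.format(
                eth_name, sub_if, vlan_id), fail_ok=True)
            vm_ssh.exec_sudo_cmd('ip link set {} up'.format(eth_name))
            vm_ssh.exec_sudo_cmd('ip link set {} up'.format(sub_if))
            vm_ssh.exec_sudo_cmd('dhclient {}'.format(sub_if), fail_ok=True)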
Code Example #12
def test_server_group_boot_vms(policy, vms_num, check_system):
    """
    Test server group policy and messaging
    Test live migration with anti-affinity server group (TC6566)
    Test changing size of existing server group via CLI (TC2917)

    Args:
        policy (str): server group policy to set when creating the group
        vms_num (int): number of vms to boot

    Test Steps:
        - Create a server group with given policy
        - Add given metadata to above server group
        - Boot vm(s) with above server group
        - Verify vm(s) booted successfully and is a member of the server group
        - Verify that all vms have the server group listed in nova show
        - If more than 1 hypervisor available:
            - Attempt to live/cold migrate one of the vms, and check they succeed/fail based on
                server group setting

    Teardown:
        - Delete created vms, flavor, server group

    """
    hosts, storage_backing, up_hypervisors = check_system
    host_count = len(hosts)
    if host_count < 2 and policy == 'anti_affinity':
        skip(
            "Skip anti_affinity strict for system with 1 up host in storage aggregate"
        )

    flavor_id, srv_grp_id = create_flavor_and_server_group(
        storage_backing=storage_backing, policy=policy)
    vm_hosts = []
    members = []
    failed_num = 0
    if policy == 'anti_affinity' and vms_num > host_count:
        failed_num = vms_num - host_count
        vms_num = host_count

    LOG.tc_step(
        "Boot {} vm(s) with flavor {} in server group {} and ensure they are "
        "successfully booted.".format(vms_num, flavor_id, srv_grp_id))

    for i in range(vms_num):
        vm_id = vm_helper.boot_vm(name='srv_grp',
                                  flavor=flavor_id,
                                  hint={'group': srv_grp_id},
                                  fail_ok=False,
                                  cleanup='function')[1]

        LOG.tc_step("Check vm {} is in server group {}".format(
            vm_id, srv_grp_id))
        members = nova_helper.get_server_group_info(srv_grp_id,
                                                    headers='Members')[0]
        assert vm_id in members, "VM {} is not a member of server group {}".format(
            vm_id, srv_grp_id)

        vm_hosts.append(vm_helper.get_vm_host(vm_id))

    for i in range(failed_num):
        LOG.tc_step(
            "Boot vm{} in server group {} that's expected to fail".format(
                i, srv_grp_id))
        code, vm_id, err = vm_helper.boot_vm(name='srv_grp',
                                             flavor=flavor_id,
                                             hint={'group': srv_grp_id},
                                             fail_ok=True,
                                             cleanup='function')

        vm_helper.get_vm_fault_message(vm_id)
        assert 1 == code, "Boot vm is not rejected"

    unique_vm_hosts = list(set(vm_hosts))
    if policy in ('affinity', 'soft_affinity') or host_count == 1:
        assert 1 == len(unique_vm_hosts)
    else:
        assert len(unique_vm_hosts) == min(vms_num, host_count), \
            "Improper VM hosts for anti-affinity policy"

    assert len(members) == vms_num

    for vm in members:
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm)

    if host_count > 1:
        # TC6566 verified here
        expt_fail = policy == 'affinity' or (policy == 'anti_affinity'
                                             and host_count - vms_num < 1)

        for action in ('live_migrate', 'cold_migrate'):
            LOG.tc_step("Attempt to {} VMs and ensure it {}".format(
                action, 'fails' if expt_fail else 'pass'))
            vm_hosts_after_mig = []
            for vm in members:
                code, output = vm_helper.perform_action_on_vm(vm,
                                                              action=action,
                                                              fail_ok=True)
                if expt_fail:
                    assert 1 == code, "{} was not rejected. {}".format(
                        action, output)
                else:
                    assert 0 == code, "{} failed. {}".format(action, output)
                vm_host = vm_helper.get_vm_host(vm)
                vm_hosts_after_mig.append(vm_host)
                vm_helper.wait_for_vm_pingable_from_natbox(vm)

            if policy == 'affinity':
                assert len(list(set(vm_hosts_after_mig))) == 1
            elif policy == 'anti_affinity':
                assert len(list(set(vm_hosts_after_mig))) == vms_num, \
                    "Some VMs are on same host with strict anti-affinity polity"
Code Example #13
def test_vm_actions_secure_boot_vm():
    """
    Test a vm booted with secure boot and perform vm actions such as reboot and
    migrations

    :return:

    """
    guests_os = ['trusty_uefi', 'uefi_shell']
    disk_format = ['qcow2', 'raw']
    image_ids = []
    volume_ids = []
    for guest_os, disk_format in zip(guests_os, disk_format):
        image_ids.append(
            create_image_with_metadata(
                guest_os=guest_os,
                property_key=ImageMetadata.FIRMWARE_TYPE,
                values=['uefi'],
                disk_format=disk_format,
                container_format='bare'))
    # create a flavor
    flavor_id = nova_helper.create_flavor(vcpus=2, ram=1024, root_disk=5)[1]
    ResourceCleanup.add('flavor', flavor_id)
    # boot a vm using the above image
    for image_id in image_ids:
        volume_ids.append(
            cinder_helper.create_volume(source_id=image_id[0],
                                        size=5,
                                        cleanup='function')[1])

    block_device_dic = [{
        'id': volume_ids[1],
        'source': 'volume',
        'bootindex': 0
    }, {
        'id': volume_ids[0],
        'source': 'volume',
        'bootindex': 1
    }]

    vm_id = vm_helper.boot_vm(name='sec-boot-vm',
                              source='block_device',
                              flavor=flavor_id,
                              block_device=block_device_dic,
                              cleanup='function',
                              guest_os=guests_os[0])[1]

    _check_secure_boot_on_vm(vm_id=vm_id)
    if system_helper.is_aio_simplex():
        vm_actions_list = ('reboot', ['pause',
                                      'unpause'], ['suspend', 'resume'])
    else:
        vm_actions_list = ('reboot', ['pause',
                                      'unpause'], ['suspend', 'resume'],
                           'live_migrate', 'cold_migrate', 'cold_mig_revert')

    for vm_actions in vm_actions_list:
        if isinstance(vm_actions, str):
            vm_actions = (vm_actions, )
        LOG.tc_step("Perform following action(s) on vm {}: {}".format(
            vm_id, vm_actions))
        for action in vm_actions:
            vm_helper.perform_action_on_vm(vm_id, action=action)

        LOG.tc_step(
            "Verifying Secure boot is still enabled after vm action {}".format(
                vm_actions))
        _check_secure_boot_on_vm(vm_id=vm_id)
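
create_image_with_metadata and _check_secure_boot_on_vm live elsewhere in the module. A sketch of the image helper only, modelled on the glance_helper.create_image call in example #7; the return shape is inferred from image_id[0] being used as the volume source above, and the extra keyword arguments are assumptions:

def create_image_with_metadata(guest_os, property_key, values, disk_format,
                               container_format='bare'):
    """Create glance image(s) for the guest with the given metadata property applied."""
    image_ids = []
    for value in values:
        properties = {property_key: value}
        image_id = glance_helper.create_image(
            name='{}_{}'.format(guest_os, value),
            disk_format=disk_format,            # assumed to be forwarded to image-create
            container_format=container_format,  # assumed to be forwarded to image-create
            cleanup='function',
            **properties)[1]
        image_ids.append(image_id)
    return image_ids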