Example No. 1
def test_migrate_stress(check_hypervisors, boot_source, count):

    LOG.tc_step("Launch a VM from {}".format(boot_source))
    vm = vm_helper.boot_vm(name='{}-stress'.format(boot_source), cleanup='function',
                           source=boot_source)[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm)

    block_mig = boot_source == 'image'
    if not block_mig:
        LOG.tc_step("Attempt to block migration on boot-from-volume VM and ensure if fails")
        code = vm_helper.live_migrate_vm(vm_id=vm, block_migrate=True)[0]
        assert code > 0, "Block migration passed unexpectedly for boot-from-volume vm"
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm)

    LOG.tc_step("Live migrate and ping vm 1000 times")
    for i in range(count):
        LOG.info('Live migration iter{}'.format(i+1))
        vm_helper.live_migrate_vm(vm)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm)

    LOG.tc_step("Cold migrate vm followed by live migrate {} times".format(count))
    for i in range(count):
        LOG.info('Cold+live migration iter{}'.format(i + 1))
        vm_helper.cold_migrate_vm(vm_id=vm)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm)

        vm_helper.live_migrate_vm(vm)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm)
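
A minimal parametrization sketch for the test above. The option and fixture names below are assumptions for illustration only; the original suite may wire up check_hypervisors, boot_source and count differently (e.g. via its own config files).

import pytest

# conftest.py-style sketch (hypothetical --migrate-count option)
def pytest_addoption(parser):
    parser.addoption("--migrate-count", action="store", default="10",
                     help="number of live/cold migration iterations per test")

@pytest.fixture(params=['image', 'volume'])
def boot_source(request):
    return request.param

@pytest.fixture
def count(request):
    return int(request.config.getoption("--migrate-count"))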
Example No. 2
def test_migrate_4k_vm_positive(ephemeral, swap, cpu_pol, vcpus, vm_type,
                                ensure_sufficient_4k_pages):
    """
    Test live and cold migrate 4k vm with various vm storage configurations
    Args:
        ephemeral (int):
        swap (int):
        cpu_pol (str):
        vcpus (int):
        vm_type (str): boot-from image or volume vm
        ensure_sufficient_4k_pages (tuple): module test fixture to configure 4k pages

    Setups:
        - Select at least 2 hosts with specified storage backing. e.g., local_image, or remote
        - Ensure 2 hosts are in nova zone (move rest to cgcsauto zone if more than 2)
        - Configure the 2 hosts with large amount of 4k pages

    Test Steps:
        - Create flavor with specified ephemeral, swap, cpu policy and vcpus
        - Boot a 4k-pages vm of the given type with the flavor
        - Cold migrate the vm, ping it from natbox and re-check the page size
        - Live migrate the vm, ping it from natbox and re-check the page size

    """
    storage_backing, hosts = ensure_sufficient_4k_pages

    vm_id = _boot_vm_under_test(storage_backing, ephemeral, swap, cpu_pol,
                                vcpus, vm_type)

    LOG.tc_step("Cold migrate VM and ensure it succeeded")
    vm_helper.cold_migrate_vm(vm_id)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    __check_pagesize(vm_id)

    LOG.tc_step("Attempt to live migrate VM")
    vm_helper.live_migrate_vm(vm_id)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    __check_pagesize(vm_id)
Example No. 3
def _test_migrate_anti_affinity_vms_in_parallel():
    """
    Cold-migrate and live-migrate vms from an anti-affinity server group.
    Launch the heat stack if it is not already launched, find the vms in the
    anti-affinity group, then cold and live migrate them.

    """
    # First make sure heat stack is there:
    system_test_helper.launch_heat_stack()

    srv_grps_info = nova_helper.get_server_groups_info(headers=('Policies',
                                                                'Metadata',
                                                                'Members'))
    vms = []
    for group in srv_grps_info:
        policies, metadata, members = srv_grps_info[group]
        if members and 'anti-affinity' in policies and metadata[
                'wrs-sg:best_effort'] == 'false':
            if len(members) >= 10:
                vms = members[:9]  # slice; indexing with range() would raise TypeError
            break
    else:
        skip("There are no VMs in anti-affinity server group")

    check_vm_hosts(vms=vms, policy='anti_affinity')

    for vm_id in vms:
        vm_helper.wait_for_vm_status(vm_id=vm_id, check_interval=10)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id)
        vm_helper.live_migrate_vm(vm_id=vm_id)
        vm_helper.cold_migrate_vm(vm_id=vm_id)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id)

    check_vm_hosts(vms=vms, policy='anti_affinity')
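
Illustrative only: the real check_vm_hosts keyword lives in the suite's helper library. This standalone sketch shows the placement property a (best-effort) anti-affinity check enforces.

def hosts_satisfy_anti_affinity(vm_hosts, best_effort=False):
    """Strict anti-affinity: every VM runs on a distinct host."""
    unique_hosts = len(set(vm_hosts))
    if best_effort:
        # Assumption: a best-effort group is still expected to spread across
        # more than one host whenever it has more than one member.
        return unique_hosts > 1 or len(vm_hosts) <= 1
    return unique_hosts == len(vm_hosts)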
Example No. 4
def test_heat_vm_scale_after_actions(vm_scaling_stack, actions):
    """
    Test VM auto scaling after host/vm actions:
        Create a heat stack for auto scaling using NestedAutoScale.yaml, perform the
        given actions (swact/migrate/reboot), then perform vm scale up and down.

    Test Steps:
        - Create a heat stack for auto scaling vm
        - Verify heat stack is created successfully
        - Verify heat resources are created
        - live migrate the vm if not sx
        - cold migrate the vm if not sx
        - swact if not sx
        - reboot -f vm host
        - trigger auto scale by boosting cpu usage in the vm (using dd)
        - verify it scale up to the max number of vms (3)
        - trigger scale down by killing dd in the vm
        - verify the vm scale down to min number (1)
        - Delete Heat stack and verify resource deletion
    """
    stack_name, vm_id = vm_scaling_stack
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id)

    if not system_helper.is_aio_simplex():
        actions = actions.split('-')
        if "swact" in actions:
            LOG.tc_step("Swact before scale in/out")
            host_helper.swact_host()

        if "live_migrate" in actions:
            LOG.tc_step("live migrate vm before scale in/out")
            vm_helper.live_migrate_vm(vm_id)

        if "cold_migrate" in actions:
            LOG.tc_step("cold migrate vm before scale in/out")
            vm_helper.cold_migrate_vm(vm_id)

    if "host_reboot" in actions:
        if system_helper.is_aio_simplex():
            host_helper.reboot_hosts('controller-0')
            vm_helper.wait_for_vm_status(vm_id,
                                         status=VMStatus.ACTIVE,
                                         timeout=600,
                                         check_interval=10,
                                         fail_ok=False)
            vm_helper.wait_for_vm_pingable_from_natbox(
                vm_id, timeout=VMTimeout.DHCP_RETRY)
        else:
            LOG.tc_step("evacuate vm before scale in/out")
            vm_host = vm_helper.get_vm_host(vm_id=vm_id)
            vm_helper.evacuate_vms(host=vm_host, vms_to_check=vm_id)

    LOG.tc_step(
        "Wait for {} vms to auto scale out to {} after running dd in vm(s)".
        format(stack_name, 3))
    vm_helper.wait_for_auto_vm_scale_out(stack_name, expt_max=3)

    LOG.tc_step(
        "Wait for {} vms to auto scale in to {} after killing dd processes in vms"
        .format(stack_name, 1))
    vm_helper.wait_for_auto_vm_scale_in(stack_name, expt_min=1)
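
The dd-based CPU load that triggers the scale out above is normally run inside the guest; a representative pair of shell commands (an assumption for illustration, not taken from the suite's helpers):

# Busy-loop dd to drive guest CPU usage up (scale out), and the matching kill
# command to drop the load again (scale in).
START_CPU_LOAD = "dd if=/dev/zero of=/dev/null &"
STOP_CPU_LOAD = "pkill -x dd"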
Example No. 5
    def test_evacuate_vms(self, vms_):
        """
        Test evacuated vms
        Args:
            vms_: (fixture to create vms)

        Pre-requisites:
            - At least two up hypervisors on system

        Test Steps:
            - Create vms with various options:
                - vm booted from cinder volume,
                - vm booted from glance image,
                - vm booted from glance image, and have an extra cinder
                volume attached after launch,
                - vm booted from cinder volume with ephemeral and swap disks
            - Move vms onto same hypervisor
            - sudo reboot -f on the host
            - Ensure vms are successfully evacuated to other host
            - Live migrate vms back to original host
            - Check vms can move back, and vms are still reachable from natbox
            - Check system services are enabled and neutron agents are alive

        """
        vms, target_host = vms_

        pre_res_sys, pre_msg_sys = system_helper.wait_for_services_enable(
            timeout=20, fail_ok=True)
        up_hypervisors = host_helper.get_up_hypervisors()
        pre_res_neutron, pre_msg_neutron = \
            network_helper.wait_for_agents_healthy(
                up_hypervisors, timeout=20, fail_ok=True)

        LOG.tc_step(
            "reboot -f on vms host, ensure vms are successfully evacuated and "
            "host is recovered after reboot")
        vm_helper.evacuate_vms(host=target_host,
                               vms_to_check=vms,
                               wait_for_host_up=True,
                               ping_vms=True)

        LOG.tc_step("Check rebooted host can still host vm")
        vm_helper.live_migrate_vm(vms[0], destination_host=target_host)
        vm_helper.wait_for_vm_pingable_from_natbox(vms[0])

        LOG.tc_step("Check system services and neutron agents after {} "
                    "reboot".format(target_host))
        post_res_sys, post_msg_sys = system_helper.wait_for_services_enable(
            fail_ok=True)
        post_res_neutron, post_msg_neutron = \
            network_helper.wait_for_agents_healthy(hosts=up_hypervisors,
                                                   fail_ok=True)

        assert post_res_sys, "\nPost-evac system services stats: {}" \
                             "\nPre-evac system services stats: {}". \
            format(post_msg_sys, pre_msg_sys)
        assert post_res_neutron, "\nPost-evac neutron agents stats: {}" \
                                 "\nPre-evac neutron agents stats: {}". \
            format(post_msg_neutron, pre_msg_neutron)
Example No. 6
def obsolete_test_fip(fip_setups):
    """
    Test VM Floating IP over VM launch, live-migration, cold-migration, pause/unpause, etc.

    Args:
        fip_setups: test fixture

    Test Setups (module):
        - Create a floating ip
        - boot a vm
        - Attach floating ip to vm

    Test Steps:
        - Ping  VM FIP
        - Live-migrate the VM and verify ping from VM
        - Cold-migrate the VM and verify ping from VM
        - Pause and un-pause the VM and verify ping from VM
        - Suspend and resume the VM and verify ping from VM
        - Stop and start the VM and verify ping from VM
        - Reboot the VM and verify ping from VM
        - Ping  VM FIP

    Test Teardown:
        - Delete created FIP and vm (module)

    """
    vm_id, fip = fip_setups
    LOG.tc_step("Ping VM with Floating IP ")
    vm_helper.ping_ext_from_vm(vm_id, use_fip=True)

    LOG.tc_step("Live-migrate the VM and verify ping from VM")
    vm_helper.live_migrate_vm(vm_id)
    vm_helper.ping_ext_from_vm(vm_id, use_fip=True)

    LOG.tc_step("Cold-migrate the VM and verify ping from VM")
    vm_helper.cold_migrate_vm(vm_id)
    vm_helper.ping_ext_from_vm(vm_id, use_fip=True)

    LOG.tc_step("Pause and un-pause the VM and verify ping from VM")
    vm_helper.pause_vm(vm_id)
    vm_helper.unpause_vm(vm_id)
    vm_helper.ping_ext_from_vm(vm_id, use_fip=True)

    LOG.tc_step("Suspend and resume the VM and verify ping from VM")
    vm_helper.suspend_vm(vm_id)
    vm_helper.resume_vm(vm_id)
    vm_helper.ping_ext_from_vm(vm_id, use_fip=True)

    LOG.tc_step("Stop and start the VM and verify ping from VM")
    vm_helper.stop_vms(vm_id)
    vm_helper.start_vms(vm_id)
    vm_helper.ping_ext_from_vm(vm_id, use_fip=True)

    LOG.tc_step("Reboot the VM and verify ping from VM")
    vm_helper.reboot_vm(vm_id)
    vm_helper.ping_ext_from_vm(vm_id, use_fip=True)

    LOG.tc_step("Ping VM with Floating IP Ensure FIP reachable ")
    vm_helper.ping_ext_from_vm(vm_id, use_fip=True)
Example No. 7
def test_system_persist_over_host_reboot(host_type, stx_openstack_required):
    """
    Validate that the system inventory summary persists over a reboot of one of the hosts (controller, compute or storage)

    Test Steps:
        - capture Inventory summary for list of hosts on system service-list and neutron agent-list
        - reboot the host under test (active controller, a compute, or a storage node)
        - Wait for reboot to complete
        - Validate key items from inventory persist over reboot

    """
    if host_type == 'controller':
        host = system_helper.get_active_controller_name()
    elif host_type == 'compute':
        if system_helper.is_aio_system():
            skip("No compute host for AIO system")

        host = None
    else:
        hosts = system_helper.get_hosts(personality='storage')
        if not hosts:
            skip(msg="Lab has no storage nodes. Skip rebooting storage node.")

        host = hosts[0]

    LOG.tc_step("Pre-check for system status")
    system_helper.wait_for_services_enable()
    up_hypervisors = host_helper.get_up_hypervisors()
    network_helper.wait_for_agents_healthy(hosts=up_hypervisors)

    LOG.tc_step("Launch a vm")
    vm_id = vm_helper.boot_vm(cleanup='function')[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

    if host is None:
        host = vm_helper.get_vm_host(vm_id)

    LOG.tc_step("Reboot a {} node and wait for reboot completes: {}".format(host_type, host))
    HostsToRecover.add(host)
    host_helper.reboot_hosts(host)
    host_helper.wait_for_hosts_ready(host)

    LOG.tc_step("Check vm is still active and pingable after {} reboot".format(host))
    vm_helper.wait_for_vm_status(vm_id, status=VMStatus.ACTIVE, fail_ok=False)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id, timeout=VMTimeout.DHCP_RETRY)

    LOG.tc_step("Check neutron agents and system services are in good state after {} reboot".format(host))
    network_helper.wait_for_agents_healthy(up_hypervisors)
    system_helper.wait_for_services_enable()

    if host in up_hypervisors:
        LOG.tc_step("Check {} can still host vm after reboot".format(host))
        if vm_helper.get_vm_host(vm_id) != host:
            time.sleep(30)
            vm_helper.live_migrate_vm(vm_id, destination_host=host)
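
The wait_for_* keywords used above all follow a poll-until-true pattern; a generic, self-contained sketch of that pattern (the real helpers also return a message and honour fail_ok):

import time

def wait_for(predicate, timeout=60, check_interval=5):
    """Poll predicate() until it returns truthy or timeout (seconds) expires."""
    end_time = time.time() + timeout
    while time.time() < end_time:
        if predicate():
            return True
        time.sleep(check_interval)
    return False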
Example No. 8
def pb_migrate_test(backup_info, con_ssh, vm_ids=None):
    """
    Run migration test before doing system backup.

    Args:
        backup_info:
            - options for doing backup

        con_ssh:
            - current ssh connection

        vm_ids
    Return:
        None
    """

    hypervisors = host_helper.get_up_hypervisors(con_ssh=con_ssh)
    if len(hypervisors) < 2:
        LOG.info(
            'Only {} hypervisors; not enough to test migration'.format(
                len(hypervisors)))
        LOG.info('Skip migration test')
        return 0
    else:
        LOG.debug('There are {} hypervisors'.format(len(hypervisors)))

    LOG.info('Randomly choose some VMs and do migrate:')

    target = random.choice(vm_ids)
    LOG.info('-OK, test migration of VM:{}'.format(target))

    original_host = vm_helper.get_vm_host(target)
    LOG.info('Original host:{}'.format(original_host))

    vm_helper.live_migrate_vm(target)
    current_host = vm_helper.get_vm_host(target)
    LOG.info('After live-migration, host:{}'.format(current_host))

    if original_host == current_host:
        LOG.info('backup_info:{}'.format(backup_info))
        LOG.warn(
            'VM is still on its original host, live-migration failed? original host:{}'
            .format(original_host))

    original_host = current_host
    vm_helper.cold_migrate_vm(target)
    current_host = vm_helper.get_vm_host(target)
    LOG.info('After cold-migration, host:{}'.format(current_host))
    if original_host == current_host:
        LOG.warn(
            'VM is still on its original host, cold-migration failed? original host:{}'
            .format(original_host))
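
The moved-or-not check above can be factored into a small helper. A framework-agnostic sketch where migrate and get_host are injected callables (e.g. vm_helper.live_migrate_vm and vm_helper.get_vm_host from the examples above):

def migrate_and_report(vm_id, migrate, get_host, action='migration'):
    """Run a migration callable and return True if the VM changed host."""
    host_before = get_host(vm_id)
    migrate(vm_id)
    host_after = get_host(vm_id)
    if host_before == host_after:
        print('{} of VM {} left it on {}; it may have failed'.format(
            action, vm_id, host_before))
    return host_before != host_after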
Example No. 9
def live_migrate_vm(end_time, end_event):
    ded_flv = nova_helper.create_flavor(name='dedicated', vcpus=2)[1]
    nova_helper.set_flavor(ded_flv, **{FlavorSpec.CPU_POLICY: 'dedicated'})

    vm_id = vm_helper.boot_vm(name='live-mig', flavor=ded_flv)[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

    while time.time() < end_time:
        if end_event.is_set():
            assert 0, "Other thread failed. Terminate live-mgiration thread."

        time.sleep(15)
        LOG.tc_step("Live migrate live-mig vm")
        vm_helper.live_migrate_vm(vm_id=vm_id)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
Example No. 10
def _perform_nova_actions(vms_dict, flavors, vfs=None):
    for vm_name, vm_id in vms_dict.items():
        LOG.tc_step("Cold migrate VM {} ....".format(vm_name))
        vm_helper.cold_migrate_vm(vm_id=vm_id)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

        LOG.tc_step("Live migrate VM {} ....".format(vm_name))
        expt_codes = [0] if 'vm_no_crypto' in vm_name else [1, 6]
        code, msg = vm_helper.live_migrate_vm(vm_id=vm_id, fail_ok=True)
        assert code in expt_codes, "Unexpected live migrate result for {}. " \
                                   "Expected codes: {}. Actual: {}".format(vm_name, expt_codes, msg)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

        LOG.tc_step("Suspend/Resume VM {} ....".format(vm_name))
        vm_helper.suspend_vm(vm_id)
        vm_helper.resume_vm(vm_id)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

        if vfs is None:
            resize_flavor_id = flavors["flavor_resize_qat_vf_1"] if "no_crypto" not in vm_name else \
                flavors["flavor_resize_none"]
        else:
            resize_flavor_id = flavors['flavor_resize_qat_vf_{}'.format(vfs)]

        LOG.info("Resizing VM {} to new flavor {} ...".format(vm_name, resize_flavor_id))
        vm_helper.resize_vm(vm_id, resize_flavor_id)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id)
Example No. 11
def _check_anti_affinity_vms():
    storage_backing, hosts = keywords.host_helper.get_storage_backing_with_max_hosts()
    best_effort = len(hosts) < 3
    anti_affinity_vms = nova_helper.get_server_group_info(group_name='grp_anti_affinity', headers='Members')[0]

    check_vm_hosts(vms=anti_affinity_vms, policy='anti_affinity', best_effort=best_effort)

    vm_hosts = []
    for vm_id in anti_affinity_vms:
        vm_helper.wait_for_vm_status(vm_id=vm_id, check_interval=10)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id)

        vm_helper.live_migrate_vm(vm_id=vm_id)
        vm_helper.cold_migrate_vm(vm_id=vm_id)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id)

        vm_hosts.append(vm_helper.get_vm_host(vm_id))

    return vm_hosts, anti_affinity_vms
Example No. 12
def test_live_migrate_vm_positive(hosts_per_stor_backing, storage_backing,
                                  ephemeral, swap, cpu_pol, vcpus, vm_type,
                                  block_mig):
    """
    Skip Condition:
        - Less than two hosts have specified storage backing

    Test Steps:
        - create flavor with specified vcpus, cpu_policy, ephemeral, swap,
        storage_backing
        - boot vm from specified boot source with above flavor
        - (attach volume to vm if 'image_with_vol', specified in vm_type)
        - Live migrate the vm with specified block_migration flag
        - Verify VM is successfully live migrated to different host

    Teardown:
        - Delete created vm, volume, flavor

    """
    if len(hosts_per_stor_backing.get(storage_backing, [])) < 2:
        skip("Less than two hosts have {} storage backing".format(
            storage_backing))

    vm_id = _boot_vm_under_test(storage_backing, ephemeral, swap, cpu_pol,
                                vcpus, vm_type)

    prev_vm_host = vm_helper.get_vm_host(vm_id)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

    vm_disks = vm_helper.get_vm_devices_via_virsh(vm_id)
    file_paths, content = touch_files_under_vm_disks(vm_id=vm_id,
                                                     ephemeral=ephemeral,
                                                     swap=swap, vm_type=vm_type,
                                                     disks=vm_disks)

    LOG.tc_step("Live migrate VM and ensure it succeeded")
    # block_mig = True if boot_source == 'image' else False
    code, output = vm_helper.live_migrate_vm(vm_id, block_migrate=block_mig)
    assert 0 == code, "Live migrate is not successful. Details: {}".format(
        output)

    post_vm_host = vm_helper.get_vm_host(vm_id)
    assert prev_vm_host != post_vm_host

    LOG.tc_step("Ensure vm is pingable from NatBox after live migration")
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

    LOG.tc_step("Check files after live migrate")
    check_helper.check_vm_files(vm_id=vm_id, storage_backing=storage_backing,
                                ephemeral=ephemeral, swap=swap,
                                vm_type=vm_type, vm_action='live_migrate',
                                file_paths=file_paths, content=content,
                                disks=vm_disks, prev_host=prev_vm_host,
                                post_host=post_vm_host)
Example No. 13
def _check_affinity_vms():
    affinity_vms = nova_helper.get_server_group_info(group_name='grp_affinity', headers='Members')[0]
    vm_host = check_vm_hosts(vms=affinity_vms, policy='affinity')[0]

    for vm_id in affinity_vms:
        vm_helper.wait_for_vm_status(vm_id=vm_id, check_interval=10)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id)

        res, out = vm_helper.live_migrate_vm(vm_id=vm_id, fail_ok=True)
        assert res in (1, 2, 6), out

        res, out = vm_helper.cold_migrate_vm(vm_id=vm_id, fail_ok=True)
        assert res in (1, 2), out

        vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id)

    return vm_host, affinity_vms
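
Counterpart of the anti-affinity sketch shown earlier: for an affinity group the placement check reduces to all members sharing one host (illustrative only; the suite's check_vm_hosts remains the authoritative keyword).

def hosts_satisfy_affinity(vm_hosts):
    """Strict affinity: every VM in the group runs on the same host."""
    return len(set(vm_hosts)) <= 1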
Example No. 14
    def test_shared_cpu_migrate(self, config_host_cpus):
        """
        Test that a vm with shared cpus enabled can successfully live migrate to a node with shared
        vcpus enabled, and fails when it tries to migrate to a node with shared vcpus disabled

        Setup:
            - Skip if there are less than 3 hosts
            - Configure at least one compute to disable shared vcpus
            - Configure at least two computes to have shared cpus via
                'system host-cpu-modify -f shared p0=1,p1=1 <hostname>' (module)

        Test Steps:
            - Create flavor with given number of vcpus
            - Add specific cpu_policy, shared_vcpu values to flavor extra specs
            - Boot a vm with the flavor
            - Ensure vm is booted successfully
            - Perform a non-forced live migration on vm. Ensure that vm is on a shared cpu host.
            - Perform a non-forced cold migration on vm. Ensure that vm is on a shared cpu host.
            - Force live-migrate vm to host with shared vcpus enabled. The migration should succeed
                - Ensure that the vm is on a different host
            - Force live-migrate vm to the host with disabled shared vcpus. The migration should fail
                - Verify error by ensuring that vm is still on same host and grep nova-scheduler logs for
                'CANNOT SCHEDULE'

        Teardown:
            - Delete created vm if any (function)
            - Revert any hosts that were changed for this test

        """

        storage_backing, disable_shared_cpu_host, enabled_shared_hosts = config_host_cpus

        LOG.tc_step("Create a flavor with given number of vcpus")
        flavor = create_shared_flavor(vcpus=2, storage_backing=storage_backing, shared_vcpu=1)
        nova_helper.set_flavor(flavor, **{FlavorSpec.MEM_PAGE_SIZE: 2048})

        LOG.tc_step("Boot a vm with above flavor, and ensure vm is booted successfully")
        vm_id = vm_helper.boot_vm(name='shared_cpu', flavor=flavor, fail_ok=False, cleanup='function')[1]
        origin_host = vm_helper.get_vm_host(vm_id)
        assert origin_host in enabled_shared_hosts, "VM not booted on shared cpu host"

        LOG.tc_step("Perform a non-forced live migration onto an enabled shared cpu host, expect success")
        vm_helper.live_migrate_vm(vm_id)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
        new_host = vm_helper.get_vm_host(vm_id)
        assert new_host in enabled_shared_hosts, "VM not migrated on shared cpu host"

        LOG.tc_step("Perform a non-forced cold migration onto an enabled shared cpu host, expect success")
        vm_helper.cold_migrate_vm(vm_id)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
        new_host = vm_helper.get_vm_host(vm_id)
        assert new_host in enabled_shared_hosts, "VM not migrated on shared cpu host"

        if new_host != enabled_shared_hosts[0]:
            dest_host = enabled_shared_hosts[0]
        else:
            dest_host = enabled_shared_hosts[1]

        LOG.tc_step("Perform second live migration onto an enabled shared cpu host, expect success")
        vm_helper.live_migrate_vm(vm_id, destination_host=dest_host)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

        LOG.tc_step("Perform third live migration onto a disabled shared cpu host, expect failure")
        code = vm_helper.live_migrate_vm(vm_id, destination_host=disable_shared_cpu_host, fail_ok=True)[0]
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
        assert code > 0, "Migrate not rejected as expected"
        assert vm_helper.get_vm_host(vm_id) == dest_host, "VM not on same compute node"

        LOG.tc_step("Verify second live migration failed via nova-scheduler.log")
        req_id = get_failed_live_migrate_action_id(vm_id)
        grepcmd = "grep 'CANNOT SCHEDULE' /var/log/nova/nova-scheduler.log | grep {}".format(req_id)
        control_ssh = ControllerClient.get_active_controller()
        control_ssh.exec_cmd(grepcmd, fail_ok=False)
Example No. 15
    def test_launch_vm_with_shared_cpu(self, vcpus, shared_vcpu, error, add_shared_cpu, origin_total_vcpus):
        """
        Test boot vm cli returns error when system does not meet the shared cpu requirement(s) in given flavor

        Args:
            vcpus (int): number of vcpus to set when creating flavor
            shared_vcpu (int):
            error
            add_shared_cpu
            origin_total_vcpus

        Setup:
            - Configure one compute to have shared cpus via 'system host-cpu-modify -f shared p0=1,p1=1 <hostname>'

        Test Steps:
            - Create flavor with given number of vcpus
            - Add specific cpu_policy, number of numa nodes, numa_node.0, shared_vcpu values to flavor extra specs
            - Boot a vm with the flavor
            - Ensure vm is booted successfully
            - Validate the shared cpu
            - Live migrate the vm
            - Re-validate the shared cpu
            - Cold migrate the vm
            - Re-validate the shared cpu

        Teardown:
            - Delete created vm if any (function)
            - Delete created volume if any (module)
            - Set shared cpus to 0 (default setting) on the compute node under test (module)

        """
        storage_backing, shared_cpu_hosts, max_vcpus_per_proc = add_shared_cpu
        LOG.tc_step("Create a flavor with given number of vcpus")

        flavor = create_shared_flavor(vcpus, storage_backing=storage_backing, shared_vcpu=shared_vcpu)

        LOG.tc_step("Boot a vm with above flavor")
        code, vm_id, output = vm_helper.boot_vm(name='shared_cpu', flavor=flavor, fail_ok=True, cleanup='function')

        if error:
            LOG.tc_step("Check vm boot fail")
            assert 1 == code, "Expect error vm. Actual result: {}".format(output)
            LOG.tc_step("Ensure vm is in error state with expected fault message in nova show")
            vm_helper.wait_for_vm_values(vm_id, 10, status='ERROR', fail_ok=False)
            actual_fault = vm_helper.get_vm_fault_message(vm_id)
            expt_fault = 'shared vcpu with 0 requested dedicated vcpus is not allowed'
            assert expt_fault in actual_fault, "Expected fault message mismatch"
            return

        LOG.tc_step("Check vm booted successfully and shared cpu indicated in vm-topology")
        assert 0 == code, "Boot vm failed. Details: {}".format(output)

        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
        check_shared_vcpu(vm=vm_id, shared_vcpu=shared_vcpu, vcpus=vcpus, prev_total_vcpus=origin_total_vcpus)

        # live migrate
        LOG.tc_step("Live migrate vm and then ping vm from NatBox")
        vm_helper.live_migrate_vm(vm_id)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
        check_shared_vcpu(vm=vm_id, shared_vcpu=shared_vcpu, vcpus=vcpus, prev_total_vcpus=origin_total_vcpus)

        # cold migrate
        LOG.tc_step("Cold migrate vm and then ping vm from NatBox")
        vm_helper.cold_migrate_vm(vm_id)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
        check_shared_vcpu(vm=vm_id, shared_vcpu=shared_vcpu, vcpus=vcpus, prev_total_vcpus=origin_total_vcpus)
Example No. 16
def _test_ea_max_vms_with_crypto_vfs(_flavors, hosts_pci_device_info):
    """
    Verify maximum number of guests with Crypto VFs can be launched and
    stabilized

    Args:
        _flavors:
        hosts_pci_device_info:

    Returns:

    """

    LOG.info("Pci device  {}".format(hosts_pci_device_info))

    flavor_id = _flavors['flavor_qat_vf_4']
    # Assume we only have 1 Coleto Creek pci device on the system
    crypto_hosts = list(hosts_pci_device_info.keys())
    host = crypto_hosts[0]
    vf_info = hosts_pci_device_info[host][0]
    vf_device_id = vf_info['vf_device_id']
    vf_count = vf_info['vf_count']
    LOG.info("Vf_device_id {}, count: {}".format(vf_device_id, vf_count))

    # number of vms to launch to max out the total configured device VFs. Each VM is launched with 4 Vfs. 4 Vfs in each
    # compute are reserved for resize nova action.

    number_of_vms = int((vf_count - 4 * len(crypto_hosts)) / 4)

    mgmt_net_id = network_helper.get_mgmt_net_id()
    tenant_net_id = network_helper.get_tenant_net_id()
    vif_type = get_vif_type()

    nics = [{'net-id': mgmt_net_id},
            {'net-id': tenant_net_id, 'vif-model': vif_type}]

    vm_helper.ensure_vms_quotas(number_of_vms + 10)

    vms = {}
    LOG.tc_step("Launch {} vms using flavor flavor_qat_vf_4 and nics {}".format(number_of_vms, nics))
    for i in range(1, number_of_vms + 1):
        vm_name = 'vm_crypto_{}'.format(i)
        vm_id = vm_helper.boot_vm(cleanup='function', name='vm_crypto_{}'.format(i), nics=nics, flavor=flavor_id)[1]
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
        vms[vm_name] = vm_id

    for vm_name_, vm_id_ in vms.items():
        vm_host = vm_helper.get_vm_host(vm_id_)
        host_dev_name = hosts_pci_device_info[vm_host][0]['device_name']
        expt_qat_devs = {host_dev_name: 4}
        check_helper.check_qat_service(vm_id=vm_id_, qat_devs=expt_qat_devs)

        LOG.info("Checking if other host has room for cold migrate vm {}".format(vm_name_))
        for host_ in crypto_hosts:
            if host_ != vm_host:
                total_vfs, used_vfs = network_helper.get_pci_device_vfs_counts_for_host(
                    host_, device_id=vf_device_id, fields=('pci_vfs_configured', 'pci_vfs_used'))

                if int(total_vfs) - int(used_vfs) >= 4:
                    LOG.info("Migrate to other host is possible")
                    expt_res = 0
                    break
        else:
            LOG.info("Migrate to other host is not possible")
            expt_res = 2

        LOG.tc_step("Attempt to cold migrate {} and ensure it {}".format(vm_name_,
                                                                         'succeeds' if expt_res == '0' else 'fails'))
        rc, msg = vm_helper.cold_migrate_vm(vm_id=vm_id_, fail_ok=True)
        assert expt_res == rc, "Expected: {}. Actual: {}".format(expt_res, msg)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id_)

        LOG.tc_step("Suspend/resume VM {} ....".format(vm_name_))
        vm_helper.suspend_vm(vm_id_)
        vm_helper.resume_vm(vm_id_)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id_)

        # vm_host = nova_helper.get_vm_host(vm_id_)
        # total, used = network_helper.get_pci_device_vfs_counts_for_host(vm_host, vf_device_id)[0]
        # if (total - int(used)) >= 4:
        #     expt_res = 0

        flavor_resize_id = _flavors['flavor_resize_qat_vf_4']
        LOG.tc_step("Resize VM {} to new flavor {} with increased memory...".format(vm_name_, flavor_resize_id))
        vm_helper.resize_vm(vm_id_, flavor_resize_id)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id_)

        # else:
        #     expt_res = 1
        #     LOG.info("Resizing of vm {} skipped; host {} max out vfs; used vfs = {}".format(vm_name_, vm_host, used))

        LOG.tc_step("Attempt to live migrate {} and ensure it's rejected".format(vm_name_))
        rc, msg = vm_helper.live_migrate_vm(vm_id=vm_id_, fail_ok=True)
        assert 6 == rc, "Expect live migration to fail on vm with pci alias device. Actual: {}".format(msg)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id_)

        check_helper.check_qat_service(vm_id=vm_id_, qat_devs=expt_qat_devs)
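
For concreteness, the VF budgeting at the top of this example works out as follows (the numbers are illustrative, not taken from a real lab):

# With e.g. 32 configured VFs and 2 crypto-capable hosts, 4 VFs per host are
# held back for the resize action and each VM consumes 4 VFs.
vf_count, n_crypto_hosts = 32, 2
number_of_vms = (vf_count - 4 * n_crypto_hosts) // 4   # -> 6 VMs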
Example No. 17
def test_snat_vm_actions(snat_setups, snat):
    """
    Test VM external access over VM launch, live-migration, cold-migration, pause/unpause, etc

    Args:
        snat_setups (tuple): returns vm id and fip. Enable snat, create vm and attach floating ip.

    Test Setups (module):
        - Find a tenant router that is dvr or non-dvr based on the parameter
        - Enable SNAT on tenant router
        - boot a vm and attach a floating ip
        - Ping vm from NatBox

    Test Steps:
        - Enable/Disable SNAT based on snat param
        - Ping from VM to 8.8.8.8
        - wget <lab_fip> to VM
        - scp from NatBox to VM
        - Live-migrate the VM and verify ping from VM
        - Cold-migrate the VM and verify ping from VM
        - Pause and un-pause the VM and verify ping from VM
        - Suspend and resume the VM and verify ping from VM
        - Stop and start the VM and verify ping from VM
        - Reboot the VM and verify ping from VM

    Test Teardown:
        - Enable snat for next test in the same module     (function)
        - Delete the created vm     (module)
        - Disable snat  (module)

    """
    vm_ = snat_setups[0]
    snat = (snat == 'snat_enabled')
    LOG.tc_step("Update tenant router external gateway to set SNAT to {}".format(snat))
    network_helper.set_router_gateway(enable_snat=snat)

    # Allow router update to complete, since we've seen cases where ping vm pass but ssh fail
    time.sleep(30)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_, timeout=60, use_fip=snat)

    LOG.tc_step("Ping from VM {} to 8.8.8.8".format(vm_))
    vm_helper.ping_ext_from_vm(vm_, use_fip=True)

    LOG.tc_step("wget to VM {}".format(vm_))
    with vm_helper.ssh_to_vm_from_natbox(vm_id=vm_, use_fip=True) as vm_ssh:
        vm_ssh.exec_cmd('wget google.ca', fail_ok=False)

    LOG.tc_step("scp from NatBox to VM {}".format(vm_))
    vm_fip = network_helper.get_external_ips_for_vms(vms=vm_)[0]
    natbox_ssh = NATBoxClient.get_natbox_client()
    natbox_ssh.scp_on_source(source_path='test', dest_user='******', dest_ip=vm_fip, dest_path='/tmp/',
                             dest_password='******', timeout=30)

    LOG.tc_step("Live-migrate the VM and verify ping from VM")
    vm_helper.live_migrate_vm(vm_)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_, use_fip=snat)
    vm_helper.ping_ext_from_vm(vm_, use_fip=True)

    LOG.tc_step("Cold-migrate the VM and verify ping from VM")
    vm_helper.cold_migrate_vm(vm_)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_, use_fip=snat)
    vm_helper.ping_ext_from_vm(vm_, use_fip=True)

    LOG.tc_step("Pause and un-pause the VM and verify ping from VM")
    vm_helper.pause_vm(vm_)
    vm_helper.unpause_vm(vm_)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_, use_fip=snat)
    vm_helper.ping_ext_from_vm(vm_, use_fip=True)

    LOG.tc_step("Suspend and resume the VM and verify ping from VM")
    vm_helper.suspend_vm(vm_)
    vm_helper.resume_vm(vm_)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_, use_fip=snat)
    vm_helper.ping_ext_from_vm(vm_, use_fip=True)

    LOG.tc_step("Stop and start the VM and verify ping from VM")
    vm_helper.stop_vms(vm_)
    vm_helper.start_vms(vm_)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_, use_fip=snat)
    vm_helper.ping_ext_from_vm(vm_, use_fip=True)

    LOG.tc_step("Reboot the VM and verify ping from VM")
    vm_helper.reboot_vm(vm_)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_, use_fip=snat)
    vm_helper.ping_ext_from_vm(vm_, use_fip=True)

    LOG.tc_step("Resize the vm to a flavor with 2 dedicated cpus and verify ping from VM")
    new_flv = nova_helper.create_flavor(name='ded', vcpus=2)[1]
    ResourceCleanup.add('flavor', new_flv, scope='module')
    nova_helper.set_flavor(new_flv, **{FlavorSpec.CPU_POLICY: 'dedicated'})

    vm_helper.resize_vm(vm_, new_flv)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_, use_fip=snat)
    vm_helper.ping_ext_from_vm(vm_, use_fip=True)
Example No. 18
def test_dpdk_live_migrate_latency(ovs_dpdk_1_core, launch_instances,
                                   no_simplex, no_duplex):
    con_ssh = ssh.ControllerClient.get_active_controller()
    prev_st = None
    prev_et = None
    res = list()

    for i in range(20):
        LOG.tc_step("Start of iter {}".format(i))
        vm_host = vm_helper.get_vm_host(launch_instances)
        cmd_get_pod_name = ("kubectl get pods -n openstack | "
                            "grep --color=never nova-compute-{} | "
                            "awk '{{print$1}}'".format(vm_host))
        pod_name = con_ssh.exec_cmd(cmd=cmd_get_pod_name)[1].strip()
        cmd_get_start_date = (
            "kubectl -n openstack logs {} -c nova-compute | "
            "grep --color=never 'instance: {}' | "
            "grep --color=never 'pre_live_migration on destination host' | "
            "tail -1 | "
            "awk '{{print $1 \" \" $2}}'".format(pod_name, launch_instances))
        cmd_get_end_date = (
            "kubectl -n openstack logs {} -c nova-compute | "
            "grep --color=never 'instance: {}' | "
            "egrep --color=never "
            "'Migrating instance to [a-zA-Z]+-[0-9] finished successfully' | "
            "tail -1 | "
            "awk '{{print $1 \" \" $2}}'".format(pod_name, launch_instances))

        vm_helper.live_migrate_vm(vm_id=launch_instances)

        st = con_ssh.exec_cmd(cmd=cmd_get_start_date)[1]
        et = con_ssh.exec_cmd(cmd=cmd_get_end_date)[1]
        st_date = datetime.datetime.strptime(st, '%Y-%m-%d %H:%M:%S.%f')
        et_date = datetime.datetime.strptime(et, '%Y-%m-%d %H:%M:%S.%f')
        if i == 0:
            prev_st = st_date
            prev_et = et_date
        elif i > 0:
            if st_date <= prev_st or et_date <= prev_et:
                msg = ("new start time {} is less "
                       "or equal than old start time {}\n"
                       "or new end time {} is less "
                       "or equal than old end time "
                       "{}".format(st_date, prev_st, et_date, prev_et))
                LOG.error(msg)
                raise Exception(msg)
            else:
                prev_st = st_date
                prev_et = et_date
        diff = et_date - st_date
        LOG.info("\nstart time = {}\nend time = {}".format(st, et))
        LOG.info("\ndiff = {}".format(diff))
        res.append(diff)

    def calc_avg(lst):
        rtrn_sum = datetime.timedelta()
        for idx, duration in enumerate(lst):
            LOG.info("Iter {}: {}".format(idx, duration))
            rtrn_sum += duration
        return rtrn_sum / len(lst)

    final_res = calc_avg(res)
    LOG.info("Avg time is : {}".format(final_res))
Example No. 19
    def _prepare_test(vm1, vm2, get_hosts, with_router):
        """
        VMs:
            VM1: under test (primary tenant)
            VM2: traffic observer
        """

        vm1_host = vm_helper.get_vm_host(vm1)
        vm2_host = vm_helper.get_vm_host(vm2)
        vm1_router = network_helper.get_tenant_router(
            auth_info=Tenant.get_primary())
        vm2_router = network_helper.get_tenant_router(
            auth_info=Tenant.get_secondary())
        vm1_router_host = network_helper.get_router_host(router=vm1_router)
        vm2_router_host = network_helper.get_router_host(router=vm2_router)
        targets = list(get_hosts)

        if vm1_router_host == vm2_router_host:
            end_time = time.time() + 360
            while time.time() < end_time:
                vm1_router_host = network_helper.get_router_host(
                    router=vm1_router)
                vm2_router_host = network_helper.get_router_host(
                    router=vm2_router)
                if vm1_router_host != vm2_router_host:
                    break
            else:
                assert vm1_router_host != vm2_router_host, "two routers are located on the same compute host"

        if not with_router:
            """
            Setup:
                VM1 on COMPUTE-A
                VM2 not on COMPUTE-A
                ROUTER1 on COMPUTE-B
                ROUTER2 on COMPUTE-C
            """
            if len(get_hosts) < 3:
                skip(
                    "Lab not suitable for without_router, requires at least three hypervisors"
                )

            LOG.tc_step(
                "Ensure VM2, ROUTER2 not on COMPUTE-A, for simplicity, ensure they are on the same compute"
            )
            if vm2_host != vm2_router_host:
                vm_helper.live_migrate_vm(vm_id=vm2,
                                          destination_host=vm2_router_host)
                vm2_host = vm_helper.get_vm_host(vm2)
                assert vm2_host == vm2_router_host, "live-migration failed"
            host_observer = vm2_host

            LOG.tc_step(
                "Ensure VM1 and (ROUTER1, VM2, ROUTER2) are on different hosts"
            )
            if vm1_router_host in targets:
                # ensure vm1_router_host is not selected for vm1
                # vm1_router_host can be backed by any type of storage
                targets.remove(vm1_router_host)
            if vm2_host in targets:
                targets.remove(vm2_host)

            if vm1_host in targets:
                host_src_evacuation = vm1_host
            else:
                assert targets, "no suitable compute for vm1, after excluding ROUTER1, VM2, ROUTER2 's hosts"
                host_src_evacuation = targets[0]
                vm_helper.live_migrate_vm(vm_id=vm1,
                                          destination_host=host_src_evacuation)
                vm1_host = vm_helper.get_vm_host(vm1)
                assert vm1_host == host_src_evacuation, "live-migration failed"

            # verify setup
            vm1_host = vm_helper.get_vm_host(vm1)
            vm2_host = vm_helper.get_vm_host(vm2)
            vm1_router_host = network_helper.get_router_host(router=vm1_router)
            vm2_router_host = network_helper.get_router_host(router=vm2_router)
            assert vm1_router_host != vm1_host and vm2_host != vm1_host and vm2_router_host != vm1_host, \
                "setup is incorrect"
        else:
            """
            Setup:
                VM1, ROUTER1 on COMPUTE-A
                VM2 not on COMPUTE-A
                ROUTER2 on COMPUTE-B 
            """
            LOG.tc_step("Ensure VM1, ROUTER1 on COMPUTE-A")

            # VM1 must be sitting on ROUTER1's host, thus vm1_router_host must be backed by local_image
            assert vm1_router_host in targets, "vm1_router_host is not backed by local_image"

            if vm1_host != vm1_router_host:
                vm_helper.live_migrate_vm(vm_id=vm1,
                                          destination_host=vm1_router_host)
                vm1_host = vm_helper.get_vm_host(vm1)
                assert vm1_host == vm1_router_host, "live-migration failed"
            host_src_evacuation = vm1_host

            LOG.tc_step(
                "Ensure VM2, ROUTER2 not on COMPUTE-A, for simplicity, ensure they are on the same compute"
            )
            targets.remove(host_src_evacuation)
            if vm2_host in targets:
                host_observer = vm2_host
            else:
                assert targets, "no suitable compute for vm2, after excluding COMPUTE-A"
                host_observer = targets[0]
                vm_helper.live_migrate_vm(vm_id=vm2,
                                          destination_host=host_observer)
                vm2_host = vm_helper.get_vm_host(vm2)
                assert vm2_host == host_observer, "live-migration failed"

            # verify setup
            vm1_host = vm_helper.get_vm_host(vm1)
            vm2_host = vm_helper.get_vm_host(vm2)
            vm1_router_host = network_helper.get_router_host(router=vm1_router)
            vm2_router_host = network_helper.get_router_host(router=vm2_router)
            assert vm1_host == vm1_router_host and vm2_host != vm1_host and vm2_router_host != vm1_host, \
                "setup is incorrect"

        assert vm1_host == host_src_evacuation and vm2_host == host_observer, "setup is incorrect"
        LOG.info("Evacuate: VM {} on {}, ROUTER on {}".format(
            vm1, vm1_host, vm1_router_host))
        LOG.info("Observer: VM {} on {}, ROUTER on {}".format(
            vm2, vm2_host, vm2_router_host))

        return host_src_evacuation, host_observer
Example No. 20
    def operation_live(vm_id_):
        code, msg = vm_helper.live_migrate_vm(vm_id=vm_id_)
        assert 0 == code, msg
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id_)
        # kernel routing
        vm_helper.ping_between_routed_vms(vm_id, vm_observer, vshell=False)
Example No. 21
def test_vm_vcpu_model(vcpu_model, vcpu_source, boot_source,
                       cpu_models_supported):
    """
    Test that the vcpu model specified in the flavor or image is applied to the vm. If the host does
    not support the specified vcpu model, a proper error message should be displayed in nova show.

    Args:
        vcpu_model
        vcpu_source
        boot_source

    Test Steps:
        - Set flavor extra spec or image metadata with given vcpu model.
        - Boot a vm from volume/image
        - Stop and then start vm and ensure that it retains its cpu model
        - If vcpu model is supported by host,
            - Check vcpu model specified in flavor/image is used by vm via virsh, ps aux (and /proc/cpuinfo)
            - Live migrate vm and check vcpu model again
            - Cold migrate vm and check vcpu model again
        - If vcpu model is not supported by host, check proper error message is included if host does not
            support specified vcpu model.
    Teardown:
        - Delete created vm, volume, image, flavor

    """
    cpu_models_multi_host, all_cpu_models_supported = cpu_models_supported
    flv_model = vcpu_model if vcpu_source == 'flavor' else None
    img_model = vcpu_model if vcpu_source == 'image' else None
    code, vm, msg = _boot_vm_vcpu_model(flv_model=flv_model,
                                        img_model=img_model,
                                        boot_source=boot_source)

    is_supported = (not vcpu_model) or (vcpu_model == 'Passthrough') or (
        vcpu_model in all_cpu_models_supported)
    if not is_supported:
        LOG.tc_step(
            "Check vm in error state due to vcpu model unsupported by hosts.")
        assert 1 == code, "boot vm cli exit code is not 1. Actual fail reason: {}".format(
            msg)

        expt_fault = VCPUSchedulerErr.CPU_MODEL_UNAVAIL
        res_bool, vals = vm_helper.wait_for_vm_values(vm,
                                                      10,
                                                      regex=True,
                                                      strict=False,
                                                      status='ERROR')
        err = vm_helper.get_vm_fault_message(vm)

        assert res_bool, "VM did not reach expected error state. Actual: {}".format(
            vals)
        assert re.search(expt_fault, err), "Incorrect fault reported. Expected: {} Actual: {}" \
            .format(expt_fault, err)
        return

    # System supports specified vcpu, continue to verify
    expt_arch = None
    if vcpu_model == 'Passthrough':
        host = vm_helper.get_vm_host(vm)
        expt_arch = host_helper.get_host_cpu_model(host)

    LOG.tc_step("Check vm is launched with expected vcpu model")
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm)
    check_vm_cpu_model(vm_id=vm, vcpu_model=vcpu_model, expt_arch=expt_arch)

    multi_hosts_supported = (not vcpu_model) or (vcpu_model in cpu_models_multi_host) or \
                            (vcpu_model == 'Passthrough' and cpu_models_multi_host)
    # TC5141
    LOG.tc_step(
        "Stop and then restart vm and check if it retains its vcpu model")
    vm_helper.stop_vms(vm)
    vm_helper.start_vms(vm)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm)
    check_vm_cpu_model(vm_id=vm, vcpu_model=vcpu_model, expt_arch=expt_arch)

    if not multi_hosts_supported:
        LOG.info(
            "Skip migration steps. Less than two hosts in same storage aggregate support {}"
            .format(vcpu_model))
        return

    LOG.tc_step(
        "Live (block) migrate vm and check {} vcpu model".format(vcpu_model))
    vm_helper.live_migrate_vm(vm_id=vm)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm)
    check_vm_cpu_model(vm, vcpu_model, expt_arch=expt_arch)

    LOG.tc_step("Cold migrate vm and check {} vcpu model".format(vcpu_model))
    vm_helper.cold_migrate_vm(vm_id=vm)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm)
    check_vm_cpu_model(vm, vcpu_model, expt_arch=expt_arch)
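
A rough, self-contained sketch of an in-guest cpu model check in the spirit of check_vm_cpu_model (the real keyword also inspects the virsh domain XML on the host). ssh_exec is an assumed callable that runs a command inside the VM and returns its stdout:

def guest_cpu_model(ssh_exec):
    """Return the 'model name' string reported by /proc/cpuinfo in the guest."""
    out = ssh_exec("grep -m1 'model name' /proc/cpuinfo")
    return out.split(':', 1)[1].strip() if ':' in out else out.strip()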
Example No. 22
def test_vm_with_a_large_volume_live_migrate(vms_, pre_alarm_):
    """
    Instantiate vms with large volumes (20 GB and 40 GB) and live migrate:
    Args:
        vms_ (dict): vms created by vms_ fixture
        pre_alarm_ (list): alarm lists obtained by pre_alarm_ fixture

    Test Setups:
    - get tenant1 and management networks which are already created for lab
    setup
    - get or create a "small" flavor
    - get the guest image id
    - create two large volumes (20 GB and 40 GB) in cinder
    - boot two vms ( test_inst1, test_inst2) using  volumes 20 GB and 40 GB
    respectively


    Test Steps:
    - Verify VM status is ACTIVE
    - Validate that VMs boot, and that no timeouts or error status occur.
    - Verify the VM can be pinged from NATBOX
    - Verify login to VM and rootfs (/dev/vda) filesystem is rw mode
    - Attempt to live migrate of VMs
    - Validate that the VMs migrated and no errors or alarms are present
    - Log into both VMs and validate that file systems are read-write
    - Terminate VMs

    Skip conditions:
    - less than two computes
    - no storage node

    """
    for vm in vms_:
        vm_id = vm['id']

        LOG.tc_step(
            "Checking VM status; VM Instance id is: {}......".format(vm_id))
        vm_state = vm_helper.get_vm_status(vm_id)

        assert vm_state == VMStatus.ACTIVE, 'VM {} state is {}; Not in ' \
                                            'ACTIVE state as expected' \
            .format(vm_id, vm_state)

        LOG.tc_step("Verify  VM can be pinged from NAT box...")
        rc, boot_time = check_vm_boot_time(vm_id)
        assert rc, "VM is not pingable after {} seconds ".format(boot_time)

        LOG.tc_step("Verify Login to VM and check filesystem is rw mode....")
        assert is_vm_filesystem_rw(
            vm_id), 'rootfs filesystem is not RW as expected for VM {}' \
            .format(vm['display_name'])

        LOG.tc_step(
            "Attempting  live migration; vm id = {}; vm_name = {} ....".format(
                vm_id, vm['display_name']))

        code, msg = vm_helper.live_migrate_vm(vm_id=vm_id, fail_ok=False)
        LOG.tc_step("Verify live migration succeeded...")
        assert code == 0, "Expected return code 0. Actual return code: {}; " \
                          "details: {}".format(code, msg)

        LOG.tc_step(
            "Verifying  filesystem is rw mode after live migration....")
        assert is_vm_filesystem_rw(
            vm_id), 'After live migration rootfs filesystem is not RW as ' \
                    'expected for VM {}'. \
            format(vm['display_name'])
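
A sketch of what an rw-mount check like is_vm_filesystem_rw could boil down to inside the guest (illustrative; ssh_exec is an assumed callable returning a command's stdout):

def rootfs_is_rw(ssh_exec, device='/dev/vda'):
    """True if the filesystem mounted from `device` carries the 'rw' option."""
    for line in ssh_exec('cat /proc/mounts').splitlines():
        fields = line.split()
        if len(fields) >= 4 and fields[0].startswith(device):
            return 'rw' in fields[3].split(',')
    return False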
Example No. 23
def test_vm_with_large_volume_and_evacuation(vms_, pre_alarm_):
    """
    Instantiate vms with large volumes (20 GB and 40 GB) and evacuate:

    Args:
        vms_ (dict): vms created by vms_ fixture
        pre_alarm_ (list): alarm lists obtained by pre_alarm_ fixture

    Test Setups:
    - get tenant1 and management networks which are already created for lab
    setup
    - get or create a "small" flavor
    - get the guest image id
    - create two large volumes (20 GB and 40 GB) in cinder
    - boot two vms ( test_inst1, test_inst2) using  volumes 20 GB and 40 GB
    respectively


    Test Steps:
    - Verify VM status is ACTIVE
    - Validate that VMs boot, and that no timeouts or error status occur.
    - Verify the VM can be pinged from NATBOX
    - Verify login to VM and rootfs (dev/vda) filesystem is rw mode
    - live migrate, if required, to bring both VMs to the same compute
    - Validate  migrated VM and no errors or alarms are present
    - Reboot compute host to initiate evacuation
    - Verify VMs are evacuated
    - Check for any system alarms
    - Verify login to VM and rootfs (dev/vda) filesystem is still rw mode
    after evacuation
    - Terminate VMs

    Skip conditions:
    - less than two computes
    - no storage node

    """
    vm_ids = []
    for vm in vms_:
        vm_id = vm['id']
        vm_ids.append(vm_id)
        LOG.tc_step(
            "Checking VM status; VM Instance id is: {}......".format(vm_id))
        vm_state = vm_helper.get_vm_status(vm_id)
        assert vm_state == VMStatus.ACTIVE, 'VM {} state is {}; Not in ' \
                                            'ACTIVE state as expected' \
            .format(vm_id, vm_state)

        LOG.tc_step("Verify  VM can be pinged from NAT box...")
        rc, boot_time = check_vm_boot_time(vm_id)
        assert rc, "VM is not pingable after {} seconds ".format(boot_time)

        LOG.tc_step("Verify Login to VM and check filesystem is rw mode....")
        assert is_vm_filesystem_rw(
            vm_id), 'rootfs filesystem is not RW as expected for VM {}' \
            .format(vm['display_name'])

    LOG.tc_step(
        "Checking if live migration is required to put the vms to a single "
        "compute....")
    host_0 = vm_helper.get_vm_host(vm_ids[0])
    host_1 = vm_helper.get_vm_host(vm_ids[1])

    if host_0 != host_1:
        LOG.tc_step("Attempting to live migrate  vm {} to host {} ....".format(
            (vms_[1])['display_name'], host_0))
        code, msg = vm_helper.live_migrate_vm(vm_ids[1],
                                              destination_host=host_0)
        LOG.tc_step("Verify live migration succeeded...")
        assert code == 0, "Live migration of vm {} to host {} did not " \
                          "success".format((vms_[1])['display_name'], host_0)

    LOG.tc_step("Verify both VMs are in same host....")
    assert host_0 == vm_helper.get_vm_host(
        vm_ids[1]), "VMs are not in the same compute host"

    LOG.tc_step(
        "Rebooting compute {} to initiate vm evacuation .....".format(host_0))
    vm_helper.evacuate_vms(host=host_0, vms_to_check=vm_ids, ping_vms=True)

    LOG.tc_step("Login to VM and to check filesystem is rw mode....")
    assert is_vm_filesystem_rw((vms_[0])[
                                   'id']), 'After evacuation the rootfs ' \
                                           'filesystem is not RW as expected ' \
                                           'for VM {}'.format(
        (vms_[0])['display_name'])

    LOG.tc_step("Login to VM and to check filesystem is rw mode....")
    assert is_vm_filesystem_rw((vms_[1])['id']), \
        'After evacuation the rootfs filesystem is not RW as expected ' \
        'for VM {}'.format((vms_[1])['display_name'])
Example No. 24
def test_cpu_pol_vm_actions(flv_vcpus, cpu_pol, pol_source, boot_source):
    LOG.tc_step("Create flavor with {} vcpus".format(flv_vcpus))
    flavor_id = nova_helper.create_flavor(name='cpu_pol', vcpus=flv_vcpus)[1]
    ResourceCleanup.add('flavor', flavor_id)

    image_id = glance_helper.get_image_id_from_name(
        GuestImages.DEFAULT['guest'], strict=True)
    if cpu_pol is not None:
        if pol_source == 'flavor':
            specs = {FlavorSpec.CPU_POLICY: cpu_pol}

            LOG.tc_step("Set following extra specs: {}".format(specs))
            nova_helper.set_flavor(flavor_id, **specs)
        else:
            image_meta = {ImageMetadata.CPU_POLICY: cpu_pol}
            LOG.tc_step(
                "Create image with following metadata: {}".format(image_meta))
            image_id = glance_helper.create_image(
                name='cpu_pol_{}'.format(cpu_pol),
                cleanup='function',
                **image_meta)[1]
    if boot_source == 'volume':
        LOG.tc_step("Create a volume from image")
        source_id = cinder_helper.create_volume(name='cpu_pol_{}'.format(cpu_pol),
                                                source_id=image_id)[1]
        ResourceCleanup.add('volume', source_id)
    else:
        source_id = image_id

    prev_cpus = host_helper.get_vcpus_for_computes(field='used_now')

    LOG.tc_step(
        "Boot a vm from {} with above flavor and check vm topology is as "
        "expected".format(boot_source))
    vm_id = vm_helper.boot_vm(name='cpu_pol_{}_{}'.format(cpu_pol, flv_vcpus),
                              flavor=flavor_id,
                              source=boot_source,
                              source_id=source_id,
                              cleanup='function')[1]

    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    vm_host = vm_helper.get_vm_host(vm_id)
    check_helper.check_topology_of_vm(vm_id,
                                      vcpus=flv_vcpus,
                                      cpu_pol=cpu_pol,
                                      vm_host=vm_host,
                                      prev_total_cpus=prev_cpus[vm_host])

    LOG.tc_step("Suspend/Resume vm and check vm topology stays the same")
    vm_helper.suspend_vm(vm_id)
    vm_helper.resume_vm(vm_id)

    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    check_helper.check_topology_of_vm(vm_id,
                                      vcpus=flv_vcpus,
                                      cpu_pol=cpu_pol,
                                      vm_host=vm_host,
                                      prev_total_cpus=prev_cpus[vm_host])

    LOG.tc_step("Stop/Start vm and check vm topology stays the same")
    vm_helper.stop_vms(vm_id)
    vm_helper.start_vms(vm_id)

    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    prev_siblings = check_helper.check_topology_of_vm(
        vm_id,
        vcpus=flv_vcpus,
        cpu_pol=cpu_pol,
        vm_host=vm_host,
        prev_total_cpus=prev_cpus[vm_host])[1]

    LOG.tc_step("Live migrate vm and check vm topology stays the same")
    vm_helper.live_migrate_vm(vm_id=vm_id)

    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    vm_host = vm_helper.get_vm_host(vm_id)
    prev_siblings = prev_siblings if cpu_pol == 'dedicated' else None
    check_helper.check_topology_of_vm(vm_id,
                                      vcpus=flv_vcpus,
                                      cpu_pol=cpu_pol,
                                      vm_host=vm_host,
                                      prev_total_cpus=prev_cpus[vm_host],
                                      prev_siblings=prev_siblings)

    LOG.tc_step("Cold migrate vm and check vm topology stays the same")
    vm_helper.cold_migrate_vm(vm_id=vm_id)

    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    vm_host = vm_helper.get_vm_host(vm_id)
    check_helper.check_topology_of_vm(vm_id,
                                      vcpus=flv_vcpus,
                                      cpu_pol=cpu_pol,
                                      vm_host=vm_host,
                                      prev_total_cpus=prev_cpus[vm_host])
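
test_cpu_pol_vm_actions receives flv_vcpus, cpu_pol, pol_source and boot_source as test parameters. A hedged example of how such a test could be parametrized with pytest follows; the values and decorator are illustrative only, since the real suite's fixtures and markers are not shown in this listing.

import pytest

@pytest.mark.parametrize(('flv_vcpus', 'cpu_pol', 'pol_source', 'boot_source'), [
    (4, 'dedicated', 'flavor', 'image'),
    (2, 'dedicated', 'image', 'volume'),
    (3, 'shared', 'flavor', 'volume'),
])
def test_cpu_pol_vm_actions_parametrized(flv_vcpus, cpu_pol, pol_source, boot_source):
    # Same body as test_cpu_pol_vm_actions above; the decorator drives the
    # combinations of vcpu count, cpu policy, policy source and boot source.
    ...
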
Exemplo n.º 25
0
def test_instantiate_a_vm_with_multiple_volumes_and_migrate():
    """
    Test a vm with multiple volumes: live migration, cold migration and evacuation

    Test Setups:
    - get guest image_id
    - get or create 'small' flavor_id
    - get tenant and management network ids

    Test Steps:
    - create volume for boot and another extra size 8GB
    - boot vms from the created volume
    - Validate that VMs boot, and that no timeouts or error status occur.
    - Verify VM status is ACTIVE
    - Attach the second volume to VM
    - Attempt live migrate  VM
    - Login to VM and verify the filesystem is rw mode on both volumes
    - Attempt cold migrate  VM
    - Login to VM and verify the filesystem is rw mode on both volumes
    - Reboot the compute host to initiate evacuation
    - Login to VM and verify the filesystem is rw mode on both volumes
    - Terminate VMs

    Skip conditions:
    - less than two computes
    - no storage node

    """
    # skip("Currently not working. Centos image doesn't see both volumes")
    LOG.tc_step("Creating a volume size=8GB.....")
    vol_id_0 = cinder_helper.create_volume(size=8)[1]
    ResourceCleanup.add('volume', vol_id_0, scope='function')

    LOG.tc_step("Creating a second volume size=8GB.....")
    vol_id_1 = cinder_helper.create_volume(size=8, bootable=False)[1]
    LOG.tc_step("Volume id is: {}".format(vol_id_1))
    ResourceCleanup.add('volume', vol_id_1, scope='function')

    LOG.tc_step("Booting instance vm_0...")

    vm_id = vm_helper.boot_vm(name='vm_0',
                              source='volume',
                              source_id=vol_id_0,
                              cleanup='function')[1]
    time.sleep(5)

    LOG.tc_step("Verify  VM can be pinged from NAT box...")
    rc, boot_time = check_vm_boot_time(vm_id)
    assert rc, "VM is not pingable after {} seconds ".format(boot_time)

    LOG.tc_step("Login to VM and to check filesystem is rw mode....")
    assert is_vm_filesystem_rw(
        vm_id), 'vol_0 rootfs filesystem is not RW as expected.'

    LOG.tc_step("Attemping to attach a second volume to VM...")
    vm_helper.attach_vol_to_vm(vm_id, vol_id_1)

    LOG.tc_step(
        "Log in to VM and check filesystem is in rw mode for both volumes....")
    assert is_vm_filesystem_rw(vm_id, rootfs=['vda',
                                              'vdb']), 'volumes rootfs ' \
                                                       'filesystem is not RW ' \
                                                       'as expected.'

    LOG.tc_step("Attemping live migrate VM...")
    vm_helper.live_migrate_vm(vm_id=vm_id)

    LOG.tc_step("Login to VM and to check filesystem is rw mode after live "
                "migration....")
    assert is_vm_filesystem_rw(vm_id, rootfs=['vda',
                                              'vdb']), 'After live migration ' \
                                                       'rootfs filesystem is ' \
                                                       'not RW'

    LOG.tc_step("Attempting  cold migrate VM...")
    vm_helper.cold_migrate_vm(vm_id)

    LOG.tc_step("Login to VM and to check filesystem is rw mode after live "
                "migration....")
    assert is_vm_filesystem_rw(vm_id, rootfs=['vda',
                                              'vdb']), 'After cold migration ' \
                                                       'rootfs filesystem is ' \
                                                       'not RW'
    LOG.tc_step("Testing VM evacuation.....")
    before_host_0 = vm_helper.get_vm_host(vm_id)

    LOG.tc_step("Rebooting compute {} to initiate vm evacuation .....".format(
        before_host_0))
    vm_helper.evacuate_vms(host=before_host_0,
                           vms_to_check=vm_id,
                           ping_vms=True)

    LOG.tc_step("Login to VM and to check filesystem is rw mode after live "
                "migration....")
    assert is_vm_filesystem_rw(vm_id, rootfs=['vda',
                                              'vdb']), 'After evacuation ' \
                                                       'filesystem is not RW'
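
Both volume tests above call check_vm_boot_time(), which is not included in this listing. A minimal sketch, under the assumption that vm_helper.wait_for_vm_pingable_from_natbox() accepts timeout and fail_ok arguments (an assumption, not confirmed here), could look like this:

import time

def check_vm_boot_time_sketch(vm_id, timeout=600):
    # Hedged sketch: wait until the VM answers pings from the NAT box and
    # report how long that took, instead of failing the test immediately.
    start = time.time()
    pingable = vm_helper.wait_for_vm_pingable_from_natbox(vm_id,
                                                          timeout=timeout,
                                                          fail_ok=True)
    return pingable, int(time.time() - start)
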
Exemplo n.º 26
0
def test_live_migrate_v1(vm_boot_type, vm_storage, vm_interface, block_migrate,
                         specify_host):
    """
    Live migrate VM with:
        various vm storage type,
        various vm interface types,
        with/without block migration,
        with/without specify host when sending live-migration request

    Expected results can be successful or rejected depending on the VM storage details and hosts storage backing.

    Args:
        vm_boot_type (str): e.g, image, volume
        vm_storage (str): VM storage. e.g., local_image, local_volume, remote
        vm_interface (str): VM interface, e.g., virtio, avp
        block_migrate (bool): Whether to live-migrate with block migration
        specify_host (bool): Whether to specify host in live-migration request

    =====
    Prerequisites (requirement for the system):
        - system is preconfigured to test scenario under test

    Skip conditions:
        - fewer than two hypervisor hosts on the system

    Test Steps:
        - Find/boot a VM that satisfy the given criteria
        - Find a suitable host to migrate to if specify_host is True
        - Attempt to live-migrate
        - Verify the results based on the return code

    """
    # Make skip decision based on the value(s) of test param(s) and the system condition
    if vm_boot_type == 'image' and vm_tenant2_image_unavailable():
        skip("VM named 'tenant2-image' doesn't exist on the system")

    # Mark test start
    # LOG.tc_start()

    # Mark test steps when applicable
    LOG.tc_step("Boot vm if not already booted.")

    if vm_boot_type == 'image':
        vm_name = 'tenant2-image'
        vm_id = vm_helper.get_vm_id_from_name(vm_name)
    else:
        # boot from volume using launch script from lab_setup
        vm_id = vm_helper.launch_vms_via_script(vm_type=vm_interface,
                                                num_vms=1)[0]

    dest_host = ''
    if specify_host:
        # This step only applicable when test param specify_host=True
        LOG.tc_step("Getting specific destination host")
        dest_host = vm_helper.get_dest_host_for_live_migrate(vm_id)

    # Another test step
    LOG.tc_step("Attempt to live migrate VM")
    return_code, message = vm_helper.live_migrate_vm(
        vm_id,
        fail_ok=True,
        block_migrate=block_migrate,
        destination_host=dest_host)

    # Verify test results using assert
    LOG.tc_step("Verify test result")
    assert return_code in [0, 2], message
Exemplo n.º 27
0
def test_migrate_vm(check_system, guest_os, mig_type, cpu_pol):
    """
    Test migrate vms for given guest type
    Args:
        check_system:
        guest_os:
        mig_type:
        cpu_pol:

    Test Steps:
        - Create a glance image from given guest type
        - Create a vm from cinder volume using above image
        - Live/cold migrate the vm
        - Ensure vm moved to other host and in good state (active and
            reachable from NatBox)

    """
    LOG.tc_step("Create a flavor with 1 vcpu")
    flavor_id = \
        nova_helper.create_flavor(name='{}-mig'.format(mig_type), vcpus=1,
                                  root_disk=9, cleanup='function')[1]

    if cpu_pol is not None:
        specs = {FlavorSpec.CPU_POLICY: cpu_pol}
        LOG.tc_step("Add following extra specs: {}".format(specs))
        nova_helper.set_flavor(flavor=flavor_id, **specs)

    LOG.tc_step("Create a volume from {} image".format(guest_os))
    image_id = glance_helper.get_guest_image(guest_os=guest_os)

    vol_id = cinder_helper.create_volume(source_id=image_id, size=9,
                                         guest_image=guest_os)[1]
    ResourceCleanup.add('volume', vol_id)

    LOG.tc_step("Boot a vm from above flavor and volume")
    vm_id = vm_helper.boot_vm(guest_os, flavor=flavor_id, source='volume',
                              source_id=vol_id, cleanup='function')[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

    if guest_os == 'ubuntu_14':
        system_helper.wait_for_alarm_gone(alarm_id=EventLogID.CINDER_IO_CONGEST,
                                          entity_id='cinder_io_monitor',
                                          strict=False, timeout=300,
                                          fail_ok=False)

    LOG.tc_step("{} migrate vm and check vm is moved to different host".format(
        mig_type))
    prev_vm_host = vm_helper.get_vm_host(vm_id)

    if mig_type == 'live':
        code, output = vm_helper.live_migrate_vm(vm_id)
        if code == 1:
            assert False, "No host to live migrate to. System may not be in " \
                          "good state."
    else:
        vm_helper.cold_migrate_vm(vm_id)

    vm_host = vm_helper.get_vm_host(vm_id)
    assert prev_vm_host != vm_host, "vm host did not change after {} " \
                                    "migration".format(mig_type)

    LOG.tc_step("Ping vm from NatBox after {} migration".format(mig_type))
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
Exemplo n.º 28
0
def test_live_migrate_vm_negative(storage_backing, ephemeral, swap, vm_type,
                                  block_mig, expt_err,
                                  hosts_per_stor_backing, no_simplex):
    """
    Skip Condition:
        - Less than two hosts have specified storage backing

    Test Steps:
        - create flavor with specified vcpus, cpu_policy, ephemeral, swap,
        storage_backing
        - boot vm from specified boot source with above flavor
        - (attach volume to vm if 'image_with_vol', specified in vm_type)
        - Attempt to live migrate the vm with specified block_migration flag
        - Verify the live migration request is rejected and the vm stays on
          the same host

    Teardown:
        - Delete created vm, volume, flavor

    """
    if len(hosts_per_stor_backing.get(storage_backing, [])) < 2:
        skip("Less than two hosts have {} storage backing".format(
            storage_backing))

    vm_id = _boot_vm_under_test(storage_backing, ephemeral, swap, None, 1,
                                vm_type)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

    prev_vm_host = vm_helper.get_vm_host(vm_id)
    vm_disks = vm_helper.get_vm_devices_via_virsh(vm_id)
    file_paths, content = touch_files_under_vm_disks(vm_id=vm_id,
                                                     ephemeral=ephemeral,
                                                     swap=swap, vm_type=vm_type,
                                                     disks=vm_disks)

    LOG.tc_step(
        "Live migrate VM and ensure it's rejected with proper error message")
    # block_mig = True if boot_source == 'image' else False
    code, output = vm_helper.live_migrate_vm(vm_id, block_migrate=block_mig)
    assert 2 == code, "Expected live migration to be rejected. Actual: " \
                      "{}".format(output)

    # Remove below code due to live-migration is async in newton
    # assert 'Unexpected API Error'.lower() not in output.lower(),
    # "'Unexpected API Error' returned."
    #
    # # remove extra spaces in error message
    # output = re.sub(r'\s\s+', " ", output)
    # assert eval(expt_err) in output, "Expected error message {} is not in
    # actual error message: {}".\
    #     format(eval(expt_err), output)

    post_vm_host = vm_helper.get_vm_host(vm_id)
    assert prev_vm_host == post_vm_host, "VM host changed even though live " \
                                         "migration request rejected."

    LOG.tc_step(
        "Ensure vm is pingable from NatBox after live migration rejected")
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

    LOG.tc_step("Check files after live migrate attempt")
    check_helper.check_vm_files(vm_id=vm_id, storage_backing=storage_backing,
                                ephemeral=ephemeral, swap=swap,
                                vm_type=vm_type, vm_action='live_migrate',
                                file_paths=file_paths, content=content,
                                disks=vm_disks, prev_host=prev_vm_host,
                                post_host=post_vm_host)
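
The test above writes marker files with touch_files_under_vm_disks() before the rejected migration and verifies them afterwards with check_helper.check_vm_files(); neither helper appears in this listing. A hedged sketch of that write-then-verify pattern is below; it assumes vm_helper.ssh_to_vm_from_natbox() and exec_cmd() exist as used (assumptions, not confirmed here).

def _touch_and_verify_sketch(vm_id, file_path='/tmp/migrate_marker',
                             content='written-before-migration'):
    # Hedged sketch: write a marker file on the VM, then re-read it after the
    # (rejected) migration attempt to confirm the disk content is untouched.
    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        vm_ssh.exec_cmd("echo '{}' > {}; sync".format(content, file_path),
                        fail_ok=False)

    vm_helper.live_migrate_vm(vm_id, fail_ok=True)   # rejection expected here

    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        out = vm_ssh.exec_cmd("cat {}".format(file_path), fail_ok=False)[1]
        assert content in out, "Marker file lost or changed after migration attempt"
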
Exemplo n.º 29
0
def test_vm_with_max_vnics_attached_during_boot(base_vm, guest_os, nic_arg,
                                                boot_source):
    """
    Setups:
        - Boot a base vm with mgmt net and tenant_port_id (module)

    Test Steps:
        - Boot a vm with 1 mgmt and 15 avp/virtio Interfaces
        - Perform nova action (live migrate --force, live migrate, rebuild, reboot hard/soft, resize revert, resize)
        - ping between base_vm and vm_under_test over mgmt & tenant network

    Teardown:
        - Delete created vm, volume, port (if any)  (func)
        - Delete base vm, volume    (module)

    """

    base_vm_id, mgmt_nic, tenant_nic, internal_net_id, tenant_net_id, mgmt_net_id = base_vm
    vif_type = 'avp' if system_helper.is_avs() else None

    LOG.tc_step("Get/Create {} glance image".format(guest_os))
    cleanup = None if re.search(GuestImages.TIS_GUEST_PATTERN,
                                guest_os) else 'function'
    image_id = glance_helper.get_guest_image(guest_os=guest_os,
                                             cleanup=cleanup)

    # TODO Update vif model config. Right now vif model avp still under implementation
    nics = [mgmt_nic]
    for i in range(15):
        if nic_arg == 'port_id':
            port_id = network_helper.create_port(tenant_net_id,
                                                 'tenant_port-{}'.format(i),
                                                 wrs_vif=vif_type,
                                                 cleanup='function')[1]
            nics.append({'port-id': port_id})
        else:
            nics.append({'net-id': tenant_net_id, 'vif-model': vif_type})

    LOG.tc_step(
        "Boot a {} vm and flavor from {} with 1 mgmt and 15 data interfaces".
        format(guest_os, boot_source))
    vm_under_test = vm_helper.boot_vm('max_vifs-{}-{}'.format(
        guest_os, boot_source),
                                      nics=nics,
                                      source=boot_source,
                                      image_id=image_id,
                                      guest_os=guest_os,
                                      cleanup='function')[1]

    vm_ports_count = len(network_helper.get_ports(server=vm_under_test))
    expt_vnics = 16
    LOG.info("vnics attached to VM: {}".format(vm_ports_count))
    assert vm_ports_count == expt_vnics, \
        "Number of vnics attached is not equal to the expected maximum"

    _ping_vm_data(vm_under_test, vm_under_test, action='boot')
    vm_helper.configure_vm_vifs_on_same_net(vm_id=vm_under_test)
    _ping_vm_data(vm_under_test, base_vm_id, action='configure routes')

    destination_host = vm_helper.get_dest_host_for_live_migrate(
        vm_id=vm_under_test)
    if destination_host:
        # LOG.tc_step("Perform following action(s) on vm {}: {}".format(vm_under_test, 'live-migrate --force'))
        # vm_helper.live_migrate_vm(vm_id=vm_under_test, destination_host=destination_host, force=True)
        # _ping_vm_data(vm_under_test, base_vm_id, action='live migrate --force')

        LOG.tc_step("Perform following action(s) on vm {}: {}".format(
            vm_under_test, 'live-migrate'))
        vm_helper.live_migrate_vm(vm_id=vm_under_test)
        _ping_vm_data(vm_under_test, base_vm_id, action='live-migrate')

    LOG.tc_step("Perform following action(s) on vm {}: {}".format(
        vm_under_test, 'hard reboot'))
    vm_helper.reboot_vm(vm_id=vm_under_test, hard=True)
    _ping_vm_data(vm_under_test, base_vm_id, action='hard reboot')

    LOG.tc_step("Perform following action(s) on vm {}: {}".format(
        vm_under_test, 'soft reboot'))
    vm_helper.reboot_vm(vm_id=vm_under_test)
    _ping_vm_data(vm_under_test, base_vm_id, action='soft reboot')

    LOG.tc_step('Create destination flavor')
    dest_flavor_id = nova_helper.create_flavor(name='dest_flavor',
                                               vcpus=2,
                                               guest_os=guest_os)[1]

    LOG.tc_step('Resize vm to dest flavor and revert')
    vm_helper.resize_vm(vm_under_test,
                        dest_flavor_id,
                        revert=True,
                        fail_ok=False)
    _ping_vm_data(vm_under_test, base_vm_id, action='resize revert')

    LOG.tc_step('Resize vm to dest flavor and revert False')
    vm_helper.resize_vm(vm_under_test, dest_flavor_id, fail_ok=False)
    vm_helper.configure_vm_vifs_on_same_net(vm_id=vm_under_test)
    _ping_vm_data(vm_under_test, base_vm_id, action='resize')

    LOG.tc_step("Perform following action(s) on vm {}: {}".format(
        vm_under_test, 'rebuild'))
    vm_helper.rebuild_vm(vm_id=vm_under_test)
    _ping_vm_data(vm_under_test, vm_under_test, action='rebuild')
    vm_helper.configure_vm_vifs_on_same_net(vm_id=vm_under_test)
    _ping_vm_data(vm_under_test, base_vm_id, action='rebuild')
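
The _ping_vm_data() helper called throughout this example is not part of the listing. A minimal sketch of such a check, assuming vm_helper.ping_vms_from_vm() exists with roughly this signature (an assumption, not confirmed here):

def _ping_vm_data_sketch(vm_under_test, base_vm_id, action):
    # Hedged sketch: ping the VM under test from the base VM over the data and
    # internal networks after the given nova action.
    LOG.tc_step("Ping VM {} from VM {} over data networks after {}".format(
        vm_under_test, base_vm_id, action))
    vm_helper.ping_vms_from_vm(to_vms=vm_under_test, from_vm=base_vm_id,
                               net_types=['data', 'internal'], retry=10)
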
Exemplo n.º 30
0
def _test_basic_swift_provisioning(pool_size, pre_swift_check):
    """
    Verifies basic swift provisioning works as expected
    Args:
        pool_size:
        pre_swift_check:

    Returns:

    """
    ceph_backend_info = get_ceph_backend_info()

    if pool_size == 'default' and pre_swift_check[0]:
        skip("Swift is already provisioned")

    if pool_size == 'fixed_size' and pre_swift_check[0]:
        skip("Swift is already provisioned and set to non-default pool value")

    object_pool_gib = None
    cinder_pool_gib = ceph_backend_info['cinder_pool_gib']

    if pool_size == 'default':
        if not ceph_backend_info['object_gateway']:
            LOG.tc_step("Enabling SWIFT object store .....")

    else:
        if not ceph_backend_info['object_gateway']:
            skip("Swift is not provisioned")

        total_gib = ceph_backend_info['ceph_total_space_gib']
        unallocated_gib = (total_gib - cinder_pool_gib -
                           ceph_backend_info['glance_pool_gib'] -
                           ceph_backend_info['ephemeral_pool_gib'])
        if unallocated_gib == 0:
            unallocated_gib = int(int(cinder_pool_gib) / 4)
            cinder_pool_gib = str(int(cinder_pool_gib) - unallocated_gib)
        elif unallocated_gib < 0:
            skip("Unallocated gib < 0. System is in unknown state.")

        object_pool_gib = str(unallocated_gib)
        LOG.tc_step(
            "Enabling SWIFT object store and setting object pool size to {}....."
            .format(object_pool_gib))

    rc, updated_backend_info = storage_helper.modify_storage_backend(
        'ceph',
        object_gateway=False,
        cinder=cinder_pool_gib,
        object_gib=object_pool_gib,
        services='cinder,glance,nova,swift')

    LOG.info("Verifying if swift object gateway is enabled...")
    assert str(updated_backend_info['object_gateway']).lower() == 'true', "Failed to enable Swift object gateway: {}"\
        .format(updated_backend_info)
    LOG.info("Swift object gateway is enabled.")

    LOG.info("Verifying ceph task ...")
    state = storage_helper.get_storage_backends(backend='ceph',
                                                field='state')[0]
    if system_helper.wait_for_alarm(alarm_id=EventLogID.CONFIG_OUT_OF_DATE,
                                    timeout=10,
                                    fail_ok=True,
                                    entity_id='controller-')[0]:
        LOG.info("Verifying ceph task is set to 'add-object-gateway'...")
        assert BackendState.CONFIGURING == state, \
            "Unexpected ceph state '{}' after swift object gateway update ".format(state)

        LOG.info("Lock/Unlock controllers...")
        active_controller, standby_controller = system_helper.get_active_standby_controllers(
        )
        LOG.info("Active Controller is {}; Standby Controller is {}...".format(
            active_controller, standby_controller))

        for controller in [standby_controller, active_controller]:
            if not controller:
                continue
            HostsToRecover.add(controller)
            host_helper.lock_host(controller, swact=True)
            storage_helper.wait_for_storage_backend_vals(
                backend='ceph-store',
                **{
                    'task': BackendTask.RECONFIG_CONTROLLER,
                    'state': BackendState.CONFIGURING
                })
            host_helper.unlock_host(controller)

        system_helper.wait_for_alarm_gone(
            alarm_id=EventLogID.CONFIG_OUT_OF_DATE, fail_ok=False)
    else:
        assert BackendState.CONFIGURED == state, \
            "Unexpected ceph state '{}' after swift object gateway update ".format(state)

    LOG.info("Verifying Swift provisioning setups...")
    assert verify_swift_object_setup(), "Failure in swift setups"

    for i in range(3):
        vm_name = 'vm_swift_api_{}'.format(i)
        LOG.tc_step(
            "Boot vm {} and perform nova actions on it".format(vm_name))
        vm_id = vm_helper.boot_vm(name=vm_name, cleanup='function')[1]
        vm_helper.wait_for_vm_pingable_from_natbox(
            vm_id, timeout=VMTimeout.DHCP_RETRY)

        LOG.info("Cold migrate VM {} ....".format(vm_name))
        rc = vm_helper.cold_migrate_vm(vm_id=vm_id)[0]
        assert rc == 0, "VM {} failed to cold migrate".format(vm_name)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

        LOG.info("Live migrate VM {} ....".format(vm_name))
        rc = vm_helper.live_migrate_vm(vm_id=vm_id)[0]
        assert rc == 0, "VM {} failed to live migrate".format(vm_name)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

        LOG.info("Suspend/Resume VM {} ....".format(vm_name))
        vm_helper.suspend_vm(vm_id)
        vm_helper.resume_vm(vm_id)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

    LOG.info("Checking overall system health...")
    assert system_helper.get_system_health_query(
    ), "System health not OK after VMs"

    LOG.tc_step("Create Swift container using swift post cli command ...")
    container_names = [
        "test_container_1", "test_container_2", "test_container_3"
    ]

    for container in container_names:
        LOG.info("Creating swift object container {}".format(container))
        rc, out = swift_helper.create_swift_container(container)
        assert rc == 0, "Fail to create swift container {}".format(container)
        LOG.info(
            "Created swift object container {} successfully".format(container))

    LOG.tc_step("Verify swift list to list containers ...")
    container_list = swift_helper.get_swift_containers()[1]
    assert set(container_names) <= set(container_list), "Swift containers {} not listed in {}"\
        .format(container_names, container_list)

    LOG.tc_step("Verify swift delete a container...")
    container_to_delete = container_names[2]
    rc, out = swift_helper.delete_swift_container(container_to_delete)
    assert rc == 0, "Swift delete container rejected: {}".format(out)
    assert container_to_delete not in swift_helper.get_swift_containers()[1], "Unable to delete swift container {}"\
        .format(container_to_delete)

    LOG.tc_step("Verify swift stat to show info of a single container...")
    container_to_stat = container_names[0]
    out = swift_helper.get_swift_container_stat_info(container_to_stat)
    assert out["Container"] == container_to_stat, "Unable to stat swift container {}"\
        .format(container_to_stat)
    assert out["Objects"] == '0', "Incorrect number of objects container {}. Expected O objects, but has {} objects"\
        .format(container_to_stat, out["Objects"])