Example #1
    def test_reboot_only_host(self, get_zone):
        """
        Test rebooting the only hypervisor on the system

        Args:
            get_zone: fixture to create stxauto aggregate, to ensure vms can
            only be booted on one host

        Setups:
            - If more than 1 hypervisor: Create stxauto aggregate and add
            one host to the aggregate

        Test Steps:
            - Launch various vms on target host
                - vm booted from cinder volume,
                - vm booted from glance image,
                - vm booted from glance image, and have an extra cinder
                volume attached after launch,
                - vm booted from cinder volume with ephemeral and swap disks
            - sudo reboot -f the only host
            - Check host is recovered
            - Check vms are recovered and reachable from NatBox

        """
        zone = get_zone

        LOG.tc_step("Launch 5 vms in {} zone".format(zone))
        vms = vm_helper.boot_vms_various_types(avail_zone=zone,
                                               cleanup='function')
        target_host = vm_helper.get_vm_host(vm_id=vms[0])
        for vm in vms[1:]:
            vm_host = vm_helper.get_vm_host(vm)
            assert target_host == vm_host, "VMs are not booted on same host"

        LOG.tc_step("Reboot -f from target host {}".format(target_host))
        HostsToRecover.add(target_host)
        host_helper.reboot_hosts(target_host)

        LOG.tc_step("Check vms are in Active state after host come back up")
        res, active_vms, inactive_vms = vm_helper.wait_for_vms_values(
            vms=vms, value=VMStatus.ACTIVE, timeout=600)

        vms_host_err = []
        for vm in vms:
            if vm_helper.get_vm_host(vm) != target_host:
                vms_host_err.append(vm)

        assert not vms_host_err, "Following VMs are not on the same host {}: " \
                                 "{}\nVMs did not reach Active state: {}". \
            format(target_host, vms_host_err, inactive_vms)

        assert not inactive_vms, "VMs did not reach Active state after " \
                                 "host reboot: {}".format(inactive_vms)

        LOG.tc_step("Check VMs are pingable from NatBox after evacuation")
        vm_helper.wait_for_vm_pingable_from_natbox(
            vms, timeout=VMTimeout.DHCP_RETRY)
Example #2
def test_system_persist_over_host_reboot(host_type, stx_openstack_required):
    """
    Validate that the inventory summary persists over a reboot of one of the controllers

    Test Steps:
        - capture inventory summary for the list of hosts, system service-list and neutron agent-list
        - reboot the current Controller-Active
        - Wait for reboot to complete
        - Validate key items from inventory persist over reboot

    """
    if host_type == 'controller':
        host = system_helper.get_active_controller_name()
    elif host_type == 'compute':
        if system_helper.is_aio_system():
            skip("No compute host for AIO system")

        host = None
    else:
        hosts = system_helper.get_hosts(personality='storage')
        if not hosts:
            skip(msg="Lab has no storage nodes. Skip rebooting storage node.")

        host = hosts[0]

    LOG.tc_step("Pre-check for system status")
    system_helper.wait_for_services_enable()
    up_hypervisors = host_helper.get_up_hypervisors()
    network_helper.wait_for_agents_healthy(hosts=up_hypervisors)

    LOG.tc_step("Launch a vm")
    vm_id = vm_helper.boot_vm(cleanup='function')[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

    if host is None:
        host = vm_helper.get_vm_host(vm_id)

    LOG.tc_step("Reboot a {} node and wait for reboot completes: {}".format(host_type, host))
    HostsToRecover.add(host)
    host_helper.reboot_hosts(host)
    host_helper.wait_for_hosts_ready(host)

    LOG.tc_step("Check vm is still active and pingable after {} reboot".format(host))
    vm_helper.wait_for_vm_status(vm_id, status=VMStatus.ACTIVE, fail_ok=False)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id, timeout=VMTimeout.DHCP_RETRY)

    LOG.tc_step("Check neutron agents and system services are in good state after {} reboot".format(host))
    network_helper.wait_for_agents_healthy(up_hypervisors)
    system_helper.wait_for_services_enable()

    if host in up_hypervisors:
        LOG.tc_step("Check {} can still host vm after reboot".format(host))
        if vm_helper.get_vm_host(vm_id) != host:
            time.sleep(30)
            vm_helper.live_migrate_vm(vm_id, destination_host=host)
Example #3
def test_live_migrate_vm_positive(hosts_per_stor_backing, storage_backing,
                                  ephemeral, swap, cpu_pol, vcpus, vm_type,
                                  block_mig):
    """
    Skip Condition:
        - Less than two hosts have specified storage backing

    Test Steps:
        - create flavor with specified vcpus, cpu_policy, ephemeral, swap,
        storage_backing
        - boot vm from specified boot source with above flavor
        - (attach volume to vm if 'image_with_vol', specified in vm_type)
        - Live migrate the vm with specified block_migration flag
        - Verify VM is successfully live migrated to different host

    Teardown:
        - Delete created vm, volume, flavor

    """
    if len(hosts_per_stor_backing.get(storage_backing, [])) < 2:
        skip("Less than two hosts have {} storage backing".format(
            storage_backing))

    vm_id = _boot_vm_under_test(storage_backing, ephemeral, swap, cpu_pol,
                                vcpus, vm_type)

    prev_vm_host = vm_helper.get_vm_host(vm_id)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

    vm_disks = vm_helper.get_vm_devices_via_virsh(vm_id)
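    # touch_files_under_vm_disks() (defined elsewhere in the test framework) writes
    # small files with known content onto the vm's root/ephemeral/swap disks and
    # returns their paths plus the content, so that data loss can be detected by
    # the check_helper.check_vm_files() call at the end of this test.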
    file_paths, content = touch_files_under_vm_disks(vm_id=vm_id,
                                                     ephemeral=ephemeral,
                                                     swap=swap, vm_type=vm_type,
                                                     disks=vm_disks)

    LOG.tc_step("Live migrate VM and ensure it succeeded")
    # block_mig = True if boot_source == 'image' else False
    code, output = vm_helper.live_migrate_vm(vm_id, block_migrate=block_mig)
    assert 0 == code, "Live migrate is not successful. Details: {}".format(
        output)

    post_vm_host = vm_helper.get_vm_host(vm_id)
    assert prev_vm_host != post_vm_host

    LOG.tc_step("Ensure vm is pingable from NatBox after live migration")
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

    LOG.tc_step("Check files after live migrate")
    check_helper.check_vm_files(vm_id=vm_id, storage_backing=storage_backing,
                                ephemeral=ephemeral, swap=swap,
                                vm_type=vm_type, vm_action='live_migrate',
                                file_paths=file_paths, content=content,
                                disks=vm_disks, prev_host=prev_vm_host,
                                post_host=post_vm_host)
Example #4
def pb_migrate_test(backup_info, con_ssh, vm_ids=None):
    """
    Run migration test before doing system backup.

    Args:
        backup_info:
            - options for doing backup

        con_ssh:
            - current ssh connection

        vm_ids:
            - list of vm ids to randomly choose from for migration
    Return:
        None
    """

    hypervisors = host_helper.get_up_hypervisors(con_ssh=con_ssh)
    if len(hypervisors) < 2:
        LOG.info(
            'Only {} hypervisors, not enough to test migration'.format(
                len(hypervisors)))
        LOG.info('Skip migration test')
        return 0
    else:
        LOG.debug('There are {} hypervisors'.format(len(hypervisors)))

    LOG.info('Randomly choose some VMs and do migrate:')

    target = random.choice(vm_ids)
    LOG.info('-OK, test migration of VM:{}'.format(target))

    original_host = vm_helper.get_vm_host(target)
    LOG.info('Original host:{}'.format(original_host))

    vm_helper.live_migrate_vm(target)
    current_host = vm_helper.get_vm_host(target)
    LOG.info('After live-migration, host:{}'.format(current_host))

    if original_host == current_host:
        LOG.info('backup_info:{}'.format(backup_info))
        LOG.warn(
            'VM is still on its original host, live-migration failed? original host:{}'
            .format(original_host))

    original_host = current_host
    vm_helper.cold_migrate_vm(target)
    current_host = vm_helper.get_vm_host(target)
    LOG.info('After cold-migration, host:{}'.format(current_host))
    if original_host == current_host:
        LOG.warn(
            'VM is still on its original host, cold-migration failed? original host:{}'
            .format(original_host))
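
# Usage sketch (hypothetical values): per the docstring above, this helper is run
# before a system backup is taken, e.g.:
#     pb_migrate_test(backup_info={'backup_dest': 'local'},
#                     con_ssh=active_controller_ssh, vm_ids=[vm1_id, vm2_id])
# where active_controller_ssh, vm1_id and vm2_id come from the surrounding setup.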
Example #5
def test_heat_vm_scale_after_actions(vm_scaling_stack, actions):
    """
    Test VM auto scaling with swact:
        Create heat stack for auto scaling using NestedAutoScale.yaml,  swact and perform vm scale up and down.

    Test Steps:
        - Create a heat stack for auto scaling vm (NestedAutoScale.yaml)
        - Verify heat stack is created successfully
        - Verify heat resources are created
        - live migrate the vm if not sx
        - cold migrate the vm if not sx
        - swact if not sx
        - reboot -f vm host
        - trigger auto scale by boosting cpu usage in the vm (using dd)
        - verify it scales up to the max number of vms (3)
        - trigger scale down by killing dd in the vm
        - verify the vms scale down to the min number (1)
        - Delete Heat stack and verify resource deletion
    """
    stack_name, vm_id = vm_scaling_stack
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id)

    if not system_helper.is_aio_simplex():
        actions = actions.split('-')
        if "swact" in actions:
            LOG.tc_step("Swact before scale in/out")
            host_helper.swact_host()

        if "live_migrate" in actions:
            LOG.tc_step("live migrate vm before scale in/out")
            vm_helper.live_migrate_vm(vm_id)

        if "cold_migrate" in actions:
            LOG.tc_step("cold migrate vm before scale in/out")
            vm_helper.cold_migrate_vm(vm_id)

    if "host_reboot" in actions:
        if system_helper.is_aio_simplex():
            host_helper.reboot_hosts('controller-0')
            vm_helper.wait_for_vm_status(vm_id,
                                         status=VMStatus.ACTIVE,
                                         timeout=600,
                                         check_interval=10,
                                         fail_ok=False)
            vm_helper.wait_for_vm_pingable_from_natbox(
                vm_id, timeout=VMTimeout.DHCP_RETRY)
        else:
            LOG.tc_step("evacuate vm before scale in/out")
            vm_host = vm_helper.get_vm_host(vm_id=vm_id)
            vm_helper.evacuate_vms(host=vm_host, vms_to_check=vm_id)

    LOG.tc_step(
        "Wait for {} vms to auto scale out to {} after running dd in vm(s)".
        format(stack_name, 3))
    vm_helper.wait_for_auto_vm_scale_out(stack_name, expt_max=3)

    LOG.tc_step(
        "Wait for {} vms to auto scale in to {} after killing dd processes in vms"
        .format(stack_name, 1))
    vm_helper.wait_for_auto_vm_scale_in(stack_name, expt_min=1)
Example #6
def test_ea_vm_with_crypto_vfs(_flavors, hosts_pci_device_info):
    """
    Verify guest can be launched with one crypto VF, AVP, VIRTIO, and SRIOV interfaces.
    Verify device cannot be disabled while in use (mainly for labs with two computes).
    Args:
        _flavors:
        hosts_pci_device_info:

    """
    # hosts = list(hosts_pci_device_info.keys())
    vm_name = 'vm_with_pci_device'
    mgmt_net_id = network_helper.get_mgmt_net_id()

    nics = [{'net-id': mgmt_net_id}]

    flavor_id = _flavors['flavor_qat_vf_1']
    LOG.tc_step("Boot a vm  {} with pci-sriov nics and flavor flavor_qat_vf_1".format(vm_name))
    vm_id = vm_helper.boot_vm(vm_name, flavor=flavor_id, nics=nics, cleanup='function')[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    LOG.info("VM {} booted successfully and become active with crypto VF".format(vm_name))

    vm_host = vm_helper.get_vm_host(vm_id)
    pci_dev_info = hosts_pci_device_info[vm_host][0]
    # device_address = pci_dev_info['pci_address']
    host_dev_name = pci_dev_info['device_name']
    expt_qat_devs = {host_dev_name: 1}
    check_helper.check_qat_service(vm_id=vm_id, qat_devs=expt_qat_devs)

    _perform_nova_actions(vms_dict={vm_name: vm_id}, flavors=_flavors)
    check_helper.check_qat_service(vm_id=vm_id, qat_devs=expt_qat_devs)
Example #7
def _test_check_vm_disk_on_compute(storage, hosts_per_backing):

    """
        Tests that the existence of volumes is properly reported for lvm-backed vms.

        Skip:
            - Skip if no lvm-configured compute nodes are available

        Test steps:
            - Create a flavor for lvm-backed vms and boot a vm with that flavor
            - SSH onto the node hosting the VM and do the following:
                - Run ps aux and confirm that there is a qemu process
                - Run sudo lvs and confirm the existence of a thin pool
                - Run sudo lvs and confirm the existence of a volume for the vm
            - Ensure that the "free" space shown for the hypervisor (obtained by running
                "nova hypervisor-show <compute node>" and then checking the "free_disk_gb" field)
                reflects the space available within the thin pool
            - Delete the instance and ensure that space is returned to the hypervisor

        Test Teardown:
            - Delete created VM if not already done

    """

    hosts_with_backing = hosts_per_backing.get(storage, [])
    if not hosts_with_backing:
        skip(SkipStorageBacking.NO_HOST_WITH_BACKING.format(storage))

    LOG.tc_step("Create flavor and boot vm")
    flavor = nova_helper.create_flavor(storage_backing=storage)[1]
    ResourceCleanup.add('flavor', flavor, scope='function')
    vm = vm_helper.boot_vm(source='image', flavor=flavor, cleanup='function')[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm)
    vm_host = vm_helper.get_vm_host(vm)

    with host_helper.ssh_to_host(vm_host) as compute_ssh:
        LOG.tc_step("Look for qemu process")
        compute_ssh.exec_sudo_cmd(cmd="lvs --units g")
        assert check_for_qemu_process(compute_ssh), "qemu process not found when calling ps"

        LOG.tc_step("Look for pool information")
        thin_pool_size = get_initial_pool_space(compute_ssh, vm)

        vm_vol_name = vm + '_disk'
        raw_vm_volume_output = \
            compute_ssh.exec_sudo_cmd(cmd="lvs --units g --noheadings -o lv_size -S lv_name={}".format(vm_vol_name))[1]
        assert raw_vm_volume_output, "created vm volume not found"
        vm_volume_size = float(raw_vm_volume_output.strip('<g'))

    LOG.tc_step("Calculate compute free disk space and ensure that it reflects thin pool")
    expected_space_left = int(thin_pool_size - vm_volume_size)
    free_disk_space = get_compute_free_disk_gb(vm_host)
    assert expected_space_left - 1 <= free_disk_space <= expected_space_left + 1, \
        'Hypervisor-show does not reflect space within thin pool'

    LOG.tc_step("Calculate free space following vm deletion (ensure volume space is returned)")
    vm_helper.delete_vms(vm)
    free_disk_space = get_compute_free_disk_gb(vm_host)
    assert int(thin_pool_size) == free_disk_space, \
        'Space is not properly returned to the hypervisor or hypervisor info does not properly reflect it'
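
# Hypothetical sketch of the get_compute_free_disk_gb() helper used above; the real
# helper lives elsewhere in the test framework. Per the docstring, it reads the
# "free_disk_gb" field reported by "nova/openstack hypervisor show <compute node>".
def get_compute_free_disk_gb_sketch(host):
    import json
    import subprocess
    out = subprocess.check_output(
        ['openstack', 'hypervisor', 'show', host, '-f', 'json'])
    return int(json.loads(out)['free_disk_gb'])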
Example #8
def check_vm_cpu_model(vm_id, vcpu_model, expt_arch=None):
    if vcpu_model == 'Passthrough':
        pattern_ps = 'host'
        pattern_virsh = 'host-passthrough'
        virsh_tag = 'cpu'
        type_ = 'dict'
    elif vcpu_model:
        virsh_tag = 'cpu/model'
        type_ = 'text'
        if vcpu_model == 'Haswell':
            pattern_ps = pattern_virsh = r'(haswell|haswell\-notsx)'
        else:
            pattern_ps = pattern_virsh = vcpu_model.lower()
    else:
        # vcpu model is not set
        pattern_ps = None
        pattern_virsh = None
        virsh_tag = 'cpu'
        type_ = 'dict'

    LOG.info(
        "Check vcpu model successfully applied to vm via ps aux and virsh dumpxml on vm host"
    )
    host = vm_helper.get_vm_host(vm_id)
    inst_name = vm_helper.get_vm_instance_name(vm_id)
    with host_helper.ssh_to_host(host) as host_ssh:
        output_ps = host_ssh.exec_cmd(
            "ps aux | grep --color='never' -i {}".format(vm_id),
            fail_ok=False)[1]
        output_virsh = host_helper.get_values_virsh_xmldump(
            inst_name, host_ssh, tag_paths=virsh_tag, target_type=type_)
        output_virsh = output_virsh[0]

    if vcpu_model:
        assert re.search(r'\s-cpu\s{}(\s|,)'.format(pattern_ps), output_ps.lower()), \
            'cpu_model {} not found for vm {}'.format(pattern_ps, vm_id)
    else:
        assert '-cpu' not in output_ps, "cpu model is specified in ps aux"

    if vcpu_model == 'Passthrough':
        assert output_virsh['mode'] == 'host-passthrough', \
            'cpu mode is not passthrough in virsh for vm {}'.format(vm_id)

        LOG.info("Check cpu passthrough model from within the vm")
        vm_vcpu_model = vm_helper.get_vcpu_model(vm_id)
        host_cpu_model = host_helper.get_host_cpu_model(host=host)
        assert host_cpu_model == vm_vcpu_model, "VM cpu model is different than host cpu model with cpu passthrough"

        if expt_arch:
            assert expt_arch == vm_vcpu_model, "VM cpu model changed. Original: {}. Current: {}".\
                format(expt_arch, vm_vcpu_model)
    elif vcpu_model:
        assert re.search(pattern_virsh, output_virsh.lower()), \
            'cpu model {} is not found in virsh for vm {}'.format(pattern_virsh, vm_id)

    else:
        assert output_virsh == {}, "Virsh cpu output: {}".format(output_virsh)
        vm_vcpu_model = vm_helper.get_vcpu_model(vm_id)
        assert 'QEMU Virtual CPU' in vm_vcpu_model, "vCPU model is not QEMU Virtual CPU when unspecified"
Example #9
def test_lock_unlock_secure_boot_vm():
    """
    This is to test host lock with secure boot vm.

    :return:
    """
    guests_os = ['trusty_uefi', 'uefi_shell']
    disk_formats = ['qcow2', 'raw']
    image_ids = []
    volume_ids = []
    for guest_os, disk_format in zip(guests_os, disk_formats):
        image_ids.append(
            create_image_with_metadata(
                guest_os=guest_os,
                property_key=ImageMetadata.FIRMWARE_TYPE,
                values=['uefi'],
                disk_format=disk_format,
                container_format='bare'))
    # create a flavor
    flavor_id = nova_helper.create_flavor(vcpus=2, ram=1024, root_disk=5)[1]
    ResourceCleanup.add('flavor', flavor_id)
    # create bootable volumes from the above images
    for image_id in image_ids:
        volume_ids.append(
            cinder_helper.create_volume(source_id=image_id[0],
                                        size=5,
                                        cleanup='function')[1])

    block_device_dic = [{
        'id': volume_ids[1],
        'source': 'volume',
        'bootindex': 0
    }, {
        'id': volume_ids[0],
        'source': 'volume',
        'bootindex': 1
    }]

    vm_id = vm_helper.boot_vm(name='sec-boot-vm',
                              source='block_device',
                              flavor=flavor_id,
                              block_device=block_device_dic,
                              cleanup='function',
                              guest_os=guests_os[0])[1]

    _check_secure_boot_on_vm(vm_id=vm_id)

    # Lock the compute node hosting the secure boot vm
    compute_host = vm_helper.get_vm_host(vm_id=vm_id)
    host_helper.lock_host(compute_host, timeout=800)
    if not system_helper.is_aio_simplex():
        _check_secure_boot_on_vm(vm_id=vm_id)
    host_helper.unlock_host(compute_host, timeout=800)

    if system_helper.is_aio_simplex():
        _check_secure_boot_on_vm(vm_id=vm_id)
Example #10
def sys_lock_unlock_hosts(number_of_hosts_to_lock):
    """
        This is to test the evacuation of vms due to compute lock/unlock
    :return:
    """
    # identify hosts with at least 5 vms
    vms_by_compute_dic = vm_helper.get_vms_per_host()
    compute_to_lock = []
    vms_to_check = []
    hosts_threads = []
    timeout = 1000

    for k, v in vms_by_compute_dic.items():
        if len(v) >= 5:
            compute_to_lock.append(k)
            vms_to_check.append(v)

    if not compute_to_lock:
        skip("There are no computes with 5 or more vms")

    if len(compute_to_lock) > number_of_hosts_to_lock:
        compute_to_lock = compute_to_lock[0:number_of_hosts_to_lock]
        vms_to_check = vms_to_check[0:number_of_hosts_to_lock]
    else:
        LOG.warning(
            "There are only {} computes available with 5 or more vms".
            format(len(compute_to_lock)))

    for host in compute_to_lock:
        new_thread = MThread(host_helper.lock_host, host)
        new_thread.start_thread(timeout=timeout + 30)
        hosts_threads.append(new_thread)

    for host_thr in hosts_threads:
        host_thr.wait_for_thread_end()

    LOG.tc_step("Verify lock succeeded and vms still in good state")
    for vm_list in vms_to_check:
        vm_helper.wait_for_vms_values(vms=vm_list, fail_ok=False)

    for host, vms in zip(compute_to_lock, vms_to_check):
        for vm in vms:
            vm_host = vm_helper.get_vm_host(vm_id=vm)
            assert vm_host != host, "VM is still on {} after lock".format(host)
            vm_helper.wait_for_vm_pingable_from_natbox(
                vm_id=vm, timeout=VMTimeout.DHCP_RETRY)

    hosts_threads = []
    for host in compute_to_lock:
        new_thread = MThread(host_helper.unlock_host, host)
        new_thread.start_thread(timeout=timeout + 30)
        hosts_threads.append(new_thread)

    for host_thr in hosts_threads:
        host_thr.wait_for_thread_end()
Example #11
def _test_ea_vm_co_existence_with_and_without_crypto_vfs(_flavors):
    """
    Verify guests with crypto VFs can co-exist with guests without crypto VFs.
    Args:
        _flavors:

    Returns:

    """
    mgmt_net_id = network_helper.get_mgmt_net_id()
    tenant_net_ids = network_helper.get_tenant_net_ids()
    internal_net_id = network_helper.get_internal_net_id()
    vif_type = get_vif_type()

    vm_params = {'vm_no_crypto_1': [_flavors['flavor_none'], [{'net-id': mgmt_net_id},
                                                              {'net-id': tenant_net_ids[0], 'vif-model': vif_type},
                                                              {'net-id': internal_net_id, 'vif-model': vif_type}]],
                 'vm_no_crypto_2': [_flavors['flavor_none'], [{'net-id': mgmt_net_id},
                                                              {'net-id': tenant_net_ids[1], 'vif-model': vif_type},
                                                              {'net-id': internal_net_id, 'vif-model': vif_type}]],
                 'vm_sriov_crypto': [_flavors['flavor_qat_vf_1'],
                                     [{'net-id': mgmt_net_id},
                                      {'net-id': tenant_net_ids[2], 'vif-model': vif_type},
                                      {'net-id': internal_net_id, 'vif-model': 'pci-sriov'}]],
                 'vm_crypto_1': [_flavors['flavor_qat_vf_1'], [{'net-id': mgmt_net_id},
                                                               {'net-id': tenant_net_ids[3], 'vif-model': vif_type},
                                                               {'net-id': internal_net_id, 'vif-model': vif_type}]],
                 'vm_crypto_2': [_flavors['flavor_qat_vf_1'], [{'net-id': mgmt_net_id},
                                                               {'net-id': tenant_net_ids[4], 'vif-model': vif_type},
                                                               {'net-id': internal_net_id, 'vif-model': vif_type}]],
                 }

    vms = {}
    vms_qat_devs = {}

    for vm_name, param in vm_params.items():
        LOG.tc_step("Boot vm {} with {} flavor".format(vm_name, param[0]))
        vm_id = vm_helper.boot_vm('{}'.format(vm_name), flavor=param[0], nics=param[1], cleanup='function')[1]

        LOG.info("Verify  VM can be pinged from NAT box...")
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id), "VM is not pingable."
        vms[vm_name] = vm_id
        vm_host = vm_helper.get_vm_host(vm_id)
        host_dev_name = host_helper.get_host_devices(vm_host, field='device name',
                                                     **{'class id': DevClassID.QAT_VF})[0]
        expt_qat_devs = {} if '_no_crypto' in vm_name else {host_dev_name: 1}
        vms_qat_devs[vm_id] = expt_qat_devs
        check_helper.check_qat_service(vm_id=vm_id, qat_devs=expt_qat_devs)

    _perform_nova_actions(vms, flavors=_flavors, vfs=None)

    for vm_id_, expt_qat_devs_ in vms_qat_devs.items():
        check_helper.check_qat_service(vm_id_, qat_devs=expt_qat_devs_)
Example #12
def test_vcpu_model_and_thread_policy(vcpu_model, thread_policy,
                                      cpu_models_supported):
    """
    Launch vm with vcpu model spec and cpu thread policy both set
    Args:
        vcpu_model (str):
        thread_policy (str):
        cpu_models_supported (tuple): fixture

    Test Steps:
        - create flavor with vcpu model and cpu thread extra specs set
        - boot vm from volume with above flavor
        - if no hyperthreaded host, check vm failed to schedule
        - otherwise check vcpu model and cpu thread policy both set as expected

    """
    cpu_models_multi_host, all_cpu_models_supported = cpu_models_supported
    is_supported = (vcpu_model == 'Passthrough' or
                    vcpu_model in all_cpu_models_supported)
    if not is_supported:
        skip("{} is not supported by any hypervisor".format(vcpu_model))

    name = '{}_{}'.format(vcpu_model, thread_policy)
    flv_id = nova_helper.create_flavor(name=name, vcpus=2)[1]
    ResourceCleanup.add('flavor', flv_id)
    nova_helper.set_flavor(flavor=flv_id,
                           **{
                               FlavorSpec.VCPU_MODEL: vcpu_model,
                               FlavorSpec.CPU_POLICY: 'dedicated',
                               FlavorSpec.CPU_THREAD_POLICY: thread_policy
                           })

    code, vm, msg = vm_helper.boot_vm(name=name,
                                      flavor=flv_id,
                                      fail_ok=True,
                                      cleanup='function')
    ht_hosts = host_helper.get_hypersvisors_with_config(hyperthreaded=True,
                                                        up_only=True)
    if thread_policy == 'require' and not ht_hosts:
        assert 1 == code

    else:
        assert 0 == code, "VM is not launched successfully"
        check_vm_cpu_model(vm_id=vm, vcpu_model=vcpu_model)
        vm_host = vm_helper.get_vm_host(vm)
        check_helper.check_topology_of_vm(vm_id=vm,
                                          vcpus=2,
                                          cpu_pol='dedicated',
                                          cpu_thr_pol=thread_policy,
                                          numa_num=1,
                                          vm_host=vm_host)
Example #13
    def test_multiports_on_same_network_evacuate_vm(self, vifs,
                                                    check_avs_pattern,
                                                    base_setup):
        """
        Test evacuate vm with multiple ports on same network

        Args:
            vifs (tuple): each item in the tuple is 1 nic to be added to vm with specified vif model
            base_setup (tuple): test fixture to boot base vm

        Setups:
            - create a flavor with dedicated cpu policy (class)
            - choose one tenant network and one internal network to be used by test (class)
            - boot a base vm - vm1 with above flavor and networks, and ping it from NatBox (class)
            - Boot a vm under test - vm2 with above flavor and with multiple ports on the same
                tenant network as base vm, and ping it from NatBox     (class)
            - Ping vm2's own data network ips       (class)
            - Ping vm2 from vm1 to verify management and data networks connection   (class)

        Test Steps:
            - Reboot vm2 host
            - Wait for vm2 to be evacuated to other host
            - Wait for vm2 pingable from NatBox
            - Verify pci_address is preserved
            - Verify ping from vm1 to vm2 over management and data networks still works

        Teardown:
            - Delete created vms and flavor
        """

        base_vm, flavor, mgmt_net_id, tenant_net_id, internal_net_id = base_setup
        vm_under_test, nics = _boot_multiports_vm(flavor=flavor,
                                                  mgmt_net_id=mgmt_net_id,
                                                  vifs=vifs,
                                                  net_id=tenant_net_id,
                                                  net_type='data',
                                                  base_vm=base_vm)

        host = vm_helper.get_vm_host(vm_under_test)

        LOG.tc_step("Reboot vm host {}".format(host))
        vm_helper.evacuate_vms(host=host,
                               vms_to_check=vm_under_test,
                               ping_vms=True)

        LOG.tc_step(
            "Verify ping from base_vm to vm_under_test over management and data networks "
            "still works after evacuation.")
        vm_helper.ping_vms_from_vm(to_vms=vm_under_test,
                                   from_vm=base_vm,
                                   net_types=['mgmt', 'data'])
Example #14
def test_snat_evacuate_vm(snat_setups, snat):
    """
    Test VM external access after evacuation.

    Args:
        snat_setups (tuple): returns vm id and fip. Enable snat, create vm and attach floating ip.
        snat (str): whether or not to enable SNAT on the router ('snat_enabled' enables it)

    Test Setups (module):
        - Find a tenant router that is dvr or non-dvr based on the parameter
        - Enable SNAT on tenant router
        - boot a vm and attach a floating ip
        - Ping vm from NatBox

    Test Steps:
        - Ping VM from NatBox
        - Reboot vm host
        - Verify vm is evacuated to other host
        - Verify vm can still ping outside

    Test Teardown:
        - Delete the created vm     (module)
        - Disable snat  (module)

    """
    vm_ = snat_setups[0]

    snat = True if snat == 'snat_enabled' else False
    LOG.tc_step("Update tenant router external gateway to set SNAT to {}".format(snat))
    network_helper.set_router_gateway(enable_snat=snat)

    time.sleep(30)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_, timeout=60, use_fip=True)

    host = vm_helper.get_vm_host(vm_)

    LOG.tc_step("Ping VM from NatBox".format(vm_))
    vm_helper.ping_vms_from_natbox(vm_, use_fip=False)
    # vm_helper.ping_vms_from_natbox(vm_, use_fip=True)

    LOG.tc_step("Evacuate vm")
    vm_helper.evacuate_vms(host=host, vms_to_check=vm_)

    LOG.tc_step("Verify vm can still ping outside")
    vm_helper.wait_for_vm_pingable_from_natbox(vm_, use_fip=snat, timeout=VMTimeout.DHCP_RETRY)
    vm_helper.ping_ext_from_vm(vm_, use_fip=True)

    host_helper.wait_for_hosts_ready(hosts=host)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_, timeout=60, use_fip=False)
    if snat:
        vm_helper.wait_for_vm_pingable_from_natbox(vm_, timeout=60, use_fip=True)
Example #15
def test_boot_vm_on_host(add_host_to_zone):
    target_host = add_host_to_zone

    vm_id = vm_helper.boot_vm(name='cgcsauto_zone',
                              avail_zone='cgcsauto',
                              vm_host=target_host,
                              cleanup='function')[1]

    assert target_host == vm_helper.get_vm_host(vm_id=vm_id)

    res, msg = vm_helper.cold_migrate_vm(vm_id=vm_id, fail_ok=True)

    assert 1 == res, "Expect cold migration reject due to no other host in cgcsauto zone, actual result: {}".format(
        msg)
Example #16
def test_vcpu_model_resize(source_model, dest_model):
    """

    Args:
        source_model:
        dest_model:

    Test Steps:
        - Create a source flavor with 4G root disk and vcpu model extra spec as specified in source_model
        - Create a dest flavor with 5G root disk and vcpu model extra spec as specified in dest_model
        - Launch a vm from image with source flavor
        - Check vcpu_model is successfully applied
        - Resize the vm with dest flavor
        - Check new vcpu_model is successfully applied

    Teardown:
        - Delete created vm, image, flavors

    """
    LOG.tc_step(
        "Create a source flavor with 4G root disk and vcpu model extra spec: {}"
        .format(source_model))
    source_flv = _create_flavor_vcpu_model(vcpu_model=source_model,
                                           root_disk_size=4)

    LOG.tc_step(
        "Create a destination flavor with 5G root disk and vcpu model extra spec: {}"
        .format(dest_model))
    dest_flv = _create_flavor_vcpu_model(vcpu_model=dest_model,
                                         root_disk_size=5)

    LOG.tc_step(
        "Launch a vm from image with source flavor {}".format(source_flv))
    vm_id = vm_helper.boot_vm(flavor=source_flv,
                              source='image',
                              cleanup='function')[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    check_vm_cpu_model(vm_id=vm_id, vcpu_model=source_model)

    expt_arch = None
    if source_model == dest_model == 'Passthrough':
        # Ensure vm resize to host with exact same cpu model when vcpu_model is passthrough
        host = vm_helper.get_vm_host(vm_id)
        expt_arch = host_helper.get_host_cpu_model(host)

    LOG.tc_step("Resize vm to destination flavor {}".format(dest_flv))
    vm_helper.resize_vm(vm_id, flavor_id=dest_flv)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    check_vm_cpu_model(vm_id, vcpu_model=dest_model, expt_arch=expt_arch)
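
# Hypothetical sketch of the _create_flavor_vcpu_model() helper used above, built
# only from calls shown in these examples (nova_helper.create_flavor,
# nova_helper.set_flavor, ResourceCleanup.add); the real helper is defined
# elsewhere in the test module.
def _create_flavor_vcpu_model_sketch(vcpu_model, root_disk_size):
    flavor_id = nova_helper.create_flavor(
        name='vcpu_model_{}'.format(vcpu_model), root_disk=root_disk_size)[1]
    ResourceCleanup.add('flavor', flavor_id, scope='function')
    if vcpu_model:
        # Apply the vcpu model extra spec only when a model is requested
        nova_helper.set_flavor(flavor=flavor_id,
                               **{FlavorSpec.VCPU_MODEL: vcpu_model})
    return flavor_id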
Example #17
def test_boot_vm_cpu_policy_image(flv_vcpus, flv_pol, img_pol, boot_source, expt_err):
    LOG.tc_step("Create flavor with {} vcpus".format(flv_vcpus))
    flavor_id = nova_helper.create_flavor(name='cpu_pol_{}'.format(flv_pol), vcpus=flv_vcpus)[1]
    ResourceCleanup.add('flavor', flavor_id)

    if flv_pol is not None:
        specs = {FlavorSpec.CPU_POLICY: flv_pol}

        LOG.tc_step("Set following extra specs: {}".format(specs))
        nova_helper.set_flavor(flavor_id, **specs)

    if img_pol is not None:
        image_meta = {ImageMetadata.CPU_POLICY: img_pol}
        LOG.tc_step("Create image with following metadata: {}".format(image_meta))
        image_id = glance_helper.create_image(name='cpu_pol_{}'.format(img_pol), cleanup='function', **image_meta)[1]
    else:
        image_id = glance_helper.get_image_id_from_name(GuestImages.DEFAULT['guest'], strict=True)

    if boot_source == 'volume':
        LOG.tc_step("Create a volume from image")
        source_id = cinder_helper.create_volume(name='cpu_pol_img', source_id=image_id)[1]
        ResourceCleanup.add('volume', source_id)
    else:
        source_id = image_id

    prev_cpus = host_helper.get_vcpus_for_computes(field='used_now')

    LOG.tc_step("Attempt to boot a vm from above {} with above flavor".format(boot_source))
    code, vm_id, msg = vm_helper.boot_vm(name='cpu_pol', flavor=flavor_id, source=boot_source,
                                         source_id=source_id, fail_ok=True, cleanup='function')

    # check for negative tests
    if expt_err is not None:
        LOG.tc_step("Check VM failed to boot due to conflict in flavor and image.")
        assert 4 == code, "Expect boot vm cli reject and no vm booted. Actual: {}".format(msg)
        assert eval(expt_err) in msg, "Expected error message is not found in cli return."
        return  # end the test for negative cases

    # Check for positive tests
    LOG.tc_step("Check vm is successfully booted.")
    assert 0 == code, "Expect vm boot successfully. Actual: {}".format(msg)

    # Calculate expected policy:
    expt_cpu_pol = flv_pol if flv_pol else img_pol
    expt_cpu_pol = expt_cpu_pol if expt_cpu_pol else 'shared'

    vm_host = vm_helper.get_vm_host(vm_id)
    check_helper.check_topology_of_vm(vm_id, vcpus=flv_vcpus, cpu_pol=expt_cpu_pol, vm_host=vm_host,
                                      prev_total_cpus=prev_cpus[vm_host])
Example #18
def _test_ea_vm_with_multiple_crypto_vfs(vfs, _flavors, hosts_pci_device_info):
    """
    Verify guest can be launched with multiple crypto VFs, AVP, VIRTIO, and SRIOV interfaces.
    Verify the max number of crypto VFs, verify launching beyond the limit (max is 32) fails,
    and verify VM maintenance activity.
    Args:
        vfs:
        _flavors:
        hosts_pci_device_info:

    Returns:

    """

    LOG.info("Launching a VM with flavor flavor_qat_vf_{}".format(vfs))
    vm_name = 'vm_with_{}_vf_pci_device'.format(vfs)
    mgmt_net_id = network_helper.get_mgmt_net_id()
    tenant_net_id = network_helper.get_tenant_net_id()
    internal_net_id = network_helper.get_internal_net_id()
    vif_type = get_vif_type()

    nics = [{'net-id': mgmt_net_id},
            {'net-id': tenant_net_id, 'vif-model': vif_type},
            {'net-id': internal_net_id, 'vif-model': vif_type}]

    if vfs == 33:
        LOG.tc_step("Verifying VM with over-limit crypto VFs={} cannot be launched".format(vfs))
    else:
        LOG.tc_step("Verifying VM with maximum crypto VFs={}".format(vfs))

    LOG.info("Boot a vm {} with pci-sriov nics, and flavor=flavor_qat_vf_{}".format(vm_name, vfs))
    flavor_id = _flavors['flavor_qat_vf_{}'.format(vfs)]
    rc, vm_id, msg = vm_helper.boot_vm(vm_name, flavor=flavor_id, nics=nics, cleanup='function', fail_ok=True)

    if vfs == 33:
        assert rc != 0, "Unexpected: VM was launched with over-limit crypto vfs: {}".format(msg)
    else:
        assert rc == 0, "VM is not successfully launched. Details: {}".format(msg)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
        vm_host = vm_helper.get_vm_host(vm_id)
        host_dev_name = host_helper.get_host_devices(vm_host, field='device name',
                                                     **{'class id': DevClassID.QAT_VF})[0]
        expt_qat_devs = {host_dev_name: vfs}
        # 32 qat-vfs takes more than 1.5 hours to run tests
        check_helper.check_qat_service(vm_id=vm_id, qat_devs=expt_qat_devs, run_cpa=False)

        _perform_nova_actions(vms_dict={vm_name: vm_id}, flavors=_flavors, vfs=vfs)
        check_helper.check_qat_service(vm_id=vm_id, qat_devs=expt_qat_devs, timeout=14400)
Example #19
def launch_vm(vm_type, num_vcpu, host=None):
    img_id = None
    if vm_type == 'vhost':
        vif_model = 'virtio'
        if num_vcpu > 2:
            img_id = image_with_vif_multiq()
    else:
        vif_model = 'avp'

    LOG.tc_step("Boot a {} vm with {} vcpus on {}".format(
        vm_type, num_vcpu, host if host else "any host"))
    flavor_id = nova_helper.create_flavor(vcpus=num_vcpu,
                                          ram=1024,
                                          root_disk=2)[1]
    ResourceCleanup.add('flavor', flavor_id)
    extra_specs = {
        FlavorSpec.VCPU_MODEL: 'SandyBridge',
        FlavorSpec.CPU_POLICY: 'dedicated',
        FlavorSpec.MEM_PAGE_SIZE: '2048'
    }
    nova_helper.set_flavor(flavor=flavor_id, **extra_specs)

    nic1 = {'net-id': network_helper.get_mgmt_net_id()}
    nic2 = {'net-id': network_helper.get_tenant_net_id()}
    nic3 = {'net-id': network_helper.get_internal_net_id()}
    if vif_model != 'virtio':
        nic2['vif-model'] = vif_model
        nic3['vif-model'] = vif_model

    vol = cinder_helper.create_volume(source_id=img_id, cleanup='function')[1]
    host_info = {'avail_zone': 'nova', 'vm_host': host} if host else {}
    vm_id = vm_helper.boot_vm(name='dpdk-vm',
                              nics=[nic1, nic2, nic3],
                              flavor=flavor_id,
                              user_data=_get_dpdk_user_data(),
                              source='volume',
                              source_id=vol,
                              cleanup='function',
                              **host_info)[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id)

    if host:
        vm_host = vm_helper.get_vm_host(vm_id)
        assert vm_host == host, "VM is not launched on {} as specified".format(
            host)

    return vm_id
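
# Usage sketch (hypothetical host name): boot a vhost guest with 3 vcpus pinned to
# a specific compute via the helper above, e.g.:
#     vm_id = launch_vm(vm_type='vhost', num_vcpu=3, host='compute-1')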
Example #20
def check_vm_hosts(vms, policy='affinity', best_effort=False):
    vm_hosts = []
    for vm in vms:
        vm_host = vm_helper.get_vm_host(vm_id=vm)
        vm_hosts.append(vm_host)

    vm_hosts = list(set(vm_hosts))
    if policy == 'affinity':
        if best_effort:
            return 1 == len(vm_hosts)
        assert 1 == len(vm_hosts), "VMs in affinity group are not on same host"

    else:
        if best_effort:
            return len(vms) == len(vm_hosts)
        assert len(vms) == len(vm_hosts), "VMs in anti_affinity group are not on different host"

    return vm_hosts
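
# Usage sketch (hypothetical ids): with best_effort=True the helper returns a bool
# instead of asserting, so callers can tolerate scheduling limitations, e.g.:
#     grouped = check_vm_hosts(vms=[vm1_id, vm2_id], policy='affinity',
#                              best_effort=True)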
Example #21
def check_host_file_for_vm(vm_id, expecting=True, host=None, fail_ok=True):
    LOG.info('Verify the file for vTPM exists on the hosting node for VM:' +
             vm_id)
    if host is None:
        host = vm_helper.get_vm_host(vm_id)

    active_controller_name = system_helper.get_active_controller_name()

    instance_name = vm_helper.get_vm_instance_name(vm_id)
    vtpm_file = vtpm_base_dir.format(
        vm_id=vm_id, instance_name=instance_name) + '/' + vtpm_file_name

    if host != active_controller_name:
        hosting_node = host
    else:
        hosting_node = active_controller_name

    with host_helper.ssh_to_host(hosting_node) as ssh_client:
        if ssh_client.file_exists(vtpm_file):
            LOG.info('OK, found the file for vTPM:{} on host:{}'.format(
                vtpm_file, host))
            assert expecting is True or fail_ok is True, \
                'FAIL, the files supporting vTPM are found on {} but were expected to be cleared'.format(host)

            if expecting is True:
                LOG.info('-this is expected')
            else:
                LOG.info('-this is NOT expected')

            return True, expecting

        else:
            LOG.info('Cannot find the file for vTPM:{} on host:{}'.format(
                vtpm_file, host))
            assert expecting is False or fail_ok is True, \
                'FAIL, the files supporting vTPM are NOT found on {} as expected'.format(host)

            if expecting is False:
                LOG.info('-this is expected')
            else:
                LOG.info('-this is NOT expected')

            return False, expecting
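
# Usage sketch (hypothetical ids): verify the vTPM backing file exists while the vm
# is running, and that it is cleared after the vm is deleted, e.g.:
#     vm_host = vm_helper.get_vm_host(vm_id)
#     check_host_file_for_vm(vm_id, expecting=True)
#     vm_helper.delete_vms(vm_id)
#     check_host_file_for_vm(vm_id, expecting=False, host=vm_host)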
Example #22
def _check_anti_affinity_vms():
    storage_backing, hosts = keywords.host_helper.get_storage_backing_with_max_hosts()
    best_effort = True if len(hosts) < 3 else False
    anti_affinity_vms = nova_helper.get_server_group_info(group_name='grp_anti_affinity', headers='Members')[0]

    check_vm_hosts(vms=anti_affinity_vms, policy='anti_affinity', best_effort=best_effort)

    vm_hosts = []
    for vm_id in anti_affinity_vms:
        vm_helper.wait_for_vm_status(vm_id=vm_id, check_interval=10)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id)

        vm_helper.live_migrate_vm(vm_id=vm_id)
        vm_helper.cold_migrate_vm(vm_id=vm_id)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id)

        vm_hosts.append(vm_helper.get_vm_host(vm_id))

    return vm_hosts, anti_affinity_vms
Example #23
def check_vm_hosts(vms, policy='affinity', best_effort=False):
    LOG.tc_step("Check hosts for {} vms with best_effort={}".format(policy, best_effort))
    vm_hosts = []
    for vm in vms:
        vm_host = vm_helper.get_vm_host(vm_id=vm)
        LOG.info("Vm {} is hosted on: {}".format(vm, vm_host))
        vm_hosts.append(vm_host)

    vm_hosts = list(set(vm_hosts))
    if policy == 'affinity':
        if best_effort:
            return 1 == len(vm_hosts)
        assert 1 == len(vm_hosts), "VMs in affinity group are not on same host"

    else:
        if best_effort:
            return len(vms) == len(vm_hosts)
        assert len(vms) == len(vm_hosts), "VMs in anti_affinity group are not on different host"

    return vm_hosts
Example #24
def test_vmx_setting():
    """
    Test that vmx feature can be set in guest VM.

    Test Steps:
       - Create a flavor with extra specs hw:wrs:nested_vmx=True and hw:cpu_model=<a cpu model supported by the host>
       - Instantiate a VM with the flavor and check that vm has correct vcpu model
       - ssh into the VM and execute "grep vmx /proc/cpuinfo" and verify that vmx feature is set
    """

    # Create a flavor with specs: hw:wrs:nested_vmx=True and extraspec hw:cpu_model=<compute host cpu model>

    host_cpu_model = 'Passthrough'
    LOG.tc_step("Create flavor for vcpu model {}".format(host_cpu_model))
    flavor_id = nova_helper.create_flavor(fail_ok=False)[1]
    ResourceCleanup.add('flavor', flavor_id)

    LOG.tc_step(
        "Set extra specs for flavor of vcpu model {}".format(host_cpu_model))
    extra_specs = {
        FlavorSpec.NESTED_VMX: True,
        FlavorSpec.VCPU_MODEL: host_cpu_model
    }
    nova_helper.set_flavor(flavor=flavor_id, **extra_specs)

    LOG.tc_step("Create VM for vcpu model {}".format(host_cpu_model))
    code, vm, msg = vm_helper.boot_vm(flavor=flavor_id,
                                      cleanup='function',
                                      fail_ok=False)
    ResourceCleanup.add('vm', vm)
    LOG.tc_step("Check vcpu model is correct")
    host = vm_helper.get_vm_host(vm)
    expt_arch = host_helper.get_host_cpu_model(host)
    check_vm_cpu_model(vm_id=vm, vcpu_model='Passthrough', expt_arch=expt_arch)

    LOG.tc_step("Checking to see if 'vmx' is in /proc/cpuinfo")
    with vm_helper.ssh_to_vm_from_natbox(vm) as vm_ssh:
        vm_ssh.exec_cmd("grep vmx /proc/cpuinfo", fail_ok=False)
Example #25
    def test_evacuate_vm(self, guest_os, boot_source):
        """
        Test evacuate VM with specified guest and boot source
        Args:
            guest_os (str): guest OS name
            boot_source (str): volume or image

        Setup:
            - Ensure sufficient space on system to create the required guest. Skip otherwise.

        Test Steps:
            - Boot a VM with given guest OS from specified boot source
            - Ensure VM is reachable from NatBox
            - 'sudo reboot -f' on vm host to evacuate it
            - Check vm is successfully evacuated - active state and reachable from NatBox

        Teardown:
            - Delete created vm, volume if any, and glance image

        """
        img_id = check_helper.check_fs_sufficient(guest_os=guest_os,
                                                  boot_source=boot_source)

        source_id = img_id if boot_source == 'image' else None
        LOG.tc_step("Boot a {} VM from {}".format(guest_os, boot_source))
        vm_id = vm_helper.boot_vm(name="{}_{}".format(guest_os, boot_source),
                                  source=boot_source,
                                  source_id=source_id,
                                  guest_os=guest_os,
                                  cleanup='function')[1]

        LOG.tc_step("Wait for VM pingable from NATBox")
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

        vm_host = vm_helper.get_vm_host(vm_id)
        LOG.tc_step("Reboot VM host {}".format(vm_host))
        vm_helper.evacuate_vms(host=vm_host, vms_to_check=vm_id, ping_vms=True)
Example #26
    def target_hosts_negative(self, no_simplex, request):
        self.hosts_locked = []

        storages_to_test = []
        hosts_per_backing = host_helper.get_hosts_per_storage_backing()
        for storage_backing in ['local_image', 'remote']:
            hosts = hosts_per_backing.get(storage_backing, [])
            if len(hosts) == 1:
                storages_to_test.append(storage_backing)

        if not storages_to_test:
            skip(
                "Test requires specific storage backing supported by only one host for negative test."
            )

        all_vms = []
        target_hosts = []
        all_new_flavors = []
        for storage_backing in storages_to_test:
            vms_to_test, flavors_created = _boot_migrable_vms(storage_backing)
            all_new_flavors += flavors_created
            for vm in vms_to_test:
                target_hosts.append(vm_helper.get_vm_host(vm[0]))
                all_vms.append(vm[0])

        def teardown():
            LOG.info("Delete all created vms and unlock target host(s)...")
            for vm_to_del in all_vms:
                vm_helper.delete_vms(vm_to_del)
            nova_helper.delete_flavors(all_new_flavors)
            for host_to_unlock in self.hosts_locked:
                host_helper.unlock_host(host_to_unlock,
                                        check_hypervisor_up=True)

        request.addfinalizer(teardown)

        return target_hosts, storages_to_test
Example #27
def check_vm_files(vm_id,
                   storage_backing,
                   ephemeral,
                   swap,
                   vm_type,
                   file_paths,
                   content,
                   root=None,
                   vm_action=None,
                   prev_host=None,
                   post_host=None,
                   disks=None,
                   post_disks=None,
                   guest_os=None,
                   check_volume_root=False):
    """
    Check the files on vm after specified action. This is to check the disks
    in the basic nova matrix table.
    Args:
        vm_id (str):
        storage_backing (str): local_image, local_lvm, or remote
        root (int): root disk size in flavor. e.g., 2, 5
        ephemeral (int): e.g., 0, 1
        swap (int): e.g., 0, 512
        vm_type (str): image, volume, image_with_vol, vol_with_vol
        file_paths (list): list of file paths to check
        content (str): content of the files (assume all files have the same
        content)
        vm_action (str|None): live_migrate, cold_migrate, resize, evacuate,
            None (expect no data loss)
        prev_host (None|str): vm host prior to vm_action. This is used to
        check if vm host has changed when needed.
        post_host (None|str): vm host after vm_action.
        disks (dict): disks that are returned from
        vm_helper.get_vm_devices_via_virsh()
        post_disks (dict): only used in resize case
        guest_os (str|None): default guest assumed for None. e,g., ubuntu_16
        check_volume_root (bool): whether to check root disk size even if vm
        is booted from image

    Returns:

    """
    final_disks = post_disks if post_disks else disks
    final_paths = list(file_paths)
    if not disks:
        disks = vm_helper.get_vm_devices_via_virsh(vm_id=vm_id)

    eph_disk = disks.get('eph', {})
    if not eph_disk:
        if post_disks:
            eph_disk = post_disks.get('eph', {})
    swap_disk = disks.get('swap', {})
    if not swap_disk:
        if post_disks:
            swap_disk = post_disks.get('swap', {})

    disk_check = 'no_loss'
    if vm_action in [None, 'live_migrate']:
        disk_check = 'no_loss'
    elif vm_type == 'volume':
        # boot-from-vol, non-live migrate actions
        disk_check = 'no_loss'
        if storage_backing == 'local_lvm' and (eph_disk or swap_disk):
            disk_check = 'eph_swap_loss'
        elif storage_backing == 'local_image' and vm_action == 'evacuate' and (
                eph_disk or swap_disk):
            disk_check = 'eph_swap_loss'
    elif storage_backing == 'local_image':
        # local_image, boot-from-image, non-live migrate actions
        disk_check = 'no_loss'
        if vm_action == 'evacuate':
            disk_check = 'local_loss'
    elif storage_backing == 'local_lvm':
        # local_lvm, boot-from-image, non-live migrate actions
        disk_check = 'local_loss'
        if vm_action == 'resize':
            post_host = post_host if post_host else vm_helper.get_vm_host(
                vm_id)
            if post_host == prev_host:
                disk_check = 'eph_swap_loss'

    LOG.info("disk check type: {}".format(disk_check))
    loss_paths = []
    if disk_check == 'no_loss':
        no_loss_paths = final_paths
    else:
        # If there's any loss, we must not have remote storage. And any
        # ephemeral/swap disks will be local.
        disks_to_check = disks.get('eph', {})
        # skip swap type checking for data loss since it's not a regular
        # filesystem
        # swap_disks = disks.get('swap', {})
        # disks_to_check.update(swap_disks)

        for path_ in final_paths:
            # For tis-centos-guest, ephemeral disk is mounted to /mnt after
            # vm launch.
            if str(path_).rsplit('/', 1)[0] == '/mnt':
                loss_paths.append(path_)
                break

        for disk in disks_to_check:
            for path in final_paths:
                if disk in path:
                    # We mount disk vdb to /mnt/vdb, so this is looking for
                    # vdb in the mount path
                    loss_paths.append(path)
                    break

        if disk_check == 'local_loss':
            # if vm booted from image, then the root disk is also local disk
            root_img = disks.get('root_img', {})
            if root_img:
                LOG.info(
                    "Auto mount vm disks again since root disk was local with "
                    "data loss expected")
                vm_helper.auto_mount_vm_disks(vm_id=vm_id, disks=final_disks)
                file_name = final_paths[0].rsplit('/')[-1]
                root_path = '/{}'.format(file_name)
                loss_paths.append(root_path)
                assert root_path in final_paths, \
                    "root_path:{}, file_paths:{}".format(root_path, final_paths)

        no_loss_paths = list(set(final_paths) - set(loss_paths))

    LOG.info("loss_paths: {}, no_loss_paths: {}, total_file_pahts: {}".format(
        loss_paths, no_loss_paths, final_paths))
    res_files = {}
    with vm_helper.ssh_to_vm_from_natbox(vm_id=vm_id,
                                         vm_image_name=guest_os) as vm_ssh:
        vm_ssh.exec_sudo_cmd('cat /etc/fstab')
        vm_ssh.exec_sudo_cmd("mount | grep --color=never '/dev'")

        for file_path in loss_paths:
            vm_ssh.exec_sudo_cmd('touch {}2'.format(file_path), fail_ok=False)
            vm_ssh.exec_sudo_cmd('echo "{}" >> {}2'.format(content, file_path),
                                 fail_ok=False)

        for file_path in no_loss_paths:
            output = vm_ssh.exec_sudo_cmd('cat {}'.format(file_path),
                                          fail_ok=False)[1]
            res = '' if content in output else 'content mismatch'
            res_files[file_path] = res

        for file, error in res_files.items():
            assert not error, "Check {} failed: {}".format(file, error)

        swap_disk = final_disks.get('swap', {})
        if swap_disk:
            disk_name = list(swap_disk.keys())[0]
            partition = '/dev/{}'.format(disk_name)
            if disk_check != 'local_loss' and not disks.get('swap', {}):
                mount_on, fs_type = storage_helper.mount_partition(
                    ssh_client=vm_ssh,
                    disk=disk_name,
                    partition=partition,
                    fs_type='swap')
                storage_helper.auto_mount_fs(ssh_client=vm_ssh,
                                             fs=partition,
                                             mount_on=mount_on,
                                             fs_type=fs_type)

            LOG.info("Check swap disk is on")
            swap_output = vm_ssh.exec_sudo_cmd(
                'cat /proc/swaps | grep --color=never {}'.format(partition))[1]
            assert swap_output, "Expect swapon for {}. Actual output: {}". \
                format(partition, vm_ssh.exec_sudo_cmd('cat /proc/swaps')[1])

            LOG.info("Check swap disk size")
            _check_disk_size(vm_ssh, disk_name=disk_name, expt_size=swap)

        eph_disk = final_disks.get('eph', {})
        if eph_disk:
            LOG.info("Check ephemeral disk size")
            eph_name = list(eph_disk.keys())[0]
            _check_disk_size(vm_ssh, eph_name, expt_size=ephemeral * 1024)

        if root:
            image_root = final_disks.get('root_img', {})
            root_name = ''
            if image_root:
                root_name = list(image_root.keys())[0]
            elif check_volume_root:
                root_name = list(final_disks.get('root_vol').keys())[0]

            if root_name:
                LOG.info("Check root disk size")
                _check_disk_size(vm_ssh,
                                 disk_name=root_name,
                                 expt_size=root * 1024)
Example #28
def check_topology_of_vm(vm_id,
                         vcpus,
                         prev_total_cpus=None,
                         numa_num=None,
                         vm_host=None,
                         cpu_pol=None,
                         cpu_thr_pol=None,
                         expt_increase=None,
                         min_vcpus=None,
                         current_vcpus=None,
                         prev_siblings=None,
                         shared_vcpu=None,
                         con_ssh=None,
                         guest=None):
    """
    Check vm has the correct topology based on the number of vcpus,
    cpu policy, cpu threads policy, number of numa nodes

    Check is done via vm-topology, nova host-describe, virsh vcpupin (on vm
    host), nova-compute.log (on vm host),
    /sys/devices/system/cpu/<cpu#>/topology/thread_siblings_list (on vm)

    Args:
        vm_id (str):
        vcpus (int): number of vcpus specified in flavor
        prev_total_cpus (float): such as 37.0000,  37.0625
        numa_num (int): number of numa nodes vm vcpus are on. Default is 1 if
        unset in flavor.
        vm_host (str):
        cpu_pol (str): dedicated or shared
        cpu_thr_pol (str): isolate, require, or prefer
        expt_increase (int): expected total vcpu increase on vm host compared
        to prev_total_cpus
        min_vcpus (None|int): min vcpu flavor spec. vcpu scaling specific
        current_vcpus (None|int): current number of vcpus. vcpu scaling specific
        prev_siblings (list): list of siblings total. Usually used when
        checking vm topology after live migration
        con_ssh (SSHClient)
        shared_vcpu (int): which vcpu is shared
        guest (str|None): guest os. e.g., ubuntu_14. Default guest is assumed
        when None.

    """
    LOG.info("------ Check topology of vm {} on controller, hypervisor and "
             "vm".format(vm_id))
    cpu_pol = cpu_pol if cpu_pol else 'shared'

    if vm_host is None:
        vm_host = vm_helper.get_vm_host(vm_id, con_ssh=con_ssh)

    log_cores_siblings = host_helper.get_logcore_siblings(host=vm_host,
                                                          con_ssh=con_ssh)

    if prev_total_cpus is not None:
        if expt_increase is None:
            expt_increase = vcpus

        LOG.info("{}Check total vcpus for vm host is increased by {} via "
                 "'openstack hypervisor show'".format(SEP, expt_increase))
        expt_used_vcpus = prev_total_cpus + expt_increase
        end_time = time.time() + 70
        while time.time() < end_time:
            post_hosts_cpus = host_helper.get_vcpus_for_computes(
                hosts=vm_host, field='vcpus_used')
            if expt_used_vcpus == post_hosts_cpus[vm_host]:
                break
            time.sleep(10)
        else:
            post_hosts_cpus = host_helper.get_vcpus_for_computes(
                hosts=vm_host, field='used_now')
            actual_used = post_hosts_cpus[vm_host]
            assert expt_used_vcpus == actual_used, \
                "Used vcpus on host {} is not as expected. " \
                "Expected: {}; Actual: {}".format(vm_host, expt_used_vcpus,
                                                  actual_used)

    LOG.info(
        "{}Check vm vcpus, pcpus on vm host via nova-compute.log and virsh "
        "vcpupin".format(SEP))
    # Note: floating vm pcpus will not be checked via virsh vcpupin
    vm_host_cpus, vm_siblings = _check_vm_topology_on_host(
        vm_id,
        vcpus=vcpus,
        vm_host=vm_host,
        cpu_pol=cpu_pol,
        cpu_thr_pol=cpu_thr_pol,
        host_log_core_siblings=log_cores_siblings,
        shared_vcpu=shared_vcpu)

    LOG.info(
        "{}Check vm vcpus, siblings on vm via "
        "/sys/devices/system/cpu/<cpu>/topology/thread_siblings_list".format(
            SEP))
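    # Illustrative guest-side check (assumed output for a 2-vcpu vm with
    # cpu_thr_pol='require'; the exact list format varies by guest kernel,
    # e.g. '0-1' or '0,1'):
    #   $ cat /sys/devices/system/cpu/cpu0/topology/thread_siblings_list
    #   0-1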
    check_sibling = shared_vcpu is None
    _check_vm_topology_on_vm(vm_id,
                             vcpus=vcpus,
                             siblings_total=vm_siblings,
                             current_vcpus=current_vcpus,
                             prev_siblings=prev_siblings,
                             guest=guest,
                             check_sibling=check_sibling)

    return vm_host_cpus, vm_siblings
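
# A minimal usage sketch for check_topology_of_vm (the host name 'compute-1'
# and the flavor values below are assumptions, not taken from a real test):
#   prev_cpus = host_helper.get_vcpus_for_computes(hosts='compute-1',
#                                                  field='vcpus_used')
#   check_topology_of_vm(vm_id, vcpus=4,
#                        prev_total_cpus=prev_cpus['compute-1'],
#                        cpu_pol='dedicated', cpu_thr_pol='require',
#                        vm_host='compute-1')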
Example #29
    def test_shared_cpu_migrate(self, config_host_cpus):
        """
        Test that a vm with a shared vcpu can successfully live migrate to a node with shared vcpus enabled, and that
        migration fails when it targets a node with shared vcpus disabled

        Setup:
            - Skip if there are less than 3 hosts
            - Configure at least one compute to disable shared vcpus
            - Configure at least two computes to have shared cpus via
                'system host-cpu-modify -f shared p0=1,p1=1 <hostname>' (module)

        Test Steps:
            - Create flavor with given number of vcpus
            - Add specific cpu_policy, shared_vcpu values to flavor extra specs
            - Boot a vm with the flavor
            - Ensure vm is booted successfully
            - Perform a non-forced live migration on vm. Ensure that vm is on a shared cpu host.
            - Perform a non-forced cold migration on vm. Ensure that vm is on a shared cpu host.
            - Force live-migrate vm to host with shared vcpus enabled. The migration should succeed
                - Ensure that the vm is on a different host
            - Force live-migrate vm to the host with disabled shared vcpus. The migration should fail
                - Verify error by ensuring that vm is still on same host and grep nova-scheduler logs for
                'CANNOT SCHEDULE'

        Teardown:
            - Delete created vm if any (function)
            - Revert any hosts that were changed for this test

        """

        storage_backing, disable_shared_cpu_host, enabled_shared_hosts = config_host_cpus

        LOG.tc_step("Create a flavor with given number of vcpus")
        flavor = create_shared_flavor(vcpus=2, storage_backing=storage_backing, shared_vcpu=1)
        nova_helper.set_flavor(flavor, **{FlavorSpec.MEM_PAGE_SIZE: 2048})

        LOG.tc_step("Boot a vm with above flavor, and ensure vm is booted successfully")
        vm_id = vm_helper.boot_vm(name='shared_cpu', flavor=flavor, fail_ok=False, cleanup='function')[1]
        origin_host = vm_helper.get_vm_host(vm_id)
        assert origin_host in enabled_shared_hosts, "VM not booted on shared cpu host"

        LOG.tc_step("Perform a non-forced live migration onto an enabled shared cpu host, expect success")
        vm_helper.live_migrate_vm(vm_id)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
        new_host = vm_helper.get_vm_host(vm_id)
        assert new_host in enabled_shared_hosts, "VM not migrated to a shared cpu host"

        LOG.tc_step("Perform a non-forced cold migration onto an enabled shared cpu host, expect success")
        vm_helper.cold_migrate_vm(vm_id)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
        new_host = vm_helper.get_vm_host(vm_id)
        assert new_host in enabled_shared_hosts, "VM not migrated to a shared cpu host"

        if new_host != enabled_shared_hosts[0]:
            dest_host = enabled_shared_hosts[0]
        else:
            dest_host = enabled_shared_hosts[1]

        LOG.tc_step("Perform second live migration onto an enabled shared cpu host, expect success")
        vm_helper.live_migrate_vm(vm_id, destination_host=dest_host)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

        LOG.tc_step("Perform third live migration onto a disabled shared cpu host, expect failure")
        code = vm_helper.live_migrate_vm(vm_id, destination_host=disable_shared_cpu_host, fail_ok=True)[0]
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
        assert code > 0, "Migrate not rejected as expected"
        assert vm_helper.get_vm_host(vm_id) == dest_host, "VM not on same compute node"

        LOG.tc_step("Verify second live migration failed via nova-scheduler.log")
        req_id = get_failed_live_migrate_action_id(vm_id)
        grepcmd = "grep 'CANNOT SCHEDULE' /var/log/nova/nova-scheduler.log | grep {}".format(req_id)
        control_ssh = ControllerClient.get_active_controller()
        control_ssh.exec_cmd(grepcmd, fail_ok=False)
Example #30
    def test_evacuate_shared_cpu_vm(self, target_hosts, add_shared_cpu, add_admin_role_func):
        """
        Test that instance with shared vcpu can be evacuated and that the vm still has shared vcpu after evacuation

        Setup:
            - Configure at least two computes to have shared cpus via
                'system host-cpu-modify -f shared p0=1,p1=1 <hostname>' (module)

        Test Steps:
            - Create a flavor with a shared vcpu
            - Boot two vms with the flavor
            - Ensure both vms are booted successfully and validate the shared vcpus
            - Evacuate the vms
            - Ensure evacuation is successful and validate the shared vcpus

        Teardown:
            - Delete created vms and flavors
            - Set shared cpus to 0 (default setting) on the compute node under test (module)

        """
        storage_backing, shared_cpu_hosts, max_vcpus_per_proc = add_shared_cpu
        vm_helper.delete_vms()
        prev_total_vcpus = host_helper.get_vcpus_for_computes()

        target_host = shared_cpu_hosts[0]
        vms = []
        vcpus = 2
        shared_vcpu = 1
        pcpus = vcpus - shared_vcpu
        expt_increase = 0
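        # expt_increase accumulates the dedicated pcpus (vcpus - shared_vcpu)
        # consumed per booted vm; the shared vcpu is assumed not to add a full
        # unit to the host's used vcpu count, which is why only 'pcpus' is
        # added for each vm below.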
        LOG.tc_step("Create two 2 vcpu VMs each with 1 shared vcpu")
        flv_id = create_shared_flavor(vcpus=vcpus, shared_vcpu=shared_vcpu, storage_backing=storage_backing)
        for _ in range(2):

            vm_id = vm_helper.boot_vm(name='shared_cpu', flavor=flv_id, fail_ok=False, avail_zone='nova',
                                      vm_host=target_host, cleanup='function')[1]
            vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

            expt_increase += pcpus
            LOG.tc_step("Check vm {} numa node setting via vm-topology".format(vm_id))
            check_shared_vcpu(vm=vm_id, shared_vcpu=shared_vcpu, vcpus=vcpus, prev_total_vcpus=prev_total_vcpus,
                              expt_increase=expt_increase)
            vms.append(vm_id)

        LOG.tc_step("Evacuate vms")
        vm_helper.evacuate_vms(target_host, vms_to_check=vms, ping_vms=True)

        vm_hosts = []
        LOG.tc_step("Check shared vcpus and numa settings for vms after evacuation")
        for vm_ in vms:
            vm_host = vm_helper.get_vm_host(vm_id=vm_)
            vm_hosts.append(vm_host)

        if len(list(set(vm_hosts))) == 1:
            post_evac_expt_increase = pcpus * 2
        else:
            post_evac_expt_increase = pcpus

        for vm_ in vms:
            check_shared_vcpu(vm=vm_, expt_increase=post_evac_expt_increase,
                              prev_total_vcpus=prev_total_vcpus, shared_vcpu=shared_vcpu, vcpus=vcpus)