Code example #1
File: test_migrate.py  Project: pvaduva/auto_test
def test_migrate_stress(check_hypervisors, boot_source, count):

    LOG.tc_step("Launch a VM from {}".format(boot_source))
    vm = vm_helper.boot_vm(name='{}-stress'.format(boot_source), cleanup='function',
                           source=boot_source)[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm)

    block_mig = True if boot_source == 'image' else False
    if not block_mig:
        LOG.tc_step("Attempt to block migration on boot-from-volume VM and ensure if fails")
        code = vm_helper.live_migrate_vm(vm_id=vm, block_migrate=True)[0]
        assert code > 0, "Block migration passed unexpectedly for boot-from-volume vm"
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm)

    LOG.tc_step("Live migrate and ping vm 1000 times")
    for i in range(count):
        LOG.info('Live migration iter{}'.format(i+1))
        vm_helper.live_migrate_vm(vm)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm)

    LOG.tc_step("Cold migrate vm followed by live migrate {} times".format(count))
    for i in range(count):
        LOG.info('Cold+live migration iter{}'.format(i + 1))
        vm_helper.cold_migrate_vm(vm_id=vm)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm)

        vm_helper.live_migrate_vm(vm)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm)
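
The block-migration rejection check above (asserting a non-zero return code when block-migrating a boot-from-volume VM) is a reusable pattern. A minimal hedged sketch of such a helper, using only keywords that appear elsewhere in this listing (`block_migrate`, `fail_ok`); the helper name is an assumption, not part of the original suite:

def _assert_block_migrate_rejected(vm_id):
    # Sketch only: attempt a block live migration that is expected to fail
    # for a boot-from-volume VM, then confirm the VM is still reachable.
    code, msg = vm_helper.live_migrate_vm(vm_id=vm_id, block_migrate=True,
                                          fail_ok=True)
    assert code > 0, "Block migration passed unexpectedly: {}".format(msg)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id)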
Code example #2
def test_heat_vm_scale_after_actions(vm_scaling_stack, actions):
    """
    Test VM auto scaling with swact:
        Create heat stack for auto scaling using NestedAutoScale.yaml,  swact and perform vm scale up and down.

    Test Steps:
        - Create a heat stack for auto scaling vm (NestedAutoScale.yaml)
        - Verify heat stack is created successfully
        - Verify heat resources are created
        - live migrate the vm if not sx
        - cold migrate the vm if not sx
        - swact if not sx
        - reboot -f vm host
        - trigger auto scale by boosting cpu usage in the vm (using dd)
        - verify it scales up to the max number of vms (3)
        - trigger scale down by killing dd in the vm
        - verify the vms scale down to the min number (1)
        - Delete Heat stack and verify resource deletion
    """
    stack_name, vm_id = vm_scaling_stack
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id)

    if not system_helper.is_aio_simplex():
        actions = actions.split('-')
        if "swact" in actions:
            LOG.tc_step("Swact before scale in/out")
            host_helper.swact_host()

        if "live_migrate" in actions:
            LOG.tc_step("live migrate vm before scale in/out")
            vm_helper.live_migrate_vm(vm_id)

        if "cold_migrate" in actions:
            LOG.tc_step("cold migrate vm before scale in/out")
            vm_helper.cold_migrate_vm(vm_id)

    if "host_reboot" in actions:
        if system_helper.is_aio_simplex():
            host_helper.reboot_hosts('controller-0')
            vm_helper.wait_for_vm_status(vm_id,
                                         status=VMStatus.ACTIVE,
                                         timeout=600,
                                         check_interval=10,
                                         fail_ok=False)
            vm_helper.wait_for_vm_pingable_from_natbox(
                vm_id, timeout=VMTimeout.DHCP_RETRY)
        else:
            LOG.tc_step("evacuate vm before scale in/out")
            vm_host = vm_helper.get_vm_host(vm_id=vm_id)
            vm_helper.evacuate_vms(host=vm_host, vms_to_check=vm_id)

    LOG.tc_step(
        "Wait for {} vms to auto scale out to {} after running dd in vm(s)".
        format(stack_name, 3))
    vm_helper.wait_for_auto_vm_scale_out(stack_name, expt_max=3)

    LOG.tc_step(
        "Wait for {} vms to auto scale in to {} after killing dd processes in vms"
        .format(stack_name, 1))
    vm_helper.wait_for_auto_vm_scale_in(stack_name, expt_min=1)
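
The scale-out trigger described in the docstring (boosting CPU usage with dd) is hidden inside wait_for_auto_vm_scale_out. A rough sketch of what such a trigger could look like, reusing the ssh helpers shown later in this listing; the function name and the exact dd invocation are assumptions:

def _trigger_cpu_scale_out(vm_id):
    # Assumed trigger: burn CPU inside the guest so the heat scaling policy
    # fires; the command actually used by the real helper may differ.
    with vm_helper.ssh_to_vm_from_natbox(vm_id=vm_id) as vm_ssh:
        vm_ssh.exec_cmd('nohup dd if=/dev/zero of=/dev/null > /dev/null 2>&1 &')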
Code example #3
def _test_migrate_anti_affinity_vms_in_parallel():
    """
    Cold-migrate and live-migrate vms from an anti-affinity server group.
    Checks whether the heat stack is already launched and launches it if not,
    then finds the vms in the anti-affinity group and performs cold and live migration.

    """
    # First make sure heat stack is there:
    system_test_helper.launch_heat_stack()

    srv_grps_info = nova_helper.get_server_groups_info(headers=('Policies',
                                                                'Metadata',
                                                                'Members'))
    vms = []
    for group in srv_grps_info:
        policies, metadata, members = srv_grps_info[group]
        if members and 'anti-affinity' in policies and metadata[
                'wrs-sg:best_effort'] == 'false':
            if len(members) >= 10:
                vms = members[0:9]
            break
    else:
        skip("There are no VMs in anti-affinity server group")

    check_vm_hosts(vms=vms, policy='anti_affinity')

    for vm_id in vms:
        vm_helper.wait_for_vm_status(vm_id=vm_id, check_interval=10)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id)
        vm_helper.live_migrate_vm(vm_id=vm_id)
        vm_helper.cold_migrate_vm(vm_id=vm_id)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id)

    check_vm_hosts(vms=vms, policy='anti_affinity')
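
check_vm_hosts is not included in this listing; for the anti-affinity policy it presumably verifies that no two group members share a hypervisor. A hedged sketch of that check, built only on get_vm_host (the helper name is hypothetical):

def _check_anti_affinity_hosts(vms):
    # Hypothetical sketch: with an 'anti-affinity' policy, every VM in the
    # server group should be hosted on a different hypervisor.
    hosts = [vm_helper.get_vm_host(vm_id) for vm_id in vms]
    assert len(hosts) == len(set(hosts)), \
        "Anti-affinity violated, vm hosts: {}".format(hosts)
    return hosts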
Code example #4
def _perform_nova_actions(vms_dict, flavors, vfs=None):
    for vm_name, vm_id in vms_dict.items():
        LOG.tc_step("Cold migrate VM {} ....".format(vm_name))
        vm_helper.cold_migrate_vm(vm_id=vm_id)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

        LOG.tc_step("Live migrate VM {} ....".format(vm_name))
        expt_codes = [0] if 'vm_no_crypto' in vm_name else [1, 6]
        code, msg = vm_helper.live_migrate_vm(vm_id=vm_id, fail_ok=True)
        assert code in expt_codes, "Expect live migrate to fail for vm with pci device attached. Actual: {}".format(msg)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

        LOG.tc_step("Suspend/Resume VM {} ....".format(vm_name))
        vm_helper.suspend_vm(vm_id)
        vm_helper.resume_vm(vm_id)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

        if vfs is None:
            resize_flavor_id = flavors["flavor_resize_qat_vf_1"] if "no_crypto" not in vm_name else \
                flavors["flavor_resize_none"]
        else:
            resize_flavor_id = flavors['flavor_resize_qat_vf_{}'.format(vfs)]

        LOG.info("Resizing VM {} to new flavor {} ...".format(vm_name, resize_flavor_id))
        vm_helper.resize_vm(vm_id, resize_flavor_id)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id)
Code example #5
File: test_mempage_4k.py  Project: pvaduva/auto_test
def test_migrate_4k_vm_positive(ephemeral, swap, cpu_pol, vcpus, vm_type,
                                ensure_sufficient_4k_pages):
    """
    Test live and cold migrate 4k vm with various vm storage configurations
    Args:
        ephemeral (int):
        swap (int):
        cpu_pol (str):
        vcpus (int):
        vm_type (str): boot-from image or volume vm
        ensure_sufficient_4k_pages (tuple): module test fixture to configure 4k pages

    Setups:
        - Select at least 2 hosts with specified storage backing. e.g., local_image, or remote
        - Ensure 2 hosts are in nova zone (move rest to cgcsauto zone if more than 2)
        - Configure the 2 hosts with large amount of 4k pages

    Test Steps:
        - Create flavor with specified ephemeral, swap, cpu policy and vcpus
        - Boot a 4k vm from the given boot source with above flavor
        - Cold migrate the vm, ensure it is pingable and still using 4k pages
        - Live migrate the vm, ensure it is pingable and still using 4k pages

    """
    storage_backing, hosts = ensure_sufficient_4k_pages

    vm_id = _boot_vm_under_test(storage_backing, ephemeral, swap, cpu_pol,
                                vcpus, vm_type)

    LOG.tc_step("Cold migrate VM and ensure it succeeded")
    vm_helper.cold_migrate_vm(vm_id)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    __check_pagesize(vm_id)

    LOG.tc_step("Attempt to live migrate VM")
    vm_helper.live_migrate_vm(vm_id)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    __check_pagesize(vm_id)
Code example #6
File: ~test_fip.py  Project: pvaduva/auto_test
def obsolete_test_fip(fip_setups):
    """
    Test VM Floating IP  over VM launch, live-migration, cold-migration, pause/unpause, etc

    Args:
        fip_setups: test fixture

    Test Setups (module):
        - Create a floating ip
        - boot a vm
        - Attach floating ip to vm

    Test Steps:
        - Ping  VM FIP
        - Live-migrate the VM and verify ping from VM
        - Cold-migrate the VM and verify ping from VM
        - Pause and un-pause the VM and verify ping from VM
        - Suspend and resume the VM and verify ping from VM
        - Stop and start the VM and verify ping from VM
        - Reboot the VM and verify ping from VM
        - Ping  VM FIP

    Test Teardown:
        - Delete created FIP and vm (module)

    """
    vm_id, fip = fip_setups
    LOG.tc_step("Ping VM with Floating IP ")
    vm_helper.ping_ext_from_vm(vm_id, use_fip=True)

    LOG.tc_step("Live-migrate the VM and verify ping from VM")
    vm_helper.live_migrate_vm(vm_id)
    vm_helper.ping_ext_from_vm(vm_id, use_fip=True)

    LOG.tc_step("Cold-migrate the VM and verify ping from VM")
    vm_helper.cold_migrate_vm(vm_id)
    vm_helper.ping_ext_from_vm(vm_id, use_fip=True)

    LOG.tc_step("Pause and un-pause the VM and verify ping from VM")
    vm_helper.pause_vm(vm_id)
    vm_helper.unpause_vm(vm_id)
    vm_helper.ping_ext_from_vm(vm_id, use_fip=True)

    LOG.tc_step("Suspend and resume the VM and verify ping from VM")
    vm_helper.suspend_vm(vm_id)
    vm_helper.resume_vm(vm_id)
    vm_helper.ping_ext_from_vm(vm_id, use_fip=True)

    LOG.tc_step("Stop and start the VM and verify ping from VM")
    vm_helper.stop_vms(vm_id)
    vm_helper.start_vms(vm_id)
    vm_helper.ping_ext_from_vm(vm_id, use_fip=True)

    LOG.tc_step("Reboot the VM and verify ping from VM")
    vm_helper.reboot_vm(vm_id)
    vm_helper.ping_ext_from_vm(vm_id, use_fip=True)

    LOG.tc_step("Ping VM with Floating IP Ensure FIP reachable ")
    vm_helper.ping_ext_from_vm(vm_id, use_fip=True)
Code example #7
File: test_backup.py  Project: pvaduva/auto_test
def pb_migrate_test(backup_info, con_ssh, vm_ids=None):
    """
    Run migration test before doing system backup.

    Args:
        backup_info:
            - options for doing backup

        con_ssh:
            - current ssh connection

        vm_ids
    Return:
        None
    """

    hypervisors = host_helper.get_up_hypervisors(con_ssh=con_ssh)
    if len(hypervisors) < 2:
        LOG.info(
            'Only {} hypervisors, not enough to test migration'.format(
                len(hypervisors)))
        LOG.info('Skip migration test')
        return 0
    else:
        LOG.debug('There are {} hypervisors'.format(len(hypervisors)))

    LOG.info('Randomly choose some VMs and do migrate:')

    target = random.choice(vm_ids)
    LOG.info('-OK, test migration of VM:{}'.format(target))

    original_host = vm_helper.get_vm_host(target)
    LOG.info('Original host:{}'.format(original_host))

    vm_helper.live_migrate_vm(target)
    current_host = vm_helper.get_vm_host(target)
    LOG.info('After live-migration, host:{}'.format(current_host))

    if original_host == current_host:
        LOG.info('backup_info:{}'.format(backup_info))
        LOG.warn(
            'VM is still on its original host, live-migration failed? original host:{}'
            .format(original_host))

    original_host = current_host
    vm_helper.cold_migrate_vm(target)
    current_host = vm_helper.get_vm_host(target)
    LOG.info('After cold-migration, host:{}'.format(current_host))
    if original_host == current_host:
        LOG.warn(
            'VM is still on its original host, cold-migration failed? original host:{}'
            .format(original_host))
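
The live and cold blocks above repeat the same record-host, migrate, compare pattern. A small consolidating helper might look like this sketch (the name is an assumption; the calls are the ones used above):

def _migrate_and_check_host_change(vm_id, live=True):
    # Sketch: record the host, migrate, then warn if the VM did not move.
    before = vm_helper.get_vm_host(vm_id)
    if live:
        vm_helper.live_migrate_vm(vm_id)
    else:
        vm_helper.cold_migrate_vm(vm_id)
    after = vm_helper.get_vm_host(vm_id)
    if before == after:
        LOG.warn('VM is still on {} after {} migration'.format(
            before, 'live' if live else 'cold'))
    return after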
Code example #8
def _check_anti_affinity_vms():
    storage_backing, hosts = keywords.host_helper.get_storage_backing_with_max_hosts()
    best_effort = True if len(hosts) < 3 else False
    anti_affinity_vms = nova_helper.get_server_group_info(group_name='grp_anti_affinity', headers='Members')[0]

    check_vm_hosts(vms=anti_affinity_vms, policy='anti_affinity', best_effort=best_effort)

    vm_hosts = []
    for vm_id in anti_affinity_vms:
        vm_helper.wait_for_vm_status(vm_id=vm_id, check_interval=10)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id)

        vm_helper.live_migrate_vm(vm_id=vm_id)
        vm_helper.cold_migrate_vm(vm_id=vm_id)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id)

        vm_hosts.append(vm_helper.get_vm_host(vm_id))

    return vm_hosts, anti_affinity_vms
Code example #9
File: test_nova_helper.py  Project: pvaduva/auto_test
def test_boot_vm_on_host(add_host_to_zone):
    target_host = add_host_to_zone

    vm_id = vm_helper.boot_vm(name='cgcsauto_zone',
                              avail_zone='cgcsauto',
                              vm_host=target_host,
                              cleanup='function')[1]

    assert target_host == vm_helper.get_vm_host(vm_id=vm_id)

    res, msg = vm_helper.cold_migrate_vm(vm_id=vm_id, fail_ok=True)

    assert 1 == res, "Expect cold migration reject due to no other host in cgcsauto zone, actual result: {}".format(
        msg)
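
The add_host_to_zone fixture is not part of this listing; a hedged sketch of what it could do, using the aggregate helpers that appear in code example #22 below. The real fixture (including its teardown) may differ:

from pytest import fixture

@fixture(scope='module')
def add_host_to_zone():
    # Sketch: put a single hypervisor into a 'cgcsauto' availability zone so
    # the cold migration above has no alternative destination host.
    target_host = host_helper.get_up_hypervisors()[0]
    nova_helper.create_aggregate(name='cgcsauto', avail_zone='cgcsauto')
    nova_helper.add_hosts_to_aggregate(aggregate='cgcsauto', hosts=[target_host])
    # Teardown (removing the host and deleting the aggregate) is omitted here.
    return target_host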
Code example #10
def _check_affinity_vms():
    affinity_vms = nova_helper.get_server_group_info(group_name='grp_affinity', headers='Members')[0]
    vm_host = check_vm_hosts(vms=affinity_vms, policy='affinity')[0]

    for vm_id in affinity_vms:
        vm_helper.wait_for_vm_status(vm_id=vm_id, check_interval=10)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id)

        res, out = vm_helper.live_migrate_vm(vm_id=vm_id, fail_ok=True)
        assert res in (1, 2, 6), out

        res, out = vm_helper.cold_migrate_vm(vm_id=vm_id, fail_ok=True)
        assert res in (1, 2), out

        vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id)

    return vm_host, affinity_vms
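
For the affinity group the complementary expectation is that all members share one host; a hedged sketch mirroring the anti-affinity check earlier (the helper name is hypothetical):

def _check_affinity_hosts(vms):
    # Hypothetical sketch: with an 'affinity' policy, all group members
    # should be scheduled to the same hypervisor.
    hosts = set(vm_helper.get_vm_host(vm_id) for vm_id in vms)
    assert len(hosts) == 1, "Affinity violated, vm hosts: {}".format(hosts)
    return hosts.pop()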
Code example #11
def test_snat_vm_actions(snat_setups, snat):
    """
    Test VM external access over VM launch, live-migration, cold-migration, pause/unpause, etc

    Args:
        snat_setups (tuple): returns vm id and fip. Enable snat, create vm and attach floating ip.

    Test Setups (module):
        - Find a tenant router that is dvr or non-dvr based on the parameter
        - Enable SNAT on tenant router
        - boot a vm and attach a floating ip
        - Ping vm from NatBox

    Test Steps:
        - Enable/Disable SNAT based on snat param
        - Ping from VM to 8.8.8.8
        - wget <lab_fip> to VM
        - scp from NatBox to VM
        - Live-migrate the VM and verify ping from VM
        - Cold-migrate the VM and verify ping from VM
        - Pause and un-pause the VM and verify ping from VM
        - Suspend and resume the VM and verify ping from VM
        - Stop and start the VM and verify ping from VM
        - Reboot the VM and verify ping from VM

    Test Teardown:
        - Enable snat for next test in the same module     (function)
        - Delete the created vm     (module)
        - Disable snat  (module)

    """
    vm_ = snat_setups[0]
    snat = True if snat == 'snat_enabled' else False
    LOG.tc_step("Update tenant router external gateway to set SNAT to {}".format(snat))
    network_helper.set_router_gateway(enable_snat=snat)

    # Allow router update to complete, since we've seen cases where ping vm pass but ssh fail
    time.sleep(30)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_, timeout=60, use_fip=snat)

    LOG.tc_step("Ping from VM {} to 8.8.8.8".format(vm_))
    vm_helper.ping_ext_from_vm(vm_, use_fip=True)

    LOG.tc_step("wget to VM {}".format(vm_))
    with vm_helper.ssh_to_vm_from_natbox(vm_id=vm_, use_fip=True) as vm_ssh:
        vm_ssh.exec_cmd('wget google.ca', fail_ok=False)

    LOG.tc_step("scp from NatBox to VM {}".format(vm_))
    vm_fip = network_helper.get_external_ips_for_vms(vms=vm_)[0]
    natbox_ssh = NATBoxClient.get_natbox_client()
    natbox_ssh.scp_on_source(source_path='test', dest_user='******', dest_ip=vm_fip, dest_path='/tmp/',
                             dest_password='******', timeout=30)

    LOG.tc_step("Live-migrate the VM and verify ping from VM")
    vm_helper.live_migrate_vm(vm_)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_, use_fip=snat)
    vm_helper.ping_ext_from_vm(vm_, use_fip=True)

    LOG.tc_step("Cold-migrate the VM and verify ping from VM")
    vm_helper.cold_migrate_vm(vm_)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_, use_fip=snat)
    vm_helper.ping_ext_from_vm(vm_, use_fip=True)

    LOG.tc_step("Pause and un-pause the VM and verify ping from VM")
    vm_helper.pause_vm(vm_)
    vm_helper.unpause_vm(vm_)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_, use_fip=snat)
    vm_helper.ping_ext_from_vm(vm_, use_fip=True)

    LOG.tc_step("Suspend and resume the VM and verify ping from VM")
    vm_helper.suspend_vm(vm_)
    vm_helper.resume_vm(vm_)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_, use_fip=snat)
    vm_helper.ping_ext_from_vm(vm_, use_fip=True)

    LOG.tc_step("Stop and start the VM and verify ping from VM")
    vm_helper.stop_vms(vm_)
    vm_helper.start_vms(vm_)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_, use_fip=snat)
    vm_helper.ping_ext_from_vm(vm_, use_fip=True)

    LOG.tc_step("Reboot the VM and verify ping from VM")
    vm_helper.reboot_vm(vm_)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_, use_fip=snat)
    vm_helper.ping_ext_from_vm(vm_, use_fip=True)

    LOG.tc_step("Resize the vm to a flavor with 2 dedicated cpus and verify ping from VM")
    new_flv = nova_helper.create_flavor(name='ded', vcpus=2)[1]
    ResourceCleanup.add('flavor', new_flv, scope='module')
    nova_helper.set_flavor(new_flv, **{FlavorSpec.CPU_POLICY: 'dedicated'})

    vm_helper.resize_vm(vm_, new_flv)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_, use_fip=snat)
    vm_helper.ping_ext_from_vm(vm_, use_fip=True)
Code example #12
def perform_vm_operation(vm_type,
                         vm_id,
                         op='live_migration',
                         extra_specs='vtpm'):
    LOG.info('Perform action:{} to the VM, extra specs:{}'.format(
        op, extra_specs))

    op_table = {
        'live_migration':
        lambda x, y: vm_helper.live_migrate_vm(y),
        'cold_migration':
        lambda x, y: vm_helper.cold_migrate_vm(y),
        'stop_start':
        lambda x, y: (vm_helper.stop_vms(y), vm_helper.start_vms(y)),
        'suspend_resume':
        lambda x, y: (vm_helper.suspend_vm(y), vm_helper.resume_vm(y)),
        'pause_unpause':
        lambda x, y: (vm_helper.pause_vm(y), vm_helper.unpause_vm(y)),
        'reboot_host':
        lambda x, y: reboot_hosting_node(x, y, force_reboot=False),
        'soft_reboot':
        lambda x, y: vm_helper.reboot_vm(y, hard=False),
        'hard_reboot':
        lambda x, y: vm_helper.reboot_vm(y, hard=True),
        'lock_unlock':
        lambda x, y: lock_unlock_hosting_node(x, y, force_lock=False),
        'evacuate':
        lambda x, y: reboot_hosting_node(x, y, force_reboot=True),
    }

    if op in op_table:
        LOG.info('Perform action: {}'.format(op))
        op_table[op](vm_type, vm_id)

        return True

    elif op == 'resize_to_autorc':
        if vm_type == 'autorc':
            LOG.info(
                'resize from AUTO-RECOVERY to another AUTO-RECOVER flavor')

        to_flavor_id = get_flavor_id(vm_type, 'autorc2')

        LOG.info('TODO: vm_type={}, to_flavor_id={}'.format(
            vm_type, to_flavor_id))

        vm_helper.resize_vm(vm_id, to_flavor_id)

    elif op == 'resize_to_non_autorc':
        LOG.info('perform {} on type:{}, id:{}'.format(op, vm_type, vm_id))
        if vm_type == 'non_autorc2':
            LOG.warn(
                'resize from AUTO-RECOVERY to another AUTO-RECOVER flavor')

        to_flavor_id = get_flavor_id(vm_type, 'non_autorc2')
        vm_helper.resize_vm(vm_id, to_flavor_id)

    elif op == 'resize_to_non_vtpm':
        LOG.info('perform {} on type:{}, id:{}'.format(op, vm_type, vm_id))

        to_flavor_id = get_flavor_id(vm_type, 'non_vtpm')

        vm_helper.resize_vm(vm_id, to_flavor_id)

    else:
        LOG.fatal('Unsupported action: {}'.format(op))
        return False
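
A minimal usage sketch for the dispatch-table operations of perform_vm_operation (the wrapper name is an assumption; note that only table operations return True, the resize branches fall through and return None):

def _exercise_table_ops(vm_type, vm_id):
    # Sketch: run a few dispatch-table operations and check they were handled.
    for op in ('live_migration', 'cold_migration', 'stop_start'):
        assert perform_vm_operation(vm_type, vm_id, op=op), \
            "Operation {} was not performed".format(op)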
Code example #13
File: test_cpu_policy.py  Project: starlingx/test
def test_cpu_pol_vm_actions(flv_vcpus, cpu_pol, pol_source, boot_source):
    LOG.tc_step("Create flavor with {} vcpus".format(flv_vcpus))
    flavor_id = nova_helper.create_flavor(name='cpu_pol', vcpus=flv_vcpus)[1]
    ResourceCleanup.add('flavor', flavor_id)

    image_id = glance_helper.get_image_id_from_name(
        GuestImages.DEFAULT['guest'], strict=True)
    if cpu_pol is not None:
        if pol_source == 'flavor':
            specs = {FlavorSpec.CPU_POLICY: cpu_pol}

            LOG.tc_step("Set following extra specs: {}".format(specs))
            nova_helper.set_flavor(flavor_id, **specs)
        else:
            image_meta = {ImageMetadata.CPU_POLICY: cpu_pol}
            LOG.tc_step(
                "Create image with following metadata: {}".format(image_meta))
            image_id = glance_helper.create_image(
                name='cpu_pol_{}'.format(cpu_pol),
                cleanup='function',
                **image_meta)[1]
    if boot_source == 'volume':
        LOG.tc_step("Create a volume from image")
        source_id = cinder_helper.create_volume(name='cpu_pol_{}'.format(cpu_pol),
                                                source_id=image_id)[1]
        ResourceCleanup.add('volume', source_id)
    else:
        source_id = image_id

    prev_cpus = host_helper.get_vcpus_for_computes(field='used_now')

    LOG.tc_step(
        "Boot a vm from {} with above flavor and check vm topology is as "
        "expected".format(boot_source))
    vm_id = vm_helper.boot_vm(name='cpu_pol_{}_{}'.format(cpu_pol, flv_vcpus),
                              flavor=flavor_id,
                              source=boot_source,
                              source_id=source_id,
                              cleanup='function')[1]

    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    vm_host = vm_helper.get_vm_host(vm_id)
    check_helper.check_topology_of_vm(vm_id,
                                      vcpus=flv_vcpus,
                                      cpu_pol=cpu_pol,
                                      vm_host=vm_host,
                                      prev_total_cpus=prev_cpus[vm_host])

    LOG.tc_step("Suspend/Resume vm and check vm topology stays the same")
    vm_helper.suspend_vm(vm_id)
    vm_helper.resume_vm(vm_id)

    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    check_helper.check_topology_of_vm(vm_id,
                                      vcpus=flv_vcpus,
                                      cpu_pol=cpu_pol,
                                      vm_host=vm_host,
                                      prev_total_cpus=prev_cpus[vm_host])

    LOG.tc_step("Stop/Start vm and check vm topology stays the same")
    vm_helper.stop_vms(vm_id)
    vm_helper.start_vms(vm_id)

    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    prev_siblings = check_helper.check_topology_of_vm(
        vm_id,
        vcpus=flv_vcpus,
        cpu_pol=cpu_pol,
        vm_host=vm_host,
        prev_total_cpus=prev_cpus[vm_host])[1]

    LOG.tc_step("Live migrate vm and check vm topology stays the same")
    vm_helper.live_migrate_vm(vm_id=vm_id)

    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    vm_host = vm_helper.get_vm_host(vm_id)
    prev_siblings = prev_siblings if cpu_pol == 'dedicated' else None
    check_helper.check_topology_of_vm(vm_id,
                                      vcpus=flv_vcpus,
                                      cpu_pol=cpu_pol,
                                      vm_host=vm_host,
                                      prev_total_cpus=prev_cpus[vm_host],
                                      prev_siblings=prev_siblings)

    LOG.tc_step("Cold migrate vm and check vm topology stays the same")
    vm_helper.cold_migrate_vm(vm_id=vm_id)

    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    vm_host = vm_helper.get_vm_host(vm_id)
    check_helper.check_topology_of_vm(vm_id,
                                      vcpus=flv_vcpus,
                                      cpu_pol=cpu_pol,
                                      vm_host=vm_host,
                                      prev_total_cpus=prev_cpus[vm_host])
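
The flv_vcpus / cpu_pol / pol_source / boot_source arguments are normally supplied through parametrization; a hedged example of such a matrix (the combinations shown are illustrative assumptions, not the project's actual parameters):

from pytest import mark

@mark.parametrize(('flv_vcpus', 'cpu_pol', 'pol_source', 'boot_source'), [
    (2, 'dedicated', 'flavor', 'image'),
    (3, 'dedicated', 'image', 'volume'),
    (4, None, 'flavor', 'volume'),
])
def test_cpu_pol_vm_actions_sketch(flv_vcpus, cpu_pol, pol_source, boot_source):
    # Body would be the test shown above.
    ...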
Code example #14
    def test_pci_vm_nova_actions(self, pci_numa_affinity,
                                 pci_irq_affinity_mask, pci_alias,
                                 vif_model_check, pci_dev_numa_nodes):
        """
        Test vm actions on vm with multiple ports with given vif models on the same tenant network

        Args:

        Setups:
            - create a flavor with dedicated cpu policy (module)
            - choose one tenant network and one internal network to be used by test (module)
            - boot a base vm - vm1 with above flavor and networks, and ping it from NatBox (module)
            - Boot a vm under test - vm2 with above flavor and with multiple ports on same tenant network with base vm,
            and ping it from NatBox      (class)
            - Ping vm2's own data network ips        (class)
            - Ping vm2 from vm1 to verify management and data networks connection    (class)

        Test Steps:
            - Perform given actions on vm2 (migrate, start/stop, etc)
            - Verify ping from vm1 to vm2 over management and data networks still works
            - Verify the correct number of PCI devices are created, in correct types,
                    the numa node of the PCI devices aligns with that of CPUs, and affined CPUs for PCI devices
                    are same as specified by 'pci_alias' (if applicable)

        Teardown:
            - Delete created vms and flavor
        """
        pci_irq_affinity_mask, pci_alias = _convert_irqmask_pcialias(
            pci_irq_affinity_mask, pci_alias)
        boot_forbidden = False
        migrate_forbidden = False
        if pci_numa_affinity == 'required' and pci_alias is not None:
            host_count = pci_dev_numa_nodes
            if host_count == 0:
                boot_forbidden = True
            elif host_count == 1:
                migrate_forbidden = True
        LOG.tc_step(
            "Expected result - Disallow boot: {}; Disallow migrate: {}".format(
                boot_forbidden, migrate_forbidden))

        self.pci_numa_affinity = pci_numa_affinity
        self.pci_alias = pci_alias
        self.pci_irq_affinity_mask = pci_irq_affinity_mask

        if pci_alias is not None:
            LOG.info('Check if PCI-Alias devices existing')
            self.is_pci_device_supported(pci_alias)

        self.vif_model, self.base_vm, self.base_flavor_id, self.nics_to_test, self.seg_id, \
            self.pnet_name, self.extra_pcipt_net = vif_model_check

        LOG.tc_step(
            "Create a flavor with specified extra-specs and dedicated cpu policy"
        )
        flavor_id = self.create_flavor_for_pci()

        LOG.tc_step("Boot a vm with {} vif model on internal net".format(
            self.vif_model))
        # TODO: feature unavailable atm. Update required
        # resource_param = 'pci_vfs_used' if 'sriov' in self.vif_model else 'pci_pfs_used'
        # LOG.tc_step("Get resource usage for {} interface before booting VM(s)".format(self.vif_model))
        # pre_resource_value = nova_helper.get_provider_net_info(self.pnet_name, field=resource_param)

        res, vm_id, err = vm_helper.boot_vm(name=self.vif_model,
                                            flavor=flavor_id,
                                            cleanup='function',
                                            nics=self.nics_to_test,
                                            fail_ok=boot_forbidden)
        if boot_forbidden:
            assert res > 0, "VM booted successfully while the numa nodes for pcipt/sriov and pci alias mismatch"
            return

        self.vm_id = vm_id

        if 'pci-passthrough' == self.vif_model:
            LOG.tc_step("Add vlan to pci-passthrough interface for VM.")
            vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=vm_id,
                                                       net_seg_id=self.seg_id,
                                                       init_conf=True)

        LOG.tc_step("Ping vm over mgmt and internal nets from base vm")
        vm_helper.ping_vms_from_vm(to_vms=self.vm_id,
                                   from_vm=self.vm_id,
                                   net_types=['mgmt', 'internal'])
        vm_helper.ping_vms_from_vm(to_vms=self.vm_id,
                                   from_vm=self.base_vm,
                                   net_types=['mgmt', 'internal'])

        self.vm_topology = vm_helper.get_vm_values(
            vm_id=self.vm_id, fields='wrs-res:topology')[0]
        vnic_type = 'direct' if self.vif_model == 'pci-sriov' else 'direct-physical'
        self.pci_nics = vm_helper.get_vm_nics_info(vm_id=self.vm_id,
                                                   vnic_type=vnic_type)
        assert self.pci_nics

        self.wait_check_vm_states(step='boot')

        # TODO: feature unavailable atm. Update required
        # LOG.tc_step("Check {} usage is incremented by 1".format(resource_param))
        # post_resource_value = nova_helper.get_provider_net_info(self.pnet_name, field=resource_param)
        # expt_change = 2 if self.vif_model == 'pci-passthrough' and self.extra_pcipt_net else 1
        # assert pre_resource_value + expt_change == post_resource_value, "{} usage is not incremented by {} as " \
        #                                                                 "expected".format(resource_param, expt_change)

        LOG.tc_step('Pause/Unpause {} vm'.format(self.vif_model))
        vm_helper.pause_vm(self.vm_id)
        vm_helper.unpause_vm(self.vm_id)

        LOG.tc_step(
            "Check vm still pingable over mgmt and internal nets after pause/unpause"
        )
        self.wait_check_vm_states(step='pause/unpause')

        LOG.tc_step('Suspend/Resume {} vm'.format(self.vif_model))
        vm_helper.suspend_vm(self.vm_id)
        vm_helper.resume_vm(self.vm_id)

        LOG.tc_step(
            "Check vm still pingable over mgmt and internal nets after suspend/resume"
        )
        self.wait_check_vm_states(step='suspend/resume')
        if 'pci-passthrough' == self.vif_model:
            LOG.tc_step(
                "Add/Check vlan interface is added to pci-passthrough device for vm {}."
                .format(self.vm_id))
            vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=self.vm_id,
                                                       net_seg_id=self.seg_id)
        vm_helper.ping_vms_from_vm(to_vms=self.vm_id,
                                   from_vm=self.base_vm,
                                   net_types=['mgmt', 'internal'])

        LOG.tc_step('Cold migrate {} vm'.format(self.vif_model))
        code, msg = vm_helper.cold_migrate_vm(self.vm_id,
                                              fail_ok=migrate_forbidden)
        if migrate_forbidden:
            assert code > 0, "Expect migrate fail due to no other host has pcipt/sriov and pci-alias on same numa. " \
                             "Actual: {}".format(msg)
        self.wait_check_vm_states(step='cold-migrate')
        if 'pci-passthrough' == self.vif_model:
            LOG.tc_step(
                "Add/Check vlan interface is added to pci-passthrough device for vm {}."
                .format(self.vm_id))
            vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=self.vm_id,
                                                       net_seg_id=self.seg_id)

        LOG.tc_step(
            "Check vm still pingable over mgmt and internal nets after cold migration"
        )
        vm_helper.ping_vms_from_vm(to_vms=self.vm_id,
                                   from_vm=self.base_vm,
                                   net_types=['mgmt', 'internal'])

        LOG.tc_step('Set vm to error and wait for it to be auto recovered')
        vm_helper.set_vm_state(vm_id=self.vm_id,
                               error_state=True,
                               fail_ok=False)
        vm_helper.wait_for_vm_values(vm_id=self.vm_id,
                                     status=VMStatus.ACTIVE,
                                     fail_ok=False,
                                     timeout=600)

        LOG.tc_step(
            "Check vm still pingable over mgmt and internal nets after auto recovery"
        )
        self.wait_check_vm_states(step='set-error-state-recover')
        if 'pci-passthrough' == self.vif_model:
            LOG.tc_step(
                "Add/Check vlan interface is added to pci-passthrough device for vm {}."
                .format(self.vm_id))
            vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=self.vm_id,
                                                       net_seg_id=self.seg_id)
        vm_helper.ping_vms_from_vm(to_vms=self.vm_id,
                                   from_vm=self.base_vm,
                                   net_types=['mgmt', 'internal'])

        LOG.tc_step("Hard reboot {} vm".format(self.vif_model))
        vm_helper.reboot_vm(self.vm_id, hard=True)
        LOG.tc_step(
            "Check vm still pingable over mgmt and internal nets after nova reboot hard"
        )
        self.wait_check_vm_states(step='hard-reboot')
        if 'pci-passthrough' == self.vif_model:
            LOG.tc_step(
                "Add/Check vlan interface is added to pci-passthrough device for vm {}."
                .format(self.vm_id))
            vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=self.vm_id,
                                                       net_seg_id=self.seg_id)
        vm_helper.ping_vms_from_vm(to_vms=self.vm_id,
                                   from_vm=self.base_vm,
                                   net_types=['mgmt', 'internal'])

        LOG.tc_step(
            "Create a flavor with dedicated cpu policy and resize vm to new flavor"
        )
        resize_flavor = nova_helper.create_flavor(name='dedicated',
                                                  ram=2048,
                                                  cleanup='function')[1]
        extra_specs = {FlavorSpec.CPU_POLICY: 'dedicated'}
        nova_helper.set_flavor(flavor=resize_flavor, **extra_specs)
        vm_helper.resize_vm(self.vm_id, resize_flavor)

        LOG.tc_step("Check vm still reachable after resize")
        self.wait_check_vm_states(step='resize')
        if 'pci-passthrough' == self.vif_model:
            LOG.tc_step(
                "Add/Check vlan interface is added to pci-passthrough device for vm {}."
                .format(self.vm_id))
            vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=self.vm_id,
                                                       net_seg_id=self.seg_id)
        vm_helper.ping_vms_from_vm(to_vms=self.vm_id,
                                   from_vm=self.base_vm,
                                   net_types=['mgmt', 'internal'])
Code example #15
def _test_basic_swift_provisioning(pool_size, pre_swift_check):
    """
    Verifies basic swift provisioning works as expected
    Args:
        pool_size:
        pre_swift_check:

    Returns:

    """
    ceph_backend_info = get_ceph_backend_info()

    if pool_size == 'default' and pre_swift_check[0]:
        skip("Swift is already provisioned")

    if pool_size == 'fixed_size' and pre_swift_check[0]:
        skip("Swift is already provisioned and set to non-default pool value")

    object_pool_gib = None
    cinder_pool_gib = ceph_backend_info['cinder_pool_gib']

    if pool_size == 'default':
        if not ceph_backend_info['object_gateway']:
            LOG.tc_step("Enabling SWIFT object store .....")

    else:
        if not ceph_backend_info['object_gateway']:
            skip("Swift is not provisioned")

        total_gib = ceph_backend_info['ceph_total_space_gib']
        unallocated_gib = (total_gib - cinder_pool_gib -
                           ceph_backend_info['glance_pool_gib'] -
                           ceph_backend_info['ephemeral_pool_gib'])
        if unallocated_gib == 0:
            unallocated_gib = int(int(cinder_pool_gib) / 4)
            cinder_pool_gib = str(int(cinder_pool_gib) - unallocated_gib)
        elif unallocated_gib < 0:
            skip("Unallocated gib < 0. System is in unknown state.")

        object_pool_gib = str(unallocated_gib)
        LOG.tc_step(
            "Enabling SWIFT object store and setting object pool size to {}....."
            .format(object_pool_gib))

    rc, updated_backend_info = storage_helper.modify_storage_backend(
        'ceph',
        object_gateway=False,
        cinder=cinder_pool_gib,
        object_gib=object_pool_gib,
        services='cinder,glance,nova,swift')

    LOG.info("Verifying if swift object gateway is enabled...")
    assert str(updated_backend_info['object_gateway']).lower() == 'true', "Fail to enable Swift object gateway: {}"\
        .format(updated_backend_info)
    LOG.info("Swift object gateway is enabled.")

    LOG.info("Verifying ceph task ...")
    state = storage_helper.get_storage_backends(backend='ceph',
                                                field='state')[0]
    if system_helper.wait_for_alarm(alarm_id=EventLogID.CONFIG_OUT_OF_DATE,
                                    timeout=10,
                                    fail_ok=True,
                                    entity_id='controller-')[0]:
        LOG.info("Verifying ceph task is set to 'add-object-gateway'...")
        assert BackendState.CONFIGURING == state, \
            "Unexpected ceph state '{}' after swift object gateway update ".format(state)

        LOG.info("Lock/Unlock controllers...")
        active_controller, standby_controller = system_helper.get_active_standby_controllers(
        )
        LOG.info("Active Controller is {}; Standby Controller is {}...".format(
            active_controller, standby_controller))

        for controller in [standby_controller, active_controller]:
            if not controller:
                continue
            HostsToRecover.add(controller)
            host_helper.lock_host(controller, swact=True)
            storage_helper.wait_for_storage_backend_vals(
                backend='ceph-store',
                **{
                    'task': BackendTask.RECONFIG_CONTROLLER,
                    'state': BackendState.CONFIGURING
                })
            host_helper.unlock_host(controller)

        system_helper.wait_for_alarm_gone(
            alarm_id=EventLogID.CONFIG_OUT_OF_DATE, fail_ok=False)
    else:
        assert BackendState.CONFIGURED == state, \
            "Unexpected ceph state '{}' after swift object gateway update ".format(state)

    LOG.info("Verifying Swift provisioning setups...")
    assert verify_swift_object_setup(), "Failure in swift setups"

    for i in range(3):
        vm_name = 'vm_swift_api_{}'.format(i)
        LOG.tc_step(
            "Boot vm {} and perform nova actions on it".format(vm_name))
        vm_id = vm_helper.boot_vm(name=vm_name, cleanup='function')[1]
        vm_helper.wait_for_vm_pingable_from_natbox(
            vm_id, timeout=VMTimeout.DHCP_RETRY)

        LOG.info("Cold migrate VM {} ....".format(vm_name))
        rc = vm_helper.cold_migrate_vm(vm_id=vm_id)[0]
        assert rc == 0, "VM {} failed to cold migrate".format(vm_name)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

        LOG.info("Live migrate VM {} ....".format(vm_name))
        rc = vm_helper.live_migrate_vm(vm_id=vm_id)[0]
        assert rc == 0, "VM {} failed to live migrate".format(vm_name)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

        LOG.info("Suspend/Resume VM {} ....".format(vm_name))
        vm_helper.suspend_vm(vm_id)
        vm_helper.resume_vm(vm_id)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

    LOG.info("Checking overall system health...")
    assert system_helper.get_system_health_query(
    ), "System health not OK after VMs"

    LOG.tc_step("Create Swift container using swift post cli command ...")
    container_names = [
        "test_container_1", "test_container_2", "test_container_3"
    ]

    for container in container_names:
        LOG.info("Creating swift object container {}".format(container))
        rc, out = swift_helper.create_swift_container(container)
        assert rc == 0, "Fail to create swift container {}".format(container)
        LOG.info(
            "Create swift object container {} successfully".format(container))

    LOG.tc_step("Verify swift list to list containers ...")
    container_list = swift_helper.get_swift_containers()[1]
    assert set(container_names) <= set(container_list), "Swift containers {} not listed in {}"\
        .format(container_names, container_list)

    LOG.tc_step("Verify swift delete a container...")
    container_to_delete = container_names[2]
    rc, out = swift_helper.delete_swift_container(container_to_delete)
    assert rc == 0, "Swift delete container rejected: {}".format(out)
    assert container_to_delete not in swift_helper.get_swift_containers()[1], "Unable to delete swift container {}"\
        .format(container_to_delete)

    LOG.tc_step("Verify swift stat to show info of a single container...")
    container_to_stat = container_names[0]
    out = swift_helper.get_swift_container_stat_info(container_to_stat)
    assert out["Container"] == container_to_stat, "Unable to stat swift container {}"\
        .format(container_to_stat)
    assert out["Objects"] == '0', "Incorrect number of objects container {}. Expected O objects, but has {} objects"\
        .format(container_to_stat, out["Objects"])
Code example #16
def test_vm_with_config_drive(hosts_per_stor_backing):
    """
    Skip Condition:
        - no host with local_image backend

    Test Steps:
        - Launch a vm using config drive
        - Add test data to config drive on vm
        - Do some operations (reboot vm for simplex, cold migrate and lock host for non-simplex) and
            check test data persisted in config drive after each operation
    Teardown:
        - Delete created vm, volume, flavor

    """
    guest_os = 'cgcs-guest'
    # guest_os = 'tis-centos-guest'  # CGTS-6782
    img_id = glance_helper.get_guest_image(guest_os)
    hosts_num = len(hosts_per_stor_backing.get('local_image', []))
    if hosts_num < 1:
        skip("No host with local_image storage backing")

    volume_id = cinder_helper.create_volume(name='vol_inst1',
                                            source_id=img_id,
                                            guest_image=guest_os)[1]
    ResourceCleanup.add('volume', volume_id, scope='function')

    block_device = {
        'source': 'volume',
        'dest': 'volume',
        'id': volume_id,
        'device': 'vda'
    }
    vm_id = vm_helper.boot_vm(name='config_drive',
                              config_drive=True,
                              block_device=block_device,
                              cleanup='function',
                              guest_os=guest_os,
                              meta={'foo': 'bar'})[1]

    LOG.tc_step("Confirming the config drive is set to True in vm ...")
    assert str(vm_helper.get_vm_values(vm_id, "config_drive")[0]) == 'True', \
        "vm config-drive not true"

    LOG.tc_step("Add date to config drive ...")
    check_vm_config_drive_data(vm_id)

    vm_host = vm_helper.get_vm_host(vm_id)
    instance_name = vm_helper.get_vm_instance_name(vm_id)
    LOG.tc_step("Check config_drive vm files on hypervisor after vm launch")
    check_vm_files_on_hypervisor(vm_id,
                                 vm_host=vm_host,
                                 instance_name=instance_name)

    if not system_helper.is_aio_simplex():
        LOG.tc_step("Cold migrate VM")
        vm_helper.cold_migrate_vm(vm_id)

        LOG.tc_step("Check config drive after cold migrate VM...")
        check_vm_config_drive_data(vm_id)

        LOG.tc_step("Lock the compute host")
        compute_host = vm_helper.get_vm_host(vm_id)
        HostsToRecover.add(compute_host)
        host_helper.lock_host(compute_host, swact=True)

        LOG.tc_step("Check config drive after locking VM host")
        check_vm_config_drive_data(vm_id, ping_timeout=VMTimeout.DHCP_RETRY)
        vm_host = vm_helper.get_vm_host(vm_id)

    else:
        LOG.tc_step("Reboot vm")
        vm_helper.reboot_vm(vm_id)

        LOG.tc_step("Check config drive after vm rebooted")
        check_vm_config_drive_data(vm_id)

    LOG.tc_step("Check vm files exist after nova operations")
    check_vm_files_on_hypervisor(vm_id,
                                 vm_host=vm_host,
                                 instance_name=instance_name)
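
check_vm_config_drive_data is not included in this listing; a heavily hedged sketch of such a check, mounting the config drive inside the guest (the standard 'config-2' label, the metadata path, and the default timeout are assumptions) and looking for the metadata injected at boot (meta={'foo': 'bar'}):

def _check_config_drive_metadata_sketch(vm_id, ping_timeout=600):
    # Sketch only; the project's check_vm_config_drive_data may differ.
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id, timeout=ping_timeout)
    with vm_helper.ssh_to_vm_from_natbox(vm_id=vm_id) as vm_ssh:
        # Config drives are conventionally labelled 'config-2' (assumed here).
        vm_ssh.exec_cmd('mkdir -p /mnt/cfgdrv && mount -L config-2 /mnt/cfgdrv')
        out = vm_ssh.exec_cmd(
            'cat /mnt/cfgdrv/openstack/latest/meta_data.json')[1]
        assert 'foo' in out and 'bar' in out, \
            "Injected metadata not found in config drive"
        vm_ssh.exec_cmd('umount /mnt/cfgdrv')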
Code example #17
File: ~test_shared_cpu.py  Project: pvaduva/auto_test
    def test_launch_vm_with_shared_cpu(self, vcpus, shared_vcpu, error, add_shared_cpu, origin_total_vcpus):
        """
        Test boot vm cli returns error when system does not meet the shared cpu requirement(s) in given flavor

        Args:
            vcpus (int): number of vcpus to set when creating flavor
            shared_vcpu (int):
            error
            add_shared_cpu
            origin_total_vcpus

        Setup:
            - Configure one compute to have shared cpus via 'system host-cpu-modify -f shared p0=1,p1=1 <hostname>'

        Test Steps:
            - Create flavor with given number of vcpus
            - Add specific cpu_policy, number of numa nodes, nume_node.0 , shared_vcpu values to flavor extra specs
            - Boot a vm with the flavor
            - Ensure vm is booted successfully
            - Validate the shared cpu
            - Live migrate the vm
            - Re-validate the shared cpu
            - Cold migrate the vm
            - Re-validate the shared cpu

        Teardown:
            - Delete created vm if any (function)
            - Delete created volume if any (module)
            - Set shared cpus to 0 (default setting) on the compute node under test (module)

        """
        storage_backing, shared_cpu_hosts, max_vcpus_per_proc = add_shared_cpu
        LOG.tc_step("Create a flavor with given number of vcpus")

        flavor = create_shared_flavor(vcpus, storage_backing=storage_backing, shared_vcpu=shared_vcpu)

        LOG.tc_step("Boot a vm with above flavor")
        code, vm_id, output = vm_helper.boot_vm(name='shared_cpu', flavor=flavor, fail_ok=True, cleanup='function')

        if error:
            LOG.tc_step("Check vm boot fail")
            assert 1 == code, "Expect error vm. Actual result: {}".format(output)
            LOG.tc_step("Ensure vm is in error state with expected fault message in nova show")
            vm_helper.wait_for_vm_values(vm_id, 10, status='ERROR', fail_ok=False)
            actual_fault = vm_helper.get_vm_fault_message(vm_id)
            expt_fault = 'shared vcpu with 0 requested dedicated vcpus is not allowed'
            assert expt_fault in actual_fault, "Expected fault message mismatch"
            return

        LOG.tc_step("Check vm booted successfully and shared cpu indicated in vm-topology")
        assert 0 == code, "Boot vm failed. Details: {}".format(output)

        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
        check_shared_vcpu(vm=vm_id, shared_vcpu=shared_vcpu, vcpus=vcpus, prev_total_vcpus=origin_total_vcpus)

        # live migrate
        LOG.tc_step("Live migrate vm and then ping vm from NatBox")
        vm_helper.live_migrate_vm(vm_id)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
        check_shared_vcpu(vm=vm_id, shared_vcpu=shared_vcpu, vcpus=vcpus, prev_total_vcpus=origin_total_vcpus)

        # cold migrate
        LOG.tc_step("Cold migrate vm and then ping vm from NatBox")
        vm_helper.cold_migrate_vm(vm_id)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
        check_shared_vcpu(vm=vm_id, shared_vcpu=shared_vcpu, vcpus=vcpus, prev_total_vcpus=origin_total_vcpus)
Code example #18
def test_cold_migrate():
    vm_id = vm_helper.launch_vms_via_script()[0]
    vm_helper.cold_migrate_vm(vm_id)
    vm_helper.cold_migrate_vm(vm_id, revert=True)
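
cold_migrate_vm with revert=True triggers a migration and then reverts it. A hedged sketch of an additional host check one could bolt onto this test; the expectation below is reasonable but has not been verified against the helper's exact semantics:

def test_cold_migrate_revert_host_check():
    # Sketch: a reverted cold migration should leave the VM on the host it
    # started from, while a confirmed one normally moves it.
    vm_id = vm_helper.launch_vms_via_script()[0]

    host_before = vm_helper.get_vm_host(vm_id)
    vm_helper.cold_migrate_vm(vm_id, revert=True)
    assert host_before == vm_helper.get_vm_host(vm_id), \
        "VM host changed after reverted cold migration"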
Code example #19
def test_vm_vcpu_model(vcpu_model, vcpu_source, boot_source,
                       cpu_models_supported):
    """
    Test vcpu model specified in flavor will be applied to vm. In case host does not support specified vcpu model,
    proper error message should be displayed in nova show.

    Args:
        vcpu_model
        vcpu_source
        boot_source

    Test Steps:
        - Set flavor extra spec or image metadata with given vcpu model.
        - Boot a vm from volume/image
        - Stop and then start vm and ensure that it retains its cpu model
        - If vcpu model is supported by host,
            - Check vcpu model specified in flavor/image is used by vm via virsh, ps aux (and /proc/cpuinfo)
            - Live migrate vm and check vcpu model again
            - Cold migrate vm and check vcpu model again
        - If vcpu model is not supported by host, check proper error message is included if host does not
            support specified vcpu model.
    Teardown:
        - Delete created vm, volume, image, flavor

    """
    cpu_models_multi_host, all_cpu_models_supported = cpu_models_supported
    flv_model = vcpu_model if vcpu_source == 'flavor' else None
    img_model = vcpu_model if vcpu_source == 'image' else None
    code, vm, msg = _boot_vm_vcpu_model(flv_model=flv_model,
                                        img_model=img_model,
                                        boot_source=boot_source)

    is_supported = (not vcpu_model) or (vcpu_model == 'Passthrough') or (
        vcpu_model in all_cpu_models_supported)
    if not is_supported:
        LOG.tc_step(
            "Check vm in error state due to vcpu model unsupported by hosts.")
        assert 1 == code, "boot vm cli exit code is not 1. Actual fail reason: {}".format(
            msg)

        expt_fault = VCPUSchedulerErr.CPU_MODEL_UNAVAIL
        res_bool, vals = vm_helper.wait_for_vm_values(vm,
                                                      10,
                                                      regex=True,
                                                      strict=False,
                                                      status='ERROR')
        err = vm_helper.get_vm_fault_message(vm)

        assert res_bool, "VM did not reach expected error state. Actual: {}".format(
            vals)
        assert re.search(expt_fault, err), "Incorrect fault reported. Expected: {} Actual: {}" \
            .format(expt_fault, err)
        return

    # System supports specified vcpu, continue to verify
    expt_arch = None
    if vcpu_model == 'Passthrough':
        host = vm_helper.get_vm_host(vm)
        expt_arch = host_helper.get_host_cpu_model(host)

    LOG.tc_step("Check vm is launched with expected vcpu model")
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm)
    check_vm_cpu_model(vm_id=vm, vcpu_model=vcpu_model, expt_arch=expt_arch)

    multi_hosts_supported = (not vcpu_model) or (vcpu_model in cpu_models_multi_host) or \
                            (vcpu_model == 'Passthrough' and cpu_models_multi_host)
    # TC5141
    LOG.tc_step(
        "Stop and then restart vm and check if it retains its vcpu model")
    vm_helper.stop_vms(vm)
    vm_helper.start_vms(vm)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm)
    check_vm_cpu_model(vm_id=vm, vcpu_model=vcpu_model, expt_arch=expt_arch)

    if not multi_hosts_supported:
        LOG.info(
            "Skip migration steps. Less than two hosts in same storage aggregate support {}"
            .format(vcpu_model))
        return

    LOG.tc_step(
        "Live (block) migrate vm and check {} vcpu model".format(vcpu_model))
    vm_helper.live_migrate_vm(vm_id=vm)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm)
    check_vm_cpu_model(vm, vcpu_model, expt_arch=expt_arch)

    LOG.tc_step("Cold migrate vm and check {} vcpu model".format(vcpu_model))
    vm_helper.cold_migrate_vm(vm_id=vm)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm)
    check_vm_cpu_model(vm, vcpu_model, expt_arch=expt_arch)
Code example #20
 def operation_cold(vm_id_):
     code, msg = vm_helper.cold_migrate_vm(vm_id=vm_id_)
     assert 0 == code, msg
     vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id_)
     vm_helper.ping_between_routed_vms(vm_id, vm_observer, vshell=False)
Code example #21
def test_instantiate_a_vm_with_a_large_volume_and_cold_migrate(
        vms_, pre_alarm_):
    """
    Test instantiate a vm with a large volume ( 20 GB and 40 GB) and cold
    migrate:
    Args:
        vms_ (dict): vms created by vms_ fixture
        pre_alarm_ (list): alarm lists obtained by pre_alarm_ fixture

    Test Setups:
    - get tenant1 and management networks which are already created for lab
    setup
    - get or create a "small" flavor
    - get the guest image id
    - create two large volumes (20 GB and 40 GB) in cinder
    - boot two vms ( test_inst1, test_inst2) using  volumes 20 GB and 40 GB
    respectively


    Test Steps:
    - Verify VM status is ACTIVE
    - Validate that VMs boot, and that no timeouts or error status occur.
    - Verify the VM can be pinged from NATBOX
    - Verify login to VM and rootfs (dev/vda) filesystem is rw mode
    - Attempt to cold migrate of VMs
    - Validate that the VMs migrated and no errors or alarms are present
    - Log into both VMs and validate that file systems are read-write
    - Terminate VMs

    Skip conditions:
    - less than two hosts with the same storage backing
    - less than two computes
    - no storage node

    """
    LOG.tc_step("Instantiate a vm with large volume.....")

    vms = vms_

    for vm in vms:
        vm_id = vm['id']

        LOG.tc_step(
            "Checking VM status; VM Instance id is: {}......".format(vm_id))
        vm_state = vm_helper.get_vm_status(vm_id)

        assert vm_state == VMStatus.ACTIVE, 'VM {} state is {}; not in ' \
                                            'ACTIVE state as expected' \
            .format(vm_id, vm_state)

        LOG.tc_step("Verify  VM can be pinged from NAT box...")
        rc, boot_time = check_vm_boot_time(vm_id)
        assert rc, "VM is not pingable after {} seconds ".format(boot_time)

        LOG.tc_step("Verify Login to VM and check filesystem is rw mode....")
        assert is_vm_filesystem_rw(
            vm_id), 'rootfs filesystem is not RW as expected for VM {}' \
            .format(vm['display_name'])

        LOG.tc_step(
            "Attempting  cold migration; vm id = {}; vm_name = {} ....".format(
                vm_id, vm['display_name']))

        code, msg = vm_helper.cold_migrate_vm(vm_id=vm_id, fail_ok=True)
        LOG.tc_step("Verify cold migration succeeded...")
        assert code == 0, "Expected return code 0. Actual return code: {}; " \
                          "details: {}".format(code, msg)

        LOG.tc_step(
            "Verifying  filesystem is rw mode after cold migration....")
        assert is_vm_filesystem_rw(
            vm_id), 'After cold migration rootfs filesystem is not RW as ' \
                    'expected for ' \
                    'VM {}'.format(vm['display_name'])
Code example #22
def test_dynamic_vxlan_functional(version, mode):
    """
        Vxlan feature test cases

        Test Steps:
            - Make sure Vxlan provider net is configured only on Internal net
            - Find out a internal network that matches the vxlan mode and IP version
            - Use the mgmt-net and the internal net to create vms for tenant-1 and tenant-2
            - Make sure the vms are occupied on separate hosts achieved with host-aggregates
            - ssh to the compute where the vm is hosted to check the vshell stats
            - Ping from the vm and check the stats for known-vtep on the compute
            - Ping from the vm to a unknown IP and check compute for stats


        Test Teardown:
            - Delete vms, volumes created

    """
    vxlan_provider_name = 'group0-data0b'
    vif_model = 'avp'
    providernets = system_helper.get_data_networks(field='name', network_type='vxlan')
    if not providernets or (len(providernets) > 1) or (vxlan_provider_name not in providernets):
        skip("Vxlan provider-net not configured or Vxlan provider-net configured on more than one provider net\
         or not configurd on internal net")

    # get the id of the provider net
    vxlan_provider_net_id = system_helper.get_data_networks(field='id', type='vxlan')
    vm_ids = []

    # get 2 computes so we can create the aggregate and force vm occupancy
    computes = host_helper.get_up_hypervisors()

    if len(computes) < 2:
        skip(" Need at least 2 computes to run the Vxlan test cases")

    aggregate_name = 'vxlan'
    vxlan_computes = computes[0:2]

    # create aggregate with 2 computes
    ret_val = nova_helper.create_aggregate(name=aggregate_name, avail_zone=aggregate_name)[1]
    assert ret_val == aggregate_name, "Aggregate is not created as expected."
    ResourceCleanup.add('aggregate', aggregate_name)

    nova_helper.add_hosts_to_aggregate(aggregate=aggregate_name, hosts=vxlan_computes)

    for compute in computes:
        assert 0 == clear_vxlan_endpoint_stats(compute), "clear stats failed"

    LOG.tc_step("Getting Internal net ids.")
    internal_net_ids = network_helper.get_internal_net_ids_on_vxlan(vxlan_provider_net_id=vxlan_provider_net_id,
                                                                    ip_version=version, mode=mode)
    if not internal_net_ids:
        skip("No networks found for ip version {} on the vxlan provider net".format(version))

    LOG.tc_step("Creating vms for both tenants.")
    primary_tenant = Tenant.get_primary()
    other_tenant = Tenant.get_secondary()

    for auth_info, vm_host in zip([primary_tenant, other_tenant], vxlan_computes):
        mgmt_net_id = network_helper.get_mgmt_net_id(auth_info=auth_info)
        nics = [{'net-id': mgmt_net_id},
                {'net-id': internal_net_ids[0], 'vif-model': vif_model}]
        vm_name = common.get_unique_name(name_str='vxlan')
        vm_ids.append(vm_helper.boot_vm(name=vm_name, vm_host=vm_host, nics=nics, avail_zone=aggregate_name,
                                        auth_info=auth_info, cleanup='function')[1])

    # make sure the VMs are not on the same compute; not strictly needed, but double check just in case:
    if vm_helper.get_vm_host(vm_id=vm_ids[0]) == vm_helper.get_vm_host(vm_id=vm_ids[1]):
        vm_helper.cold_migrate_vm(vm_id=vm_ids[0])

    filter_known_vtep = 'packets-unicast'
    filter_stat_at_boot = 'packets-multicast'
    filter_unknown_vtep = 'packets-multicast'

    if mode == 'static':
        filter_stat_at_boot = 'packets-unicast'
        filter_unknown_vtep = 'packets-unicast'
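    # Assumption (standard vxlan behaviour): with statically configured VTEPs
    # there is no multicast-based learning, so traffic shows up in the unicast
    # counters; in dynamic (multicast-learning) mode the multicast counters are
    # the ones expected to increment.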

    LOG.tc_step("Checking stats on computes after vms are launched.")
    for compute in computes:
        stats_after_boot_vm = get_vxlan_endpoint_stats(compute, field=filter_stat_at_boot)
        if len(stats_after_boot_vm) == 3:
            stats = int(stats_after_boot_vm[1]) + int(stats_after_boot_vm[2])
            LOG.info("Got the stats for {} packets after vm launch: {}".format(filter_stat_at_boot, stats))
        elif len(stats_after_boot_vm) == 2:
            stats = int(stats_after_boot_vm[1])
            LOG.info("Got the stats for {} packets after vm launch: {}".format(filter_stat_at_boot, stats))
        else:
            assert 0, "Failed to get stats from compute"
        assert 0 < int(stats), "stats are not incremented as expected"

    # clear stats
    LOG.tc_step("Clearing vxlan-endpoint-stats on computes: {}".format(computes))
    for compute in computes:
        assert 0 == clear_vxlan_endpoint_stats(compute), "clear stats failed"

    # Ping b/w vm over Internal nets and check stats, ping from 2nd vm
    LOG.tc_step("Ping between two vms over internal network")
    vm_helper.ping_vms_from_vm(to_vms=vm_ids[0], from_vm=vm_ids[1], net_types=['internal'])

    stats_after_ping = get_vxlan_endpoint_stats(computes[0], field=filter_known_vtep)
    if not stats_after_ping:
        assert 0, "Compute stats are empty"

    LOG.tc_step("Checking stats on computes after vm ping over the internal net.")
    if len(stats_after_ping) == 3:
        stats_known_vtep = int(stats_after_ping[1]) + int(stats_after_ping[2])
        LOG.info("Got the stats for {} packets after ping: {}".format(filter_known_vtep, stats_known_vtep))
    elif len(stats_after_ping) == 2:
        stats_known_vtep = int(stats_after_ping[1])
        LOG.info("Got the stats for {} packets after ping: {}".format(filter_known_vtep, stats_known_vtep))
    else:
        assert 0, "Failed to get stats from compute"
    assert 0 < int(stats_known_vtep), "stats are not incremented as expected"

    # clear stats
    LOG.tc_step("Clearing vxlan-endpoint-stats on computes: {}".format(computes))
    for compute in computes:
        assert 0 == clear_vxlan_endpoint_stats(compute), "clear stats failed"

    # ping unknown IP over the internal net and check stats
    LOG.tc_step("Ping to an unknown IP from vms over internal network")
    unknown_ip = '10.10.10.30'
    with vm_helper.ssh_to_vm_from_natbox(vm_ids[1]) as vm2_ssh:
        LOG.tc_step("Ping unknown ip from guest")
        cmd = 'ping -I eth1 -c 5 {}'.format(unknown_ip)
        code, output = vm2_ssh.exec_cmd(cmd=cmd, expect_timeout=60)
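        # ping exits non-zero when it receives no replies, so rc > 0 below
        # confirms the unknown IP was unreachable as expected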
        assert int(code) > 0, "Expected to see 100% ping failure"

    LOG.tc_step("Checking stats on computes after vm ping on unknown IP.")
    stats_after_ping_unknown_vtep = get_vxlan_endpoint_stats(computes[1], field=filter_unknown_vtep)
    if not stats_after_ping_unknown_vtep:
        assert 0, "Compute stats are empty"

    if len(stats_after_ping_unknown_vtep) == 3:
        stats_unknown_vtep = int(stats_after_ping_unknown_vtep[1]) + int(stats_after_ping_unknown_vtep[2])
        LOG.info("Got the stats for {} packets after ping to unknown vtep: {}".format(
            filter_unknown_vtep, stats_unknown_vtep))
    elif len(stats_after_ping_unknown_vtep) == 2:
        stats_unknown_vtep = int(stats_after_ping_unknown_vtep[1])
        LOG.info("Got the stats for {} packets after ping to unknown vtep: {}".format(
            filter_unknown_vtep, stats_unknown_vtep))
    else:
        assert 0, "Failed to get stats from compute"
    assert 0 < int(stats_unknown_vtep), "stats are not incremented as expected"
Code example #23
0
def test_instantiate_a_vm_with_multiple_volumes_and_migrate():
    """
    Test a vm with multiple volumes for live migration, cold migration and evacuation:

    Test Setups:
    - get guest image_id
    - get or create 'small' flavor_id
    - get tenant and management network ids

    Test Steps:
    - create volume for boot and another extra size 8GB
    - boot vms from the created volume
    - Validate that VMs boot, and that no timeouts or error status occur.
    - Verify VM status is ACTIVE
    - Attach the second volume to VM
    - Attempt live migrate  VM
    - Login to VM and verify the filesystem is rw mode on both volumes
    - Attempt cold migrate  VM
    - Login to VM and verify the filesystem is rw mode on both volumes
    - Reboot the compute host to initiate evacuation
    - Login to VM and verify the filesystem is rw mode on both volumes
    - Terminate VMs

    Skip conditions:
    - less than two computes
    - less than one storage

    """
    # skip("Currently not working. Centos image doesn't see both volumes")
    LOG.tc_step("Creating a volume size=8GB.....")
    vol_id_0 = cinder_helper.create_volume(size=8)[1]
    ResourceCleanup.add('volume', vol_id_0, scope='function')

    LOG.tc_step("Creating a second volume size=8GB.....")
    vol_id_1 = cinder_helper.create_volume(size=8, bootable=False)[1]
    LOG.tc_step("Volume id is: {}".format(vol_id_1))
    ResourceCleanup.add('volume', vol_id_1, scope='function')

    LOG.tc_step("Booting instance vm_0...")

    vm_id = vm_helper.boot_vm(name='vm_0',
                              source='volume',
                              source_id=vol_id_0,
                              cleanup='function')[1]
    time.sleep(5)

    LOG.tc_step("Verify  VM can be pinged from NAT box...")
    rc, boot_time = check_vm_boot_time(vm_id)
    assert rc, "VM is not pingable after {} seconds ".format(boot_time)

    LOG.tc_step("Login to VM and to check filesystem is rw mode....")
    assert is_vm_filesystem_rw(
        vm_id), 'vol_0 rootfs filesystem is not RW as expected.'

    LOG.tc_step("Attemping to attach a second volume to VM...")
    vm_helper.attach_vol_to_vm(vm_id, vol_id_1)

    LOG.tc_step(
        "Login to VM and check filesystem is rw mode for both volumes....")
    assert is_vm_filesystem_rw(vm_id, rootfs=['vda',
                                              'vdb']), 'volumes rootfs ' \
                                                       'filesystem is not RW ' \
                                                       'as expected.'

    LOG.tc_step("Attemping live migrate VM...")
    vm_helper.live_migrate_vm(vm_id=vm_id)

    LOG.tc_step("Login to VM and to check filesystem is rw mode after live "
                "migration....")
    assert is_vm_filesystem_rw(vm_id, rootfs=['vda',
                                              'vdb']), 'After live migration ' \
                                                       'rootfs filesystem is ' \
                                                       'not RW'

    LOG.tc_step("Attempting  cold migrate VM...")
    vm_helper.cold_migrate_vm(vm_id)

    LOG.tc_step("Login to VM and to check filesystem is rw mode after live "
                "migration....")
    assert is_vm_filesystem_rw(vm_id, rootfs=['vda',
                                              'vdb']), 'After cold migration ' \
                                                       'rootfs filesystem is ' \
                                                       'not RW'
    LOG.tc_step("Testing VM evacuation.....")
    before_host_0 = vm_helper.get_vm_host(vm_id)

    LOG.tc_step("Rebooting compute {} to initiate vm evacuation .....".format(
        before_host_0))
    vm_helper.evacuate_vms(host=before_host_0,
                           vms_to_check=vm_id,
                           ping_vms=True)

    LOG.tc_step("Login to VM and to check filesystem is rw mode after live "
                "migration....")
    assert is_vm_filesystem_rw(vm_id, rootfs=['vda',
                                              'vdb']), 'After evacuation ' \
                                                       'filesystem is not RW'
Code example #24
0
def _test_ea_max_vms_with_crypto_vfs(_flavors, hosts_pci_device_info):
    """
    Verify maximum number of guests with Crypto VFs can be launched and
    stabilized

    Args:
        _flavors:
        hosts_pci_device_info:

    Returns:

    """

    LOG.info("Pci device  {}".format(hosts_pci_device_info))

    flavor_id = _flavors['flavor_qat_vf_4']
    # Assume we only have 1 Coleto Creek pci device on the system
    crypto_hosts = list(hosts_pci_device_info.keys())
    host = crypto_hosts[0]
    vf_info = hosts_pci_device_info[host][0]
    vf_device_id = vf_info['vf_device_id']
    vf_count = vf_info['vf_count']
    LOG.info("Vf_device_id {}, count: {}".format(vf_device_id, vf_count))

    # Number of VMs to launch to max out the total configured device VFs. Each VM is launched with 4 VFs,
    # and 4 VFs on each compute are reserved for the nova resize action.

    number_of_vms = int((vf_count - 4 * len(crypto_hosts)) / 4)
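    # e.g. with 2 crypto hosts and vf_count == 32, this gives (32 - 8) / 4 = 6 vms
    # (illustrative numbers only)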

    mgmt_net_id = network_helper.get_mgmt_net_id()
    tenant_net_id = network_helper.get_tenant_net_id()
    vif_type = get_vif_type()

    nics = [{'net-id': mgmt_net_id},
            {'net-id': tenant_net_id, 'vif-model': vif_type}]

    vm_helper.ensure_vms_quotas(number_of_vms + 10)

    vms = {}
    LOG.tc_step("Launch {} vms using flavor flavor_qat_vf_4 and nics {}".format(number_of_vms, nics))
    for i in range(1, number_of_vms + 1):
        vm_name = 'vm_crypto_{}'.format(i)
        vm_id = vm_helper.boot_vm(cleanup='function', name='vm_crypto_{}'.format(i), nics=nics, flavor=flavor_id)[1]
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
        vms[vm_name] = vm_id

    for vm_name_, vm_id_ in vms.items():
        vm_host = vm_helper.get_vm_host(vm_id_)
        host_dev_name = hosts_pci_device_info[vm_host][0]['device_name']
        expt_qat_devs = {host_dev_name: 4}
        check_helper.check_qat_service(vm_id=vm_id_, qat_devs=expt_qat_devs)

        LOG.info("Checking if other host has room for cold migrate vm {}".format(vm_name_))
        for host_ in crypto_hosts:
            if host_ != vm_host:
                total_vfs, used_vfs = network_helper.get_pci_device_vfs_counts_for_host(
                    host_, device_id=vf_device_id, fields=('pci_vfs_configured', 'pci_vfs_used'))

                if int(total_vfs) - int(used_vfs) >= 4:
                    LOG.info("Migrate to other host is possible")
                    expt_res = 0
                    break
        else:
            LOG.info("Migrate to other host is not possible")
            expt_res = 2

        LOG.tc_step("Attempt to cold migrate {} and ensure it {}".format(vm_name_,
                                                                         'succeeds' if expt_res == '0' else 'fails'))
        rc, msg = vm_helper.cold_migrate_vm(vm_id=vm_id_, fail_ok=True)
        assert expt_res == rc, "Expected: {}. Actual: {}".format(expt_res, msg)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id_)

        LOG.tc_step("Suspend/resume VM {} ....".format(vm_name_))
        vm_helper.suspend_vm(vm_id_)
        vm_helper.resume_vm(vm_id_)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id_)

        # vm_host = nova_helper.get_vm_host(vm_id_)
        # total, used = network_helper.get_pci_device_vfs_counts_for_host(vm_host, vf_device_id)[0]
        # if (total - int(used)) >= 4:
        #     expt_res = 0

        flavor_resize_id = _flavors['flavor_resize_qat_vf_4']
        LOG.tc_step("Resize VM {} to new flavor {} with increased memory...".format(vm_name_, flavor_resize_id))
        vm_helper.resize_vm(vm_id_, flavor_resize_id)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id_)

        # else:
        #     expt_res = 1
        #     LOG.info("Resizing of vm {} skipped; host {} max out vfs; used vfs = {}".format(vm_name_, vm_host, used))

        LOG.tc_step("Attempt to live migrate {} and ensure it's rejected".format(vm_name_))
        rc, msg = vm_helper.live_migrate_vm(vm_id=vm_id_, fail_ok=True)
        assert 6 == rc, "Expect live migration to fail on vm with pci alias device. Actual: {}".format(msg)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id_)

        check_helper.check_qat_service(vm_id=vm_id_, qat_devs=expt_qat_devs)
Code example #25
0
def test_cold_migrate_vm(storage_backing, ephemeral, swap, cpu_pol, vcpus,
                         vm_type, resize, hosts_per_stor_backing,
                         no_simplex):
    """
    Skip Condition:
        - Less than two hosts have specified storage backing

    Test Steps:
        - create flavor with specified vcpus, cpu_policy, ephemeral, swap,
        storage_backing
        - boot vm from specified boot source with above flavor
        - (attach volume to vm if 'image_with_vol', specified in vm_type)
        - Cold migrate vm
        - Confirm/Revert resize as specified
        - Verify VM is successfully cold migrated and confirmed/reverted resize
        - Verify that instance files are not found on original host. (TC6621)

    Teardown:
        - Delete created vm, volume, flavor

    """
    if len(hosts_per_stor_backing.get(storage_backing, [])) < 2:
        skip("Less than two hosts have {} storage backing".format(
            storage_backing))

    vm_id = _boot_vm_under_test(storage_backing, ephemeral, swap, cpu_pol,
                                vcpus, vm_type)
    prev_vm_host = vm_helper.get_vm_host(vm_id)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

    vm_disks = vm_helper.get_vm_devices_via_virsh(vm_id)
    file_paths, content = touch_files_under_vm_disks(vm_id=vm_id,
                                                     ephemeral=ephemeral,
                                                     swap=swap, vm_type=vm_type,
                                                     disks=vm_disks)

    LOG.tc_step("Cold migrate VM and {} resize".format(resize))
    revert = True if resize == 'revert' else False
    code, output = vm_helper.cold_migrate_vm(vm_id, revert=revert)
    assert 0 == code, "Cold migrate {} is not successful. Details: {}".format(
        resize, output)

    # The steps below are not strictly necessary since the host is already
    # checked inside the cold_migrate_vm keyword; they are kept just in case.
    LOG.tc_step(
        "Check VM host is as expected after cold migrate {}".format(resize))
    post_vm_host = vm_helper.get_vm_host(vm_id)
    if revert:
        assert prev_vm_host == post_vm_host, "vm host changed after cold " \
                                             "migrate revert"
    else:
        assert prev_vm_host != post_vm_host, "vm host did not change after " \
                                             "cold migrate"
        LOG.tc_step("Check that source host no longer has instance files")
        with host_helper.ssh_to_host(prev_vm_host) as prev_ssh:
            assert not prev_ssh.file_exists(
                '/var/lib/nova/instances/{}'.format(vm_id)), \
                "Instance files found on previous host {} after cold migrate " \
                "to {}".format(prev_vm_host, post_vm_host)

    LOG.tc_step("Ensure vm is pingable from NatBox after cold migration "
                "{}".format(resize))
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

    LOG.tc_step("Check files after cold migrate {}".format(resize))
    action = None if revert else 'cold_migrate'
    check_helper.check_vm_files(vm_id=vm_id, storage_backing=storage_backing,
                                ephemeral=ephemeral, swap=swap,
                                vm_type=vm_type, vm_action=action,
                                file_paths=file_paths, content=content,
                                disks=vm_disks, prev_host=prev_vm_host,
                                post_host=post_vm_host)
Code example #26
0
File: ~test_shared_cpu.py Project: pvaduva/auto_test
    def test_shared_cpu_migrate(self, config_host_cpus):
        """
        Test that a vm with shared cpus enabled can successfully live migrate to a node with shared vcpus enabled,
        and that the migration fails when it targets a node with shared vcpus disabled

        Setup:
            - Skip if there are less than 3 hosts
            - Configure at least one compute to disable shared vcpus
            - Configure at least two computes to have shared cpus via
                'system host-cpu-modify -f shared p0=1,p1=1 <hostname>' (module)

        Test Steps:
            - Create flavor with given number of vcpus
            - Add specific cpu_policy, shared_vcpu values to flavor extra specs
            - Boot a vm with the flavor
            - Ensure vm is booted successfully
            - Perform a non-forced live migration on vm. Ensure that vm is on a shared cpu host.
            - Perform a non-forced cold migration on vm. Ensure that vm is on a shared cpu host.
            - Force live-migrate vm to host with shared vcpus enabled. The migration should succeed
                - Ensure that the vm is on a different host
            - Force live-migrate vm to the host with disabled shared vcpus. The migration should fail
                - Verify error by ensuring that vm is still on same host and grep nova-scheduler logs for
                'CANNOT SCHEDULE'

        Teardown:
            - Delete created vm if any (function)
            - Revert any hosts that were changed for this test

        """

        storage_backing, disable_shared_cpu_host, enabled_shared_hosts = config_host_cpus

        LOG.tc_step("Create a flavor with given number of vcpus")
        flavor = create_shared_flavor(vcpus=2, storage_backing=storage_backing, shared_vcpu=1)
        nova_helper.set_flavor(flavor, **{FlavorSpec.MEM_PAGE_SIZE: 2048})

        LOG.tc_step("Boot a vm with above flavor, and ensure vm is booted successfully")
        vm_id = vm_helper.boot_vm(name='shared_cpu', flavor=flavor, fail_ok=False, cleanup='function')[1]
        origin_host = vm_helper.get_vm_host(vm_id)
        assert origin_host in enabled_shared_hosts, "VM not booted on shared cpu host"

        LOG.tc_step("Perform a non-forced live migration onto an enabled shared cpu host, expect success")
        vm_helper.live_migrate_vm(vm_id)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
        new_host = vm_helper.get_vm_host(vm_id)
        assert new_host in enabled_shared_hosts, "VM not migrated to a shared cpu host"

        LOG.tc_step("Perform a non-forced cold migration onto an enabled shared cpu host, expect success")
        vm_helper.cold_migrate_vm(vm_id)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
        new_host = vm_helper.get_vm_host(vm_id)
        assert new_host in enabled_shared_hosts, "VM not migrated to a shared cpu host"

        if new_host != enabled_shared_hosts[0]:
            dest_host = enabled_shared_hosts[0]
        else:
            dest_host = enabled_shared_hosts[1]

        LOG.tc_step("Perform second live migration onto an enabled shared cpu host, expect success")
        vm_helper.live_migrate_vm(vm_id, destination_host=dest_host)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

        LOG.tc_step("Perform third live migration onto a disabled shared cpu host, expect failure")
        code = vm_helper.live_migrate_vm(vm_id, destination_host=disable_shared_cpu_host, fail_ok=True)[0]
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
        assert code > 0, "Migrate not rejected as expected"
        assert vm_helper.get_vm_host(vm_id) == dest_host, "VM did not stay on the same compute node"

        LOG.tc_step("Verify second live migration failed via nova-scheduler.log")
        req_id = get_failed_live_migrate_action_id(vm_id)
        grepcmd = "grep 'CANNOT SCHEDULE' /var/log/nova/nova-scheduler.log | grep {}".format(req_id)
        control_ssh = ControllerClient.get_active_controller()
        control_ssh.exec_cmd(grepcmd, fail_ok=False)
Code example #27
0
def test_migrate_vm(check_system, guest_os, mig_type, cpu_pol):
    """
    Test migrate vms for given guest type
    Args:
        check_system:
        guest_os:
        mig_type:
        cpu_pol:

    Test Steps:
        - Create a glance image from given guest type
        - Create a vm from cinder volume using above image
        - Live/cold migrate the vm
        - Ensure vm moved to another host and is in good state (active and
            reachable from NatBox)

    """
    LOG.tc_step("Create a flavor with 1 vcpu")
    flavor_id = \
        nova_helper.create_flavor(name='{}-mig'.format(mig_type), vcpus=1,
                                  root_disk=9, cleanup='function')[1]

    if cpu_pol is not None:
        specs = {FlavorSpec.CPU_POLICY: cpu_pol}
        LOG.tc_step("Add following extra specs: {}".format(specs))
        nova_helper.set_flavor(flavor=flavor_id, **specs)

    LOG.tc_step("Create a volume from {} image".format(guest_os))
    image_id = glance_helper.get_guest_image(guest_os=guest_os)

    vol_id = cinder_helper.create_volume(source_id=image_id, size=9,
                                         guest_image=guest_os)[1]
    ResourceCleanup.add('volume', vol_id)

    LOG.tc_step("Boot a vm from above flavor and volume")
    vm_id = vm_helper.boot_vm(guest_os, flavor=flavor_id, source='volume',
                              source_id=vol_id, cleanup='function')[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

    if guest_os == 'ubuntu_14':
        system_helper.wait_for_alarm_gone(alarm_id=EventLogID.CINDER_IO_CONGEST,
                                          entity_id='cinder_io_monitor',
                                          strict=False, timeout=300,
                                          fail_ok=False)

    LOG.tc_step("{} migrate vm and check vm is moved to different host".format(
        mig_type))
    prev_vm_host = vm_helper.get_vm_host(vm_id)

    if mig_type == 'live':
        code, output = vm_helper.live_migrate_vm(vm_id)
        if code == 1:
            assert False, "No host to live migrate to. System may not be in " \
                          "good state."
    else:
        vm_helper.cold_migrate_vm(vm_id)

    vm_host = vm_helper.get_vm_host(vm_id)
    assert prev_vm_host != vm_host, "vm host did not change after {} " \
                                    "migration".format(mig_type)

    LOG.tc_step("Ping vm from NatBox after {} migration".format(mig_type))
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)