Code Example #1
File: ~test_fip.py Project: pvaduva/auto_test
def obsolete_test_fip(fip_setups):
    """
    Test VM Floating IP over VM launch, live-migration, cold-migration, pause/unpause, etc.

    Args:
        fip_setups: test fixture

    Test Setups (module):
        - Create a floating ip
        - Boot a vm
        - Attach floating ip to vm

    Test Steps:
        - Ping VM FIP
        - Live-migrate the VM and verify ping from VM
        - Cold-migrate the VM and verify ping from VM
        - Pause and un-pause the VM and verify ping from VM
        - Suspend and resume the VM and verify ping from VM
        - Stop and start the VM and verify ping from VM
        - Reboot the VM and verify ping from VM
        - Ping VM FIP

    Test Teardown:
        - Delete created FIP and vm (module)

    """
    vm_id, fip = fip_setups
    LOG.tc_step("Ping VM with Floating IP ")
    vm_helper.ping_ext_from_vm(vm_id, use_fip=True)

    LOG.tc_step("Live-migrate the VM and verify ping from VM")
    vm_helper.live_migrate_vm(vm_id)
    vm_helper.ping_ext_from_vm(vm_id, use_fip=True)

    LOG.tc_step("Cold-migrate the VM and verify ping from VM")
    vm_helper.cold_migrate_vm(vm_id)
    vm_helper.ping_ext_from_vm(vm_id, use_fip=True)

    LOG.tc_step("Pause and un-pause the VM and verify ping from VM")
    vm_helper.pause_vm(vm_id)
    vm_helper.unpause_vm(vm_id)
    vm_helper.ping_ext_from_vm(vm_id, use_fip=True)

    LOG.tc_step("Suspend and resume the VM and verify ping from VM")
    vm_helper.suspend_vm(vm_id)
    vm_helper.resume_vm(vm_id)
    vm_helper.ping_ext_from_vm(vm_id, use_fip=True)

    LOG.tc_step("Stop and start the VM and verify ping from VM")
    vm_helper.stop_vms(vm_id)
    vm_helper.start_vms(vm_id)
    vm_helper.ping_ext_from_vm(vm_id, use_fip=True)

    LOG.tc_step("Reboot the VM and verify ping from VM")
    vm_helper.reboot_vm(vm_id)
    vm_helper.ping_ext_from_vm(vm_id, use_fip=True)

    LOG.tc_step("Ping VM with Floating IP Ensure FIP reachable ")
    vm_helper.ping_ext_from_vm(vm_id, use_fip=True)
Code Example #2
def rescue_vm(vm_type, vm_id):
    if 'non_autorc' in vm_type:
        vm_helper.wait_for_vm_status(vm_id,
                                     status=VMStatus.ERROR,
                                     timeout=VMTimeout.AUTO_RECOVERY)

        LOG.warn('VM got into ERROR status as expected')
        LOG.warn('Attempting to rescue the VM:{}'.format(vm_id))
        vm_helper.stop_vms(vm_id)
        vm_helper.start_vms(vm_id)
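
A minimal usage sketch for the rescue pattern above, assuming a fixture that boots a VM whose flavor disables auto-recovery (the fixture name and the VMStatus.ACTIVE constant are assumptions, following the conventions of the other examples on this page):

def test_rescue_non_autorc_vm(launch_instances):
    # the VM is expected to fall into ERROR and stay there, since
    # auto-recovery is disabled for its flavor (assumed fixture behaviour)
    rescue_vm('non_autorc', launch_instances)
    vm_helper.wait_for_vm_status(launch_instances, status=VMStatus.ACTIVE)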
Code Example #3
def test_stop_start_instances(launch_instances):
    vm_helper.stop_vms(vms=launch_instances)
    vm_helper.start_vms(vms=launch_instances)
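
The two helpers above block until the nova operation completes. Below is a sketch of the same test with explicit status checks between the calls; VMStatus.STOPPED and VMStatus.ACTIVE are assumed constants alongside the VMStatus.ERROR used in Code Example #2:

def test_stop_start_instances_with_checks(launch_instances):
    vm_helper.stop_vms(vms=launch_instances)
    vm_helper.wait_for_vm_status(launch_instances, status=VMStatus.STOPPED)
    vm_helper.start_vms(vms=launch_instances)
    vm_helper.wait_for_vm_status(launch_instances, status=VMStatus.ACTIVE)
    # confirm the guest is reachable again after the restart
    vm_helper.wait_for_vm_pingable_from_natbox(launch_instances)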
Code Example #4
def test_patch_orch_with_ignored_alarms(patch_orchestration_setup, patch_function_check, ignored_alarm_texts):
    """
    This test verifies the patch orchestration operation in the presence of alarms that are normally ignored by
    the orchestration. These alarms are '200.001', '700.004', '900.001', '900.005' and '900.101'. This test
    generates the host-lock (200.001) and VM-stopped (700.004) alarms before executing the patch orchestration.
    Args:
        patch_orchestration_setup:
        patch_function_check:
        ignored_alarm_texts:

    Returns:

    """
    vms = patch_function_check
    patches, controllers, computes, storages = patch_orchestration_setup
    hosts = controllers + computes + storages
    patch_id = patching_helper.parse_test_patches(patch_ids=patches, search_str='INSVC_ALLNODES')[0]

    if 'HOST_LOCK' in ignored_alarm_texts and len(hosts) < 2:
        skip("Not enough hosts present in the system")

    if 'HOST_LOCK' in ignored_alarm_texts:
        host = hosts[-1]
        HostsToRecover.add(host)
        LOG.info("Lock host {} to generate 200.001 alarm".format(host))
        host_helper.lock_host(host)
        system_helper.wait_for_alarm(alarm_id='200.001', fail_ok=False)
        LOG.info("Host {} is locked and 200.001 alarm is generated".format(host))

    vm_id_to_stop = None
    if 'VM_STOP' in ignored_alarm_texts:
        vm_id_to_stop = vms[0]
        LOG.info("Stop VM {} to generate 700.004 alarm".format(vm_id_to_stop))
        vm_helper.stop_vms(vm_id_to_stop)
        system_helper.wait_for_alarm(alarm_id='700.004')

    patch_file = patches[patch_id]

    LOG.tc_step("Upload patch file {}".format(patch_file))
    uploaded_id = patching_helper.upload_patches(patch_files=patch_file)[1][0]
    assert patch_id == uploaded_id, "Expected patch {} and uploaded patch {} mismatch".format(patch_id, uploaded_id)
    LOG.info("Patch {} uploaded".format(uploaded_id))

    LOG.tc_step("Apply patch {}".format(uploaded_id))
    applied = patching_helper.apply_patches(patch_ids=[uploaded_id])[1]
    LOG.info("Patch {} applied".format(applied))

    LOG.tc_step("Install patch {} through orchestration".format(uploaded_id))
    patching_helper.wait_for_affecting_alarms_gone()
    run_patch_orchestration_strategy()
    LOG.info("Install patch through orchestration completed for patch {}".format(applied))
    host_helper.wait_for_hosts_ready(hosts=hosts)

    LOG.tc_step("Check vms after patch is installed.")
    if vm_id_to_stop:
        vm_helper.start_vms(vm_id_to_stop)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id_to_stop)
    check_vms(vms)

    LOG.tc_step("Remove test patch {}".format(applied))
    if vm_id_to_stop:
        vm_helper.stop_vms(vm_id_to_stop)

    patching_helper.remove_patches(patch_ids=applied)

    LOG.tc_step("Remove patch through orchestration: {}".format(applied))
    run_patch_orchestration_strategy(alarm_restrictions='relaxed')
    LOG.info("Apply/Remove through patch orchestration completed for patch {}".format(applied))

    LOG.tc_step("Check vms after patch removed: {}.".format(applied))
    if vm_id_to_stop:
        vm_helper.start_vms(vm_id_to_stop)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id_to_stop)
    check_vms(vms)
Code Example #5
def test_vm_vcpu_model(vcpu_model, vcpu_source, boot_source,
                       cpu_models_supported):
    """
    Test that the vcpu model specified in the flavor or image is applied to the vm. In case the host does not
    support the specified vcpu model, a proper error message should be displayed in nova show.

    Args:
        vcpu_model
        vcpu_source
        boot_source

    Test Steps:
        - Set flavor extra spec or image metadata with given vcpu model.
        - Boot a vm from volume/image
        - Stop and then start vm and ensure that it retains its cpu model
        - If vcpu model is supported by host,
            - Check vcpu model specified in flavor/image is used by vm via virsh, ps aux (and /proc/cpuinfo)
            - Live migrate vm and check vcpu model again
            - Cold migrate vm and check vcpu model again
        - If vcpu model is not supported by host, check that a proper error
            message is reported
    Teardown:
        - Delete created vm, volume, image, flavor

    """
    cpu_models_multi_host, all_cpu_models_supported = cpu_models_supported
    flv_model = vcpu_model if vcpu_source == 'flavor' else None
    img_model = vcpu_model if vcpu_source == 'image' else None
    code, vm, msg = _boot_vm_vcpu_model(flv_model=flv_model,
                                        img_model=img_model,
                                        boot_source=boot_source)

    is_supported = (not vcpu_model) or (vcpu_model == 'Passthrough') or (
        vcpu_model in all_cpu_models_supported)
    if not is_supported:
        LOG.tc_step(
            "Check vm in error state due to vcpu model unsupported by hosts.")
        assert 1 == code, "boot vm cli exit code is not 1. Actual fail reason: {}".format(
            msg)

        expt_fault = VCPUSchedulerErr.CPU_MODEL_UNAVAIL
        res_bool, vals = vm_helper.wait_for_vm_values(vm,
                                                      10,
                                                      regex=True,
                                                      strict=False,
                                                      status='ERROR')
        err = vm_helper.get_vm_fault_message(vm)

        assert res_bool, "VM did not reach expected error state. Actual: {}".format(
            vals)
        assert re.search(expt_fault, err), "Incorrect fault reported. Expected: {} Actual: {}" \
            .format(expt_fault, err)
        return

    # System supports specified vcpu, continue to verify
    expt_arch = None
    if vcpu_model == 'Passthrough':
        host = vm_helper.get_vm_host(vm)
        expt_arch = host_helper.get_host_cpu_model(host)

    LOG.tc_step("Check vm is launched with expected vcpu model")
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm)
    check_vm_cpu_model(vm_id=vm, vcpu_model=vcpu_model, expt_arch=expt_arch)

    multi_hosts_supported = (not vcpu_model) or (vcpu_model in cpu_models_multi_host) or \
                            (vcpu_model == 'Passthrough' and cpu_models_multi_host)
    # TC5141
    LOG.tc_step(
        "Stop and then restart vm and check if it retains its vcpu model")
    vm_helper.stop_vms(vm)
    vm_helper.start_vms(vm)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm)
    check_vm_cpu_model(vm_id=vm, vcpu_model=vcpu_model, expt_arch=expt_arch)

    if not multi_hosts_supported:
        LOG.info(
            "Skip migration steps. Less than two hosts in same storage aggregate support {}"
            .format(vcpu_model))
        return

    LOG.tc_step(
        "Live (block) migrate vm and check {} vcpu model".format(vcpu_model))
    vm_helper.live_migrate_vm(vm_id=vm)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm)
    check_vm_cpu_model(vm, vcpu_model, expt_arch=expt_arch)

    LOG.tc_step("Cold migrate vm and check {} vcpu model".format(vcpu_model))
    vm_helper.cold_migrate_vm(vm_id=vm)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm)
    check_vm_cpu_model(vm, vcpu_model, expt_arch=expt_arch)
Code Example #6
File: test_cpu_policy.py Project: starlingx/test
def test_cpu_pol_vm_actions(flv_vcpus, cpu_pol, pol_source, boot_source):
    LOG.tc_step("Create flavor with {} vcpus".format(flv_vcpus))
    flavor_id = nova_helper.create_flavor(name='cpu_pol', vcpus=flv_vcpus)[1]
    ResourceCleanup.add('flavor', flavor_id)

    image_id = glance_helper.get_image_id_from_name(
        GuestImages.DEFAULT['guest'], strict=True)
    if cpu_pol is not None:
        if pol_source == 'flavor':
            specs = {FlavorSpec.CPU_POLICY: cpu_pol}

            LOG.tc_step("Set following extra specs: {}".format(specs))
            nova_helper.set_flavor(flavor_id, **specs)
        else:
            image_meta = {ImageMetadata.CPU_POLICY: cpu_pol}
            LOG.tc_step(
                "Create image with following metadata: {}".format(image_meta))
            image_id = glance_helper.create_image(
                name='cpu_pol_{}'.format(cpu_pol),
                cleanup='function',
                **image_meta)[1]
    if boot_source == 'volume':
        LOG.tc_step("Create a volume from image")
        source_id = cinder_helper.create_volume(name='cpu_pol_{}'.format(cpu_pol),
                                                source_id=image_id)[1]
        ResourceCleanup.add('volume', source_id)
    else:
        source_id = image_id

    prev_cpus = host_helper.get_vcpus_for_computes(field='used_now')

    LOG.tc_step(
        "Boot a vm from {} with above flavor and check vm topology is as "
        "expected".format(boot_source))
    vm_id = vm_helper.boot_vm(name='cpu_pol_{}_{}'.format(cpu_pol, flv_vcpus),
                              flavor=flavor_id,
                              source=boot_source,
                              source_id=source_id,
                              cleanup='function')[1]

    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    vm_host = vm_helper.get_vm_host(vm_id)
    check_helper.check_topology_of_vm(vm_id,
                                      vcpus=flv_vcpus,
                                      cpu_pol=cpu_pol,
                                      vm_host=vm_host,
                                      prev_total_cpus=prev_cpus[vm_host])

    LOG.tc_step("Suspend/Resume vm and check vm topology stays the same")
    vm_helper.suspend_vm(vm_id)
    vm_helper.resume_vm(vm_id)

    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    check_helper.check_topology_of_vm(vm_id,
                                      vcpus=flv_vcpus,
                                      cpu_pol=cpu_pol,
                                      vm_host=vm_host,
                                      prev_total_cpus=prev_cpus[vm_host])

    LOG.tc_step("Stop/Start vm and check vm topology stays the same")
    vm_helper.stop_vms(vm_id)
    vm_helper.start_vms(vm_id)

    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    prev_siblings = check_helper.check_topology_of_vm(
        vm_id,
        vcpus=flv_vcpus,
        cpu_pol=cpu_pol,
        vm_host=vm_host,
        prev_total_cpus=prev_cpus[vm_host])[1]

    LOG.tc_step("Live migrate vm and check vm topology stays the same")
    vm_helper.live_migrate_vm(vm_id=vm_id)

    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    vm_host = vm_helper.get_vm_host(vm_id)
    prev_siblings = prev_siblings if cpu_pol == 'dedicated' else None
    check_helper.check_topology_of_vm(vm_id,
                                      vcpus=flv_vcpus,
                                      cpu_pol=cpu_pol,
                                      vm_host=vm_host,
                                      prev_total_cpus=prev_cpus[vm_host],
                                      prev_siblings=prev_siblings)

    LOG.tc_step("Cold migrate vm and check vm topology stays the same")
    vm_helper.cold_migrate_vm(vm_id=vm_id)

    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    vm_host = vm_helper.get_vm_host(vm_id)
    check_helper.check_topology_of_vm(vm_id,
                                      vcpus=flv_vcpus,
                                      cpu_pol=cpu_pol,
                                      vm_host=vm_host,
                                      prev_total_cpus=prev_cpus[vm_host])
Code Example #7
def test_detection_of_failed_instance(launch_instances):
    con_ssh = ssh.ControllerClient.get_active_controller()
    start_date_cmd = ("python -c \"import datetime; "
                      "print str(datetime.datetime.now())[:-3]\"")
    kill_cmd = (start_date_cmd + "&& sudo pkill -SIGKILL qemu")
    vm_host = vm_helper.get_vm_host(launch_instances)
    vm_name = vm_helper.get_vm_name_from_id(launch_instances)
    end_date_cmd = ("grep -r \"{}\" /var/log/nfv-vim.log | "
                    "grep \"powering-off\" | "
                    "tail -1 | "
                    "awk '{{print$1}}'".format(vm_name))

    res = list()

    for i in range(20):
        LOG.tc_step("Start of iter {}".format(i))
        try:
            st = str()
            et = str()

            vm_helper.get_vms()

            with host_helper.ssh_to_host(vm_host, con_ssh=con_ssh) as con_0_ssh:
                end_time = time.time() + 120
                while time.time() < end_time:
                    con_0_ssh.send(cmd="pgrep qemu")
                    con_0_ssh.expect()
                    matches = re.findall("\n([0-9]+)\n", con_0_ssh.cmd_output)
                    time.sleep(5)
                    if matches:
                        break
                else:
                    raise exceptions.TimeoutException("Timed out waiting for qemu process")

                con_0_ssh.send(cmd=kill_cmd)
                # the following lines were scrubbed ("******") in the source;
                # a hedged reconstruction: answer the sudo password prompt,
                # capture the start timestamp printed by kill_cmd, then poll
                # end_date_cmd for the "powering-off" log entry
                index = con_0_ssh.expect(["Password:", con_0_ssh.prompt])  # prompt attr assumed
                if index == 0:
                    con_0_ssh.send(cmd=HostLinuxUser.get_password())  # assumed helper
                    con_0_ssh.expect()
                st = con_0_ssh.cmd_output.splitlines()[1]

            end_time = time.time() + 120
            while time.time() < end_time:
                # exec_cmd assumed to return (rc, output)
                et = con_ssh.exec_cmd(end_date_cmd)[1]
                if et:
                    break
                time.sleep(5)
            else:
                raise exceptions.TimeoutException(
                    "Timed out waiting for end time")

            # timestamp formats assumed from the commands above
            st_date = datetime.datetime.strptime(st, "%Y-%m-%d %H:%M:%S.%f")
            et_date = datetime.datetime.strptime(et, "%Y-%m-%dT%H:%M:%S.%f")

            diff = et_date - st_date
            LOG.info("\nstart time = {}\nend time = {}".format(st, et))
            LOG.info("\ndiff = {}".format(diff))
            res.append(diff)
        finally:
            time.sleep(5)
            vm_helper.start_vms(launch_instances)

    def calc_avg(lst):
        rtrn_sum = datetime.timedelta()
        # enumerate instead of lst.index(): index() returns the first
        # occurrence, which is wrong when two iterations take the same time
        for idx, duration in enumerate(lst):
            LOG.info("Iter {}: {}".format(idx, duration))
            rtrn_sum += duration
        return rtrn_sum / len(lst)

    final_res = calc_avg(res)
    LOG.info("Avg time is : {}".format(final_res))
Code Example #8
def test_snat_vm_actions(snat_setups, snat):
    """
    Test VM external access over VM launch, live-migration, cold-migration, pause/unpause, etc

    Args:
        snat_setups (tuple): returns vm id and fip. Enable snat, create vm and attach floating ip.

    Test Setups (module):
        - Find a tenant router that is dvr or non-dvr based on the parameter
        - Enable SNAT on tenant router
        - Boot a vm and attach a floating ip
        - Ping vm from NatBox

    Test Steps:
        - Enable/Disable SNAT based on snat param
        - Ping from VM to 8.8.8.8
        - wget <lab_fip> to VM
        - scp from NatBox to VM
        - Live-migrate the VM and verify ping from VM
        - Cold-migrate the VM and verify ping from VM
        - Pause and un-pause the VM and verify ping from VM
        - Suspend and resume the VM and verify ping from VM
        - Stop and start the VM and verify ping from VM
        - Reboot the VM and verify ping from VM

    Test Teardown:
        - Enable snat for next test in the same module     (function)
        - Delete the created vm     (module)
        - Disable snat  (module)

    """
    vm_ = snat_setups[0]
    snat = (snat == 'snat_enabled')
    LOG.tc_step("Update tenant router external gateway to set SNAT to {}".format(snat))
    network_helper.set_router_gateway(enable_snat=snat)

    # Allow router update to complete, since we've seen cases where ping vm pass but ssh fail
    time.sleep(30)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_, timeout=60, use_fip=snat)

    LOG.tc_step("Ping from VM {} to 8.8.8.8".format(vm_))
    vm_helper.ping_ext_from_vm(vm_, use_fip=True)

    LOG.tc_step("wget to VM {}".format(vm_))
    with vm_helper.ssh_to_vm_from_natbox(vm_id=vm_, use_fip=True) as vm_ssh:
        vm_ssh.exec_cmd('wget google.ca', fail_ok=False)

    LOG.tc_step("scp from NatBox to VM {}".format(vm_))
    vm_fip = network_helper.get_external_ips_for_vms(vms=vm_)[0]
    natbox_ssh = NATBoxClient.get_natbox_client()
    natbox_ssh.scp_on_source(source_path='test', dest_user='******', dest_ip=vm_fip, dest_path='/tmp/',
                             dest_password='******', timeout=30)

    LOG.tc_step("Live-migrate the VM and verify ping from VM")
    vm_helper.live_migrate_vm(vm_)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_, use_fip=snat)
    vm_helper.ping_ext_from_vm(vm_, use_fip=True)

    LOG.tc_step("Cold-migrate the VM and verify ping from VM")
    vm_helper.cold_migrate_vm(vm_)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_, use_fip=snat)
    vm_helper.ping_ext_from_vm(vm_, use_fip=True)

    LOG.tc_step("Pause and un-pause the VM and verify ping from VM")
    vm_helper.pause_vm(vm_)
    vm_helper.unpause_vm(vm_)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_, use_fip=snat)
    vm_helper.ping_ext_from_vm(vm_, use_fip=True)

    LOG.tc_step("Suspend and resume the VM and verify ping from VM")
    vm_helper.suspend_vm(vm_)
    vm_helper.resume_vm(vm_)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_, use_fip=snat)
    vm_helper.ping_ext_from_vm(vm_, use_fip=True)

    LOG.tc_step("Stop and start the VM and verify ping from VM")
    vm_helper.stop_vms(vm_)
    vm_helper.start_vms(vm_)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_, use_fip=snat)
    vm_helper.ping_ext_from_vm(vm_, use_fip=True)

    LOG.tc_step("Reboot the VM and verify ping from VM")
    vm_helper.reboot_vm(vm_)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_, use_fip=snat)
    vm_helper.ping_ext_from_vm(vm_, use_fip=True)

    LOG.tc_step("Resize the vm to a flavor with 2 dedicated cpus and verify ping from VM")
    new_flv = nova_helper.create_flavor(name='ded', vcpus=2)[1]
    ResourceCleanup.add('flavor', new_flv, scope='module')
    nova_helper.set_flavor(new_flv, **{FlavorSpec.CPU_POLICY: 'dedicated'})

    vm_helper.resize_vm(vm_, new_flv)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_, use_fip=snat)
    vm_helper.ping_ext_from_vm(vm_, use_fip=True)
Code Example #9
def perform_vm_operation(vm_type,
                         vm_id,
                         op='live_migration',
                         extra_specs='vtpm'):
    LOG.info('Perform action:{} to the VM, extra specs:{}'.format(
        op, extra_specs))

    op_table = {
        'live_migration': lambda x, y: vm_helper.live_migrate_vm(y),
        'cold_migration': lambda x, y: vm_helper.cold_migrate_vm(y),
        'stop_start': lambda x, y: (vm_helper.stop_vms(y),
                                    vm_helper.start_vms(y)),
        'suspend_resume': lambda x, y: (vm_helper.suspend_vm(y),
                                        vm_helper.resume_vm(y)),
        'pause_unpause': lambda x, y: (vm_helper.pause_vm(y),
                                       vm_helper.unpause_vm(y)),
        'reboot_host': lambda x, y: reboot_hosting_node(x, y,
                                                        force_reboot=False),
        'soft_reboot': lambda x, y: vm_helper.reboot_vm(y, hard=False),
        'hard_reboot': lambda x, y: vm_helper.reboot_vm(y, hard=True),
        'lock_unlock': lambda x, y: lock_unlock_hosting_node(x, y,
                                                             force_lock=False),
        'evacuate': lambda x, y: reboot_hosting_node(x, y, force_reboot=True),
    }

    if op in op_table:
        LOG.info('Perform action: {}'.format(op))
        op_table[op](vm_type, vm_id)

        return True

    elif op == 'resize_to_autorc':
        if vm_type == 'autorc':
            LOG.info(
                'resize from AUTO-RECOVERY to another AUTO-RECOVERY flavor')

        to_flavor_id = get_flavor_id(vm_type, 'autorc2')

        LOG.info('perform {} on type:{}, to_flavor_id={}'.format(
            op, vm_type, to_flavor_id))

        vm_helper.resize_vm(vm_id, to_flavor_id)

    elif op == 'resize_to_non_autorc':
        LOG.info('perform {} on type:{}, id:{}'.format(op, vm_type, vm_id))
        if vm_type == 'non_autorc2':
            LOG.warn(
                'resize from AUTO-RECOVERY to another AUTO-RECOVERY flavor')

        to_flavor_id = get_flavor_id(vm_type, 'non_autorc2')
        vm_helper.resize_vm(vm_id, to_flavor_id)

    elif op == 'resize_to_non_vtpm':
        LOG.info('perform {} on type:{}, id:{}'.format(op, vm_type, vm_id))

        to_flavor_id = get_flavor_id(vm_type, 'non_vtpm')

        vm_helper.resize_vm(vm_id, to_flavor_id)

    else:
        LOG.fatal('Unsupported action: {}'.format(op))
        return False
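
A usage sketch for the dispatch table above; the concrete vm_type and vm_id values are assumed to come from whatever fixture booted the VM, and the operation names are keys of op_table:

def exercise_vm_operations(vm_type, vm_id):
    # each op_table entry is a lambda taking (vm_type, vm_id)
    for operation in ('stop_start', 'suspend_resume', 'pause_unpause'):
        assert perform_vm_operation(vm_type, vm_id, op=operation)
        # after each cycle the VM should be reachable again
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)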