Example #1
def test_boot_windows_guest():
    """
    Boot a Windows guest to assist with manual testing on the Windows guest
    """
    # Change the following parameters to change the vm type.
    guest = 'win_2012'  # such as tis-centos-guest
    storage = 'local_image'  # local_lvm, local_image, or remote
    boot_source = 'image'  # volume or image

    LOG.tc_step("Get/Create {} glance image".format(guest))
    glance_helper.get_guest_image(guest_os=guest)

    LOG.tc_step("Create flavor with {} storage backing".format(storage))
    flv_id = nova_helper.create_flavor(name='{}-{}'.format(storage, guest),
                                       vcpus=4,
                                       ram=8192,
                                       storage_backing=storage,
                                       guest_os=guest)[1]
    nova_helper.set_flavor(flv_id, **{FlavorSpec.CPU_POLICY: 'dedicated'})

    LOG.tc_step("Boot {} vm".format(guest))
    vm_id = vm_helper.boot_vm(name='{}-{}'.format(guest, storage),
                              flavor=flv_id,
                              guest_os=guest,
                              source=boot_source)[1]

    LOG.tc_step("Ping vm and ssh to it")
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        code, output = vm_ssh.exec_cmd('pwd', fail_ok=False)
        LOG.info(output)

    LOG.info(
        "{} is successfully booted from {} with {} storage backing".format(
            guest, boot_source, storage))
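The hard-coded constants above can also be driven by pytest parametrization. Below is a minimal sketch (not from the source) of the same flow with the guest/storage/boot-source tuple supplied via @mark.parametrize; it assumes the same helper modules imported by the examples on this page.

from pytest import mark

@mark.parametrize(('guest', 'storage', 'boot_source'), [
    ('win_2012', 'local_image', 'image'),
    ('tis-centos-guest', 'local_image', 'volume'),
])
def test_boot_guest_parametrized(guest, storage, boot_source):
    # Same flow as test_boot_windows_guest, with the constants parametrized
    glance_helper.get_guest_image(guest_os=guest)
    flv_id = nova_helper.create_flavor(name='{}-{}'.format(storage, guest),
                                       vcpus=4, ram=8192,
                                       storage_backing=storage,
                                       guest_os=guest)[1]
    nova_helper.set_flavor(flv_id, **{FlavorSpec.CPU_POLICY: 'dedicated'})
    vm_id = vm_helper.boot_vm(name='{}-{}'.format(guest, storage),
                              flavor=flv_id, guest_os=guest,
                              source=boot_source)[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)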
Example #2
def test_boot_ge_edge_uefi():
    guest = 'ge_edge'
    LOG.tc_step("Get ge_edge guest image from test server and create glance image with uefi property")
    glance_helper.get_guest_image(guest_os=guest, rm_image=True)

    LOG.tc_step("Create a flavor for ge_edge vm")
    flavor = nova_helper.create_flavor(guest_os=guest)[1]

    LOG.tc_step("Launch a GE_EDGE vm with UEFI boot")
    vm_helper.boot_vm(name='ge_edge_uefi', flavor=flavor, guest_os=guest)
Example #3
def _vms():
    vm_helper.ensure_vms_quotas(vms_num=8)
    glance_helper.get_guest_image(guest_os=GUEST_OS, cleanup='module')

    LOG.fixture_step("Create a favor with dedicated cpu policy")
    flavor_id = nova_helper.create_flavor(name='dedicated-ubuntu',
                                          guest_os=GUEST_OS)[1]
    ResourceCleanup.add('flavor', flavor_id, scope='module')
    nova_helper.set_flavor(flavor_id, **{FlavorSpec.CPU_POLICY: 'dedicated'})

    mgmt_net_id = network_helper.get_mgmt_net_id()
    internal_net_id = network_helper.get_internal_net_id()
    tenant_net_ids = network_helper.get_tenant_net_ids()
    if len(tenant_net_ids) < VMS_COUNT:
        tenant_net_ids += tenant_net_ids
    assert len(tenant_net_ids) >= VMS_COUNT

    vif = 'avp' if system_helper.is_avs() else 'virtio'
    vm_vif_models = {
        'virtio_vm1': ('virtio', tenant_net_ids[0]),
        '{}_vm1'.format(vif): (vif, tenant_net_ids[1]),
        'virtio_vm2': ('virtio', tenant_net_ids[2]),
        '{}_vm2'.format(vif): (vif, tenant_net_ids[3])
    }

    vms = []
    for vm_name, vifs in vm_vif_models.items():
        vif_model, tenant_net_id = vifs
        nics = [{
            'net-id': mgmt_net_id
        }, {
            'net-id': tenant_net_id,
            'vif-model': vif_model
        }, {
            'net-id': internal_net_id,
            'vif-model': vif_model
        }]

        LOG.fixture_step(
            "Boot a ubuntu14 vm with {} nics from above flavor and volume".
            format(vif_model))
        vm_id = vm_helper.boot_vm(vm_name,
                                  flavor=flavor_id,
                                  source='volume',
                                  cleanup='module',
                                  nics=nics,
                                  guest_os=GUEST_OS)[1]
        vms.append(vm_id)

    return vms
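_vms() reads like the body of a module-scoped pytest fixture (it logs fixture steps and registers module-scoped cleanup). A minimal sketch of how such a fixture might wrap it is shown below; the fixture name and scope are assumptions, not taken from the source.

from pytest import fixture

@fixture(scope='module')
def vms():
    # Hypothetical wrapper: boot the four vms once per test module
    return _vms()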
Example #4
def check_fs_sufficient(guest_os, boot_source='volume'):
    """
    Check if volume pool, image storage, and/or image conversion space is sufficient to launch vm
    Args:
        guest_os (str): e.g., tis-centos-guest, win_2016
        boot_source (str): volume or image

    Returns (str): image id

    """
    LOG.info(
        "Check if storage fs is sufficient to launch boot-from-{} vm with {}".
        format(boot_source, guest_os))
    # if guest_os in ['opensuse_12', 'win_2016'] and boot_source == 'volume':
    #     if not cinder_helper.is_volumes_pool_sufficient(min_size=35):
    #         skip(SkipStorageSpace.SMALL_CINDER_VOLUMES_POOL)

    # if guest_os == 'win_2016' and boot_source == 'volume':
    #     if not glance_helper.is_image_conversion_sufficient(guest_os=guest_os):
    #         skip(SkipStorageSpace.INSUFFICIENT_IMG_CONV.format(guest_os))

    check_disk = True if 'win' in guest_os else False
    cleanup = None if re.search(
        'ubuntu_14|{}'.format(GuestImages.TIS_GUEST_PATTERN),
        guest_os) else 'function'
    img_id = glance_helper.get_guest_image(guest_os,
                                           check_disk=check_disk,
                                           cleanup=cleanup)
    return img_id
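A hedged usage sketch of check_fs_sufficient (illustrative only, not from the source): the returned image id feeds a volume and a boot-from-volume vm, reusing helper calls already shown on this page.

def test_launch_win2016_from_volume():
    guest_os = 'win_2016'
    # Skip (inside the helper) or get the image id if space is sufficient
    img_id = check_fs_sufficient(guest_os, boot_source='volume')

    vol_id = cinder_helper.create_volume(name='vol-' + guest_os,
                                         source_id=img_id,
                                         guest_image=guest_os,
                                         cleanup='function')[1]
    vm_helper.boot_vm(name=guest_os, source='volume', source_id=vol_id,
                      guest_os=guest_os, cleanup='function')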
Example #5
def create_rt_vm(hypervisor):
    global testable_hypervisors
    LOG.tc_step('Create/get glance image using rt guest image')
    image_id = glance_helper.get_guest_image(guest_os='tis-centos-guest-rt',
                                             cleanup='module')

    vcpu_count = VM_CPU_NUM
    non_rt_core = 0
    LOG.tc_step(
        'Create a flavor with specified cpu model, cpu policy, realtime mask, and 2M pagesize'
    )
    flavor_id, storage_backing = nova_helper.create_flavor(
        ram=1024, vcpus=vcpu_count, root_disk=2,
        storage_backing='local_image')[1:3]
    cpu_info = dict(testable_hypervisors[hypervisor]['cpu_info'])
    extra_specs = {
        FlavorSpec.VCPU_MODEL: cpu_info['model'],
        FlavorSpec.CPU_POLICY: 'dedicated',
        FlavorSpec.CPU_REALTIME: 'yes',
        FlavorSpec.CPU_REALTIME_MASK: '^{}'.format(non_rt_core),
        FlavorSpec.MEM_PAGE_SIZE: 2048,
    }
    nova_helper.set_flavor(flavor_id, **extra_specs)

    LOG.tc_step(
        'Boot a VM with rt flavor and image on the targeted hypervisor: {}'.
        format(hypervisor))
    vm_id = vm_helper.boot_vm(flavor=flavor_id,
                              source='image',
                              source_id=image_id,
                              vm_host=hypervisor,
                              cleanup='function')[1]
    return vm_id, vcpu_count, non_rt_core
Example #6
def test_kpi_cinder_volume_creation(collect_kpi):
    """
    KPI test - cinder volume creation
    Args:
        collect_kpi:

    Test Steps:
        - Create a 20g cinder volume using default tis guest
        - Collect duration kpi from cinder create cli sent to volume available

    """
    if not collect_kpi:
        skip("KPI only test. Skip due to kpi collection is not enabled.")

    LOG.tc_step(
        "Create a 20g volume from default tis guest and collect image download rate, "
        "image conversion rate, and total volume creation time")
    # init_time = common.get_date_in_format(date_format=KPI_DATE_FORMAT)
    image = glance_helper.get_guest_image(guest_os='tis-centos-guest-qcow2',
                                          cleanup='function')

    vol_id = cinder_helper.create_volume(name='20g',
                                         source_id=image,
                                         size=20,
                                         cleanup='function')[1]
    vol_updated = cinder_helper.get_volume_show_values(
        vol_id, 'updated_at')[0].split('.')[0]

    # Logs no longer available for image downloading and conversion.
    # code_download, out_download = \
    #     kpi_log_parser.record_kpi(local_kpi_file=collect_kpi, kpi_name=ImageDownload.NAME,
    #                               host=None,
    #                               log_path=ImageDownload.LOG_PATH,
    #                               end_pattern=ImageDownload.GREP_PATTERN,
    #                               python_pattern=ImageDownload.PYTHON_PATTERN,
    #                               init_time=init_time, uptime=1,
    #                               unit=ImageDownload.UNIT)
    # code_conv, out_conv = \
    #     kpi_log_parser.record_kpi(local_kpi_file=collect_kpi, kpi_name=ImageConversion.NAME,
    #                               host=None,
    #                               log_path=ImageConversion.LOG_PATH,
    #                               end_pattern=ImageConversion.GREP_PATTERN,
    #                               python_pattern=ImageConversion.PYTHON_PATTERN,
    #                               init_time=init_time, uptime=1,
    #                               unit=ImageConversion.UNIT)
    code_create, out_create = \
        kpi_log_parser.record_kpi(local_kpi_file=collect_kpi, kpi_name=VolCreate.NAME, host=None,
                                  log_path=VolCreate.LOG_PATH, end_pattern=vol_updated,
                                  start_pattern=VolCreate.START, uptime=1)
    #
    # assert code_download == 0, out_download
    # assert code_conv == 0, out_conv
    assert code_create == 0, out_create
Example #7
def test_boot_vms():

    mgmt_net_id = network_helper.get_mgmt_net_id()
    tenant_net_id = network_helper.get_tenant_net_id()
    internal_net_id = network_helper.get_internal_net_id()

    nics = [{
        'net-id': mgmt_net_id
    }, {
        'net-id': tenant_net_id
    }, {
        'net-id': internal_net_id
    }]

    for guest_os in ['ubuntu_14', 'cgcs-guest']:
        glance_helper.get_guest_image(guest_os)
        vm_id = vm_helper.boot_vm(guest_os=guest_os, nics=nics)[1]
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

        time.sleep(30)
        vm_helper.ping_vms_from_vm(vm_id,
                                   vm_id,
                                   net_types=['mgmt', 'data', 'internal'])
Example #8
def check_fs_sufficient(guest_os, boot_source='volume'):
    """
    Check if volume pool, image storage, and/or image conversion space is
    sufficient to launch vm
    Args:
        guest_os (str): e.g., tis-centos-guest, win_2016
        boot_source (str): volume or image

    Returns (str): image id

    """
    LOG.info("Check if storage fs is sufficient to launch boot-from-{} vm "
             "with {}".format(boot_source, guest_os))
    check_disk = True if 'win' in guest_os else False
    cleanup = None if re.search(
        'ubuntu_14|{}'.format(GuestImages.TIS_GUEST_PATTERN),
        guest_os) else 'function'
    img_id = glance_helper.get_guest_image(guest_os,
                                           check_disk=check_disk,
                                           cleanup=cleanup)
    return img_id
Example #9
def _boot_vm_vcpu_model(flv_model=None,
                        img_model=None,
                        boot_source='volume',
                        avail_zone=None,
                        vm_host=None):
    LOG.tc_step(
        "Attempt to launch vm from {} with image vcpu model metadata: {}; flavor vcpu model extra spec: {}"
        .format(boot_source, img_model, flv_model))

    flv_id = nova_helper.create_flavor(name='vcpu_{}'.format(flv_model))[1]
    ResourceCleanup.add('flavor', flv_id)
    if flv_model:
        nova_helper.set_flavor(flavor=flv_id,
                               **{FlavorSpec.VCPU_MODEL: flv_model})

    if img_model:
        image_id = glance_helper.create_image(
            name='vcpu_{}'.format(img_model),
            cleanup='function',
            **{ImageMetadata.CPU_MODEL: img_model})[1]
    else:
        image_id = glance_helper.get_guest_image(
            guest_os=GuestImages.DEFAULT['guest'])

    if boot_source == 'image':
        source_id = image_id
    else:
        source_id = cinder_helper.create_volume(name='vcpu_model',
                                                source_id=image_id)[1]
        ResourceCleanup.add('volume', source_id)

    code, vm, msg = vm_helper.boot_vm(name='vcpu_model',
                                      flavor=flv_id,
                                      source=boot_source,
                                      source_id=source_id,
                                      fail_ok=True,
                                      cleanup='function',
                                      avail_zone=avail_zone,
                                      vm_host=vm_host)
    return code, vm, msg
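A hedged example of how _boot_vm_vcpu_model might be consumed by a test; the parameter values are hypothetical, not from the source.

def test_vcpu_model_from_flavor():
    # Hypothetical values; the real tests likely parametrize these
    code, vm_id, msg = _boot_vm_vcpu_model(flv_model='Skylake-Client',
                                           img_model=None,
                                           boot_source='volume')
    assert code == 0, msg
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)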
Example #10
def test_interface_attach_detach_max_vnics(guest_os, if_attach_arg, vifs,
                                           check_avs_pattern, base_vm):
    """
    Sample test case for interface attach/detach to maximum vnics

    Setups:
        - Boot a base vm with mgmt net and internal0-net1   (module)

    Test Steps:
        - Boot a vm with only mgmt interface
        - Attach vifs to the vm with the given if_attach_arg and vif_model
        - Bring up the interface from vm
        - ping between base_vm and vm_under_test over mgmt & tenant network
        - Perform VM action - Cold migrate, live migrate, pause resume, suspend resume
        - Verify ping between base_vm and vm_under_test over mgmt & tenant network after vm operation
        - detach all the tenant interface
        - Repeat attach/detach after performing each vm action

    Teardown:
        - Delete created vm, volume, port (if any)  (func)
        - Delete base vm, volume    (module)

    """
    if guest_os == 'vxworks' and not system_helper.is_avs():
        skip('e1000 vif unsupported by OVS')

    base_vm_id, mgmt_nic, tenant_nic, internal_net_id, tenant_net_id, mgmt_net_id = base_vm

    glance_vif = None
    if not (if_attach_arg == 'port_id' and system_helper.is_avs()):
        for vif in vifs:
            if vif[0] in ('e1000', 'rtl8139'):
                glance_vif = vif[0]
                break

    LOG.tc_step("Get/Create {} glance image".format(guest_os))
    cleanup = None if (not glance_vif and re.search(
        GuestImages.TIS_GUEST_PATTERN, guest_os)) else 'function'
    image_id = glance_helper.get_guest_image(
        guest_os=guest_os,
        cleanup=cleanup,
        use_existing=False if cleanup else True)

    if glance_vif:
        glance_helper.set_image(image_id,
                                hw_vif_model=glance_vif,
                                new_name='{}_{}'.format(guest_os, glance_vif))

    LOG.tc_step("Create a flavor with 2 vcpus")
    flavor_id = nova_helper.create_flavor(vcpus=1,
                                          guest_os=guest_os,
                                          cleanup='function')[1]

    LOG.tc_step("Create a volume from {} image".format(guest_os))
    code, vol_id = cinder_helper.create_volume(name='vol-' + guest_os,
                                               source_id=image_id,
                                               fail_ok=True,
                                               guest_image=guest_os,
                                               cleanup='function')
    assert 0 == code, "Issue occurred when creating volume"
    source_id = vol_id

    LOG.tc_step("Boot a vm with mgmt nic only")
    vm_under_test = vm_helper.boot_vm(name='if_attach_tenant',
                                      nics=[mgmt_nic],
                                      source_id=source_id,
                                      flavor=flavor_id,
                                      guest_os=guest_os,
                                      cleanup='function')[1]
    prev_port_count = 1
    for vm_actions in [['live_migrate'], ['cold_migrate'],
                       ['pause', 'unpause'], ['suspend', 'resume'],
                       ['stop', 'start']]:
        tenant_port_ids = []
        if 'vxworks' not in guest_os:
            LOG.tc_step(
                "Attach specified vnics to the VM before {} and bring up interfaces"
                .format(vm_actions))
            expt_vnics = 1
            for vif in vifs:
                vif_model, vif_count = vif
                expt_vnics += vif_count
                LOG.info("iter {}".format(vif_count))
                for i in range(vif_count):
                    if if_attach_arg == 'port_id':
                        vif_model = vif_model if system_helper.is_avs() else None
                        port = network_helper.create_port(
                            net_id=tenant_net_id,
                            wrs_vif=vif_model,
                            cleanup='function',
                            name='attach_{}_{}'.format(vif_model, i))[1]
                        kwargs = {'port_id': port}
                    else:
                        kwargs = {'net_id': tenant_net_id}
                    tenant_port_id = vm_helper.attach_interface(
                        vm_under_test, **kwargs)[1]
                    tenant_port_ids.append(tenant_port_id)
                LOG.info(
                    "Attached new vnics to the VM {}".format(tenant_port_ids))

            vm_ports_count = len(
                network_helper.get_ports(server=vm_under_test))
            LOG.info("vnics attached to VM: {}".format(vm_ports_count))
            assert vm_ports_count == expt_vnics, "Attached vnic count does not match the expected number"

            LOG.info(
                "Bring up all the attached new vifs {} on tenant net from vm".
                format(vifs))
            _bring_up_attached_interface(vm_under_test,
                                         ports=tenant_port_ids,
                                         guest_os=guest_os,
                                         base_vm=base_vm_id)

            if expt_vnics == 16:
                LOG.tc_step(
                    "Verify no more vnic can be attached after reaching upper limit 16"
                )
                res = vm_helper.attach_interface(vm_under_test,
                                                 net_id=tenant_net_id,
                                                 fail_ok=True)[0]
                assert res == 1, "vnics attach exceed maximum limit"

        if vm_actions[0] == 'auto_recover':
            LOG.tc_step(
                "Set vm to error state and wait for auto recovery complete, then verify ping from "
                "base vm over management and data networks")
            vm_helper.set_vm_state(vm_id=vm_under_test,
                                   error_state=True,
                                   fail_ok=False)
            vm_helper.wait_for_vm_values(vm_id=vm_under_test,
                                         status=VMStatus.ACTIVE,
                                         fail_ok=True,
                                         timeout=600)
            # if 'vxworks' not in guest_os:
            #     _bring_up_attached_interface(vm_under_test, guest_os=guest_os, num=new_vnics)
        else:
            LOG.tc_step("Perform following action(s) on vm {}: {}".format(
                vm_under_test, vm_actions))
            for action in vm_actions:
                vm_helper.perform_action_on_vm(vm_under_test, action=action)
                if action == 'cold_migrate' or action == 'start':
                    LOG.tc_step(
                        "Bring up all the attached tenant interface from vm after {}"
                        .format(vm_actions))
                    # if 'vxworks' not in guest_os:
                    #     _bring_up_attached_interface(vm_under_test, guest_os=guest_os, num=new_vnics)

        vm_helper.wait_for_vm_pingable_from_natbox(vm_under_test)

        if 'vxworks' not in guest_os:
            LOG.tc_step(
                "Verify ping from base_vm to vm_under_test over management networks still works "
                "after {}".format(vm_actions))
            vm_helper.ping_vms_from_vm(to_vms=vm_under_test,
                                       from_vm=base_vm_id,
                                       net_types=['mgmt', 'data'],
                                       retry=10)

            LOG.tc_step("Detach all attached interface {} after {}".format(
                tenant_port_ids, vm_actions))
            for tenant_port_id in tenant_port_ids:
                vm_helper.detach_interface(vm_id=vm_under_test,
                                           port_id=tenant_port_id,
                                           cleanup_route=True)

            vm_ports_count = len(
                network_helper.get_ports(server=vm_under_test))
            assert prev_port_count == vm_ports_count, "VM ports still listed after interface-detach"
            res = vm_helper.ping_vms_from_vm(to_vms=base_vm_id,
                                             from_vm=vm_under_test,
                                             fail_ok=True,
                                             net_types=['data'],
                                             retry=0)[0]
            assert not res, "Detached interface still works"
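_bring_up_attached_interface is referenced above but not shown on this page. Below is a minimal sketch of what such a helper might do, based only on how it is called here (ssh in, bring up the new vNICs, then re-check data connectivity); the guest-side commands and interface handling are assumptions, not the project's actual implementation.

def _bring_up_attached_interface(vm_id, ports, guest_os=None, base_vm=None,
                                 action='interface-attach'):
    LOG.info("Bring up {} attached vnic(s) on vm {} after {}".format(
        len(ports), vm_id, action))
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        # Assumed guest-side step: request addresses on the new interfaces.
        # A real helper would map each neutron port to its guest eth device.
        vm_ssh.exec_cmd('dhclient', fail_ok=True)
    if base_vm:
        vm_helper.ping_vms_from_vm(to_vms=vm_id, from_vm=base_vm,
                                   net_types=['data'], retry=10)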
Example #11
def test_interface_attach_detach_on_paused_vm(guest_os, boot_source, vifs,
                                              check_avs_pattern, base_vm):
    """
    Sample test case for interface attach/detach on a paused vm

    Setups:
        - Boot a base vm with mgmt net and tenant_port_id (module)

    Test Steps:
        - Boot a vm with mgmt and avp port interface
        - Pause the vm
        - Attach vifs to the vm with the given vif models
        - perform live migration on paused vm
        - unpause the vm
        - Bring up the interface from vm
        - ping between base_vm and vm_under_test over mgmt & tenant network
        - detach all the tenant interface
        - Verify ping to tenant interfaces fail

    Teardown:
        - Delete created vm, volume, port (if any)  (func)
        - Delete base vm, volume    (module)

    """

    base_vm_id, mgmt_nic, tenant_nic, internal_net_id, tenant_net_id, mgmt_net_id = base_vm
    LOG.tc_step("Create a avp port")
    init_port_id = network_helper.create_port(tenant_net_id,
                                              'tenant_avp_port',
                                              wrs_vif='avp',
                                              cleanup='function')[1]
    tenant_net_nic = {'port-id': init_port_id, 'vif-model': 'avp'}

    LOG.tc_step("Get/Create {} glance image".format(guest_os))
    cleanup = None if re.search(GuestImages.TIS_GUEST_PATTERN,
                                guest_os) else 'module'
    image_id = glance_helper.get_guest_image(guest_os=guest_os,
                                             cleanup=cleanup)

    LOG.tc_step(
        "Boot a {} vm and flavor from {} with a mgmt and a data interface".
        format(guest_os, boot_source))
    vm_under_test = vm_helper.boot_vm('if_attach-{}-{}'.format(
        guest_os, boot_source),
                                      nics=[mgmt_nic, tenant_net_nic],
                                      source=boot_source,
                                      image_id=image_id,
                                      guest_os=guest_os,
                                      cleanup='function')[1]

    _ping_vm_data(vm_under_test=vm_under_test,
                  base_vm_id=base_vm_id,
                  action='boot')

    LOG.tc_step(
        "Pause vm {} before attaching interfaces".format(vm_under_test))
    vm_helper.perform_action_on_vm(vm_under_test, action='pause')

    LOG.tc_step("Create and attach vnics to the VM: {}".format(vifs))
    tenant_port_ids = network_helper.get_ports(server=vm_under_test,
                                               network=tenant_net_id)
    expt_vnics = 2
    new_vnics = 0
    for vif in vifs:
        vif_model, vif_count = vif
        expt_vnics += vif_count
        LOG.info("iter {}".format(vif_count))
        LOG.info("Create and attach {} {} vnics to vm {}".format(
            vif_count, vif_model, vm_under_test))
        for i in range(vif_count):
            name = 'attached_port-{}_{}'.format(vif_model, i)
            port_id = network_helper.create_port(net_id=tenant_net_id,
                                                 name=name,
                                                 wrs_vif=vif_model,
                                                 cleanup='function')[1]
            vm_helper.attach_interface(vm_under_test, port_id=port_id)
            new_vnics += 1
            tenant_port_ids.append(port_id)

    vm_ports_count = len(network_helper.get_ports(server=vm_under_test))
    LOG.info("vnics attached to VM: {}".format(vm_ports_count))
    assert vm_ports_count == expt_vnics, "Attached vnic count does not match the expected number"

    if expt_vnics == 16:
        res = vm_helper.attach_interface(vm_under_test,
                                         net_id=tenant_net_id,
                                         fail_ok=True)[0]
        assert res == 1, "vnics attach exceed maximum limit"

    LOG.tc_step("Live migrate paused vm")
    vm_helper.perform_action_on_vm(vm_under_test, action='live_migrate')

    LOG.tc_step(
        "Unpause live-migrated vm, bring up attached interfaces and ping the VM"
    )
    vm_helper.perform_action_on_vm(vm_under_test, action='unpause')
    _bring_up_attached_interface(
        vm_under_test,
        guest_os=guest_os,
        ports=tenant_port_ids,
        base_vm=base_vm_id,
        action='pause, attach interfaces, live migrate and unpause')

    LOG.tc_step("Live migrate again after unpausing the vm")
    vm_helper.perform_action_on_vm(vm_under_test, action='live_migrate')
    _ping_vm_data(vm_under_test, base_vm_id, action='live migrate')

    LOG.tc_step("Detach ports: {}".format(tenant_port_ids))
    for tenant_port_id in tenant_port_ids:
        vm_helper.detach_interface(vm_id=vm_under_test, port_id=tenant_port_id)
        new_vnics -= 1

    res = vm_helper.ping_vms_from_vm(to_vms=base_vm_id,
                                     from_vm=vm_under_test,
                                     fail_ok=True,
                                     net_types=['data'],
                                     retry=0)[0]
    assert not res, "Ping from base_vm to vm via detached interface still works"

    LOG.tc_step(
        "Attach single interface with tenant id {}".format(tenant_net_id))
    port_id = vm_helper.attach_interface(vm_under_test,
                                         net_id=tenant_net_id)[1]
    new_vnics += 1

    LOG.tc_step(
        "Live migrate vm after detach/attach, bring up interfaces and ensure ping works"
    )
    vm_helper.perform_action_on_vm(vm_under_test, action='live_migrate')
    _bring_up_attached_interface(vm_under_test,
                                 guest_os=guest_os,
                                 ports=[port_id],
                                 base_vm=base_vm_id,
                                 action='attach interface and live migrate')
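_ping_vm_data is used throughout these examples but not defined on this page. A hypothetical sketch, inferred from its call sites (verify data-network connectivity from the base vm after a given action); it is not the project's actual helper.

def _ping_vm_data(vm_under_test, base_vm_id, action):
    # Hypothetical helper: confirm data-network connectivity after <action>
    LOG.tc_step("Verify ping between base vm and vm under test over data "
                "network after {}".format(action))
    vm_helper.wait_for_vm_pingable_from_natbox(vm_under_test)
    vm_helper.ping_vms_from_vm(to_vms=vm_under_test, from_vm=base_vm_id,
                               net_types=['data'], retry=10)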
Example #12
def test_vm_with_max_vnics_attached_during_boot(base_vm, guest_os, nic_arg,
                                                boot_source):
    """
    Setups:
        - Boot a base vm with mgmt net and tenant_port_id (module)

    Test Steps:
        - Boot a vm with 1 mgmt and 15 avp/virtio interfaces
        - Perform nova action (live migrate --force, live migrate, rebuild, reboot hard/soft, resize revert, resize)
        - ping between base_vm and vm_under_test over mgmt & tenant network

    Teardown:
        - Delete created vm, volume, port (if any)  (func)
        - Delete base vm, volume    (module)

    """

    base_vm_id, mgmt_nic, tenant_nic, internal_net_id, tenant_net_id, mgmt_net_id = base_vm
    vif_type = 'avp' if system_helper.is_avs() else None

    LOG.tc_step("Get/Create {} glance image".format(guest_os))
    cleanup = None if re.search(GuestImages.TIS_GUEST_PATTERN,
                                guest_os) else 'function'
    image_id = glance_helper.get_guest_image(guest_os=guest_os,
                                             cleanup=cleanup)

    # TODO Update vif model config. Right now vif model avp still under implementation
    nics = [mgmt_nic]
    for i in range(15):
        if nic_arg == 'port_id':
            port_id = network_helper.create_port(tenant_net_id,
                                                 'tenant_port-{}'.format(i),
                                                 wrs_vif=vif_type,
                                                 cleanup='function')[1]
            nics.append({'port-id': port_id})
        else:
            nics.append({'net-id': tenant_net_id, 'vif-model': vif_type})

    LOG.tc_step(
        "Boot a {} vm and flavor from {} with 1 mgmt and 15 data interfaces".
        format(guest_os, boot_source))
    vm_under_test = vm_helper.boot_vm('max_vifs-{}-{}'.format(
        guest_os, boot_source),
                                      nics=nics,
                                      source=boot_source,
                                      image_id=image_id,
                                      guest_os=guest_os,
                                      cleanup='function')[1]

    vm_ports_count = len(network_helper.get_ports(server=vm_under_test))
    expt_vnics = 16
    LOG.info("vnics attached to VM: {}".format(vm_ports_count))
    assert vm_ports_count == expt_vnics, "Attached vnic count does not match the expected number"

    _ping_vm_data(vm_under_test, vm_under_test, action='boot')
    vm_helper.configure_vm_vifs_on_same_net(vm_id=vm_under_test)
    _ping_vm_data(vm_under_test, base_vm_id, action='configure routes')

    destination_host = vm_helper.get_dest_host_for_live_migrate(
        vm_id=vm_under_test)
    if destination_host:
        # LOG.tc_step("Perform following action(s) on vm {}: {}".format(vm_under_test, 'live-migrate --force'))
        # vm_helper.live_migrate_vm(vm_id=vm_under_test, destination_host=destination_host, force=True)
        # _ping_vm_data(vm_under_test, base_vm_id, action='live migrate --force')

        LOG.tc_step("Perform following action(s) on vm {}: {}".format(
            vm_under_test, 'live-migrate'))
        vm_helper.live_migrate_vm(vm_id=vm_under_test)
        _ping_vm_data(vm_under_test, base_vm_id, action='live-migrate')

    LOG.tc_step("Perform following action(s) on vm {}: {}".format(
        vm_under_test, 'hard reboot'))
    vm_helper.reboot_vm(vm_id=vm_under_test, hard=True)
    _ping_vm_data(vm_under_test, base_vm_id, action='hard reboot')

    LOG.tc_step("Perform following action(s) on vm {}: {}".format(
        vm_under_test, 'soft reboot'))
    vm_helper.reboot_vm(vm_id=vm_under_test)
    _ping_vm_data(vm_under_test, base_vm_id, action='soft reboot')

    LOG.tc_step('Create destination flavor')
    dest_flavor_id = nova_helper.create_flavor(name='dest_flavor',
                                               vcpus=2,
                                               guest_os=guest_os)[1]

    LOG.tc_step('Resize vm to dest flavor and revert')
    vm_helper.resize_vm(vm_under_test,
                        dest_flavor_id,
                        revert=True,
                        fail_ok=False)
    _ping_vm_data(vm_under_test, base_vm_id, action='resize revert')

    LOG.tc_step('Resize vm to dest flavor and revert False')
    vm_helper.resize_vm(vm_under_test, dest_flavor_id, fail_ok=False)
    vm_helper.configure_vm_vifs_on_same_net(vm_id=vm_under_test)
    _ping_vm_data(vm_under_test, base_vm_id, action='resize')

    LOG.tc_step("Perform following action(s) on vm {}: {}".format(
        vm_under_test, 'rebuild'))
    vm_helper.rebuild_vm(vm_id=vm_under_test)
    _ping_vm_data(vm_under_test, vm_under_test, action='rebuild')
    vm_helper.configure_vm_vifs_on_same_net(vm_id=vm_under_test)
    _ping_vm_data(vm_under_test, base_vm_id, action='rebuild')
Example #13
def test_ping_between_two_vms(stx_openstack_required, guest_os, vm1_vifs, vm2_vifs):
    """
    Ping between two vms with given vif models

    Test Steps:
        - Create a flavor with dedicated cpu policy and proper root disk size
        - Create a volume from guest image under test with proper size
        - Boot two vms with given vif models from above volume and flavor
        - Ping VMs from NatBox and between two vms

    Test Teardown:
        - Delete vms, volumes, flavor, glance image created

    """
    if guest_os == 'default':
        guest_os = GuestImages.DEFAULT['guest']

    reuse = False if 'e1000' in vm1_vifs or 'e1000' in vm2_vifs else True
    cleanup = 'function' if not reuse or 'ubuntu' in guest_os else None
    image_id = glance_helper.get_guest_image(guest_os, cleanup=cleanup,
                                             use_existing=reuse)

    LOG.tc_step("Create a favor dedicated cpu policy")
    flavor_id = nova_helper.create_flavor(name='dedicated', guest_os=guest_os,
                                          cleanup='function')[1]
    nova_helper.set_flavor(flavor_id, **{FlavorSpec.CPU_POLICY: 'dedicated'})

    mgmt_net_id = network_helper.get_mgmt_net_id()
    tenant_net_id = network_helper.get_tenant_net_id()
    internal_net_id = network_helper.get_internal_net_id()
    net_ids = (mgmt_net_id, tenant_net_id, internal_net_id)
    vms = []
    for vifs_for_vm in (vm1_vifs, vm2_vifs):
        # compose vm nics
        nics = _compose_nics(vifs_for_vm, net_ids=net_ids, image_id=image_id,
                             guest_os=guest_os)
        net_types = ['mgmt', 'data', 'internal'][:len(nics)]
        LOG.tc_step("Create a volume from {} image".format(guest_os))
        vol_id = cinder_helper.create_volume(name='vol-{}'.format(guest_os),
                                             source_id=image_id,
                                             guest_image=guest_os,
                                             cleanup='function')[1]

        LOG.tc_step(
            "Boot a {} vm with {} vifs from above flavor and volume".format(
                guest_os, vifs_for_vm))
        vm_id = vm_helper.boot_vm('{}_vifs'.format(guest_os), flavor=flavor_id,
                                  cleanup='function',
                                  source='volume', source_id=vol_id, nics=nics,
                                  guest_os=guest_os)[1]

        LOG.tc_step("Ping VM {} from NatBox(external network)".format(vm_id))
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id, fail_ok=False)

        vms.append(vm_id)

    LOG.tc_step(
        "Ping between two vms over management, data, and internal networks")
    vm_helper.ping_vms_from_vm(to_vms=vms[0], from_vm=vms[1],
                               net_types=net_types)
    vm_helper.ping_vms_from_vm(to_vms=vms[1], from_vm=vms[0],
                               net_types=net_types)
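_compose_nics is not shown on this page. Below is a minimal sketch of what it might look like, assuming vifs is an ordered iterable of vif-model names matched against the (mgmt, tenant, internal) networks; the e1000/rtl8139 handling via image metadata mirrors example #10 but is otherwise an assumption.

def _compose_nics(vifs, net_ids, image_id, guest_os):
    nics = []
    for vif_model, net_id in zip(vifs, net_ids):
        if vif_model in ('e1000', 'rtl8139'):
            # Assumed: legacy vif models are requested via image property
            glance_helper.set_image(image_id, hw_vif_model=vif_model,
                                    new_name='{}_{}'.format(guest_os, vif_model))
            nics.append({'net-id': net_id})
        else:
            nics.append({'net-id': net_id, 'vif-model': vif_model})
    return nics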
Example #14
def test_nova_actions(guest_os, cpu_pol, actions):
    """

    Args:
        guest_os:
        cpu_pol:
        actions:

    Test Steps:
        - Create a glance image from given guest type
        - Create a vm from cinder volume using above image with specified cpu
        policy
        - Perform given nova actions on vm
        - Ensure nova operation succeeded and vm still in good state (active
        and reachable from NatBox)

    """
    if guest_os == 'opensuse_12':
        if not cinder_helper.is_volumes_pool_sufficient(min_size=40):
            skip(SkipStorageSpace.SMALL_CINDER_VOLUMES_POOL)

    img_id = glance_helper.get_guest_image(guest_os=guest_os)

    LOG.tc_step("Create a flavor with 1 vcpu")
    flavor_id = nova_helper.create_flavor(name=cpu_pol, vcpus=1,
                                          root_disk=9)[1]
    ResourceCleanup.add('flavor', flavor_id)

    if cpu_pol is not None:
        specs = {FlavorSpec.CPU_POLICY: cpu_pol}
        LOG.tc_step("Add following extra specs: {}".format(specs))
        nova_helper.set_flavor(flavor=flavor_id, **specs)

    LOG.tc_step("Create a volume from {} image".format(guest_os))
    vol_id = \
        cinder_helper.create_volume(name='vol-' + guest_os, source_id=img_id,
                                    guest_image=guest_os)[1]
    ResourceCleanup.add('volume', vol_id)

    LOG.tc_step("Boot a vm from above flavor and volume")
    vm_id = vm_helper.boot_vm('nova_actions',
                              flavor=flavor_id,
                              source='volume',
                              source_id=vol_id,
                              cleanup='function')[1]

    LOG.tc_step("Wait for VM pingable from NATBOX")
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

    for action in actions:
        if action == 'auto_recover':
            LOG.tc_step(
                "Set vm to error state and wait for auto recovery complete, "
                "then verify ping from base vm over "
                "management and data networks")
            vm_helper.set_vm_state(vm_id=vm_id,
                                   error_state=True,
                                   fail_ok=False)
            vm_helper.wait_for_vm_values(vm_id=vm_id,
                                         status=VMStatus.ACTIVE,
                                         fail_ok=True,
                                         timeout=600)
        else:
            LOG.tc_step("Perform following action on vm {}: {}".format(
                vm_id, action))
            vm_helper.perform_action_on_vm(vm_id, action=action)

    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
Example #15
def test_vm_with_config_drive(hosts_per_stor_backing):
    """
    Skip Condition:
        - no host with local_image backend

    Test Steps:
        - Launch a vm using config drive
        - Add test data to config drive on vm
        - Do some operations (reboot vm for simplex, cold migrate and lock host for non-simplex) and
            check test data persisted in config drive after each operation
    Teardown:
        - Delete created vm, volume, flavor

    """
    guest_os = 'cgcs-guest'
    # guest_os = 'tis-centos-guest'  # CGTS-6782
    img_id = glance_helper.get_guest_image(guest_os)
    hosts_num = len(hosts_per_stor_backing.get('local_image', []))
    if hosts_num < 1:
        skip("No host with local_image storage backing")

    volume_id = cinder_helper.create_volume(name='vol_inst1',
                                            source_id=img_id,
                                            guest_image=guest_os)[1]
    ResourceCleanup.add('volume', volume_id, scope='function')

    block_device = {
        'source': 'volume',
        'dest': 'volume',
        'id': volume_id,
        'device': 'vda'
    }
    vm_id = vm_helper.boot_vm(name='config_drive',
                              config_drive=True,
                              block_device=block_device,
                              cleanup='function',
                              guest_os=guest_os,
                              meta={'foo': 'bar'})[1]

    LOG.tc_step("Confirming the config drive is set to True in vm ...")
    assert str(vm_helper.get_vm_values(vm_id, "config_drive")[0]) == 'True', \
        "vm config-drive not true"

    LOG.tc_step("Add date to config drive ...")
    check_vm_config_drive_data(vm_id)

    vm_host = vm_helper.get_vm_host(vm_id)
    instance_name = vm_helper.get_vm_instance_name(vm_id)
    LOG.tc_step("Check config_drive vm files on hypervisor after vm launch")
    check_vm_files_on_hypervisor(vm_id,
                                 vm_host=vm_host,
                                 instance_name=instance_name)

    if not system_helper.is_aio_simplex():
        LOG.tc_step("Cold migrate VM")
        vm_helper.cold_migrate_vm(vm_id)

        LOG.tc_step("Check config drive after cold migrate VM...")
        check_vm_config_drive_data(vm_id)

        LOG.tc_step("Lock the compute host")
        compute_host = vm_helper.get_vm_host(vm_id)
        HostsToRecover.add(compute_host)
        host_helper.lock_host(compute_host, swact=True)

        LOG.tc_step("Check config drive after locking VM host")
        check_vm_config_drive_data(vm_id, ping_timeout=VMTimeout.DHCP_RETRY)
        vm_host = vm_helper.get_vm_host(vm_id)

    else:
        LOG.tc_step("Reboot vm")
        vm_helper.reboot_vm(vm_id)

        LOG.tc_step("Check config drive after vm rebooted")
        check_vm_config_drive_data(vm_id)

    LOG.tc_step("Check vm files exist after nova operations")
    check_vm_files_on_hypervisor(vm_id,
                                 vm_host=vm_host,
                                 instance_name=instance_name)
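check_vm_config_drive_data is referenced above but not defined here. A hedged sketch of such a check (mount the config drive inside the guest and confirm the metadata written at boot, e.g. the {'foo': 'bar'} meta, is still present); the device label, mount path, and timeout kwarg are assumptions, not the project's actual implementation.

def check_vm_config_drive_data(vm_id, ping_timeout=None):
    if ping_timeout:
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id, timeout=ping_timeout)  # timeout kwarg assumed
    else:
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        # Config drive is normally an iso9660/vfat volume labelled config-2
        vm_ssh.exec_cmd('mkdir -p /mnt/config && '
                        'mount /dev/disk/by-label/config-2 /mnt/config',
                        fail_ok=True)
        code, output = vm_ssh.exec_cmd(
            'cat /mnt/config/openstack/latest/meta_data.json', fail_ok=False)
        assert 'foo' in output and 'bar' in output, \
            "Test data not found in config drive"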
Example #16
def test_gpu_passthrough(setup_alias):
    """
        Test case for GPU passthrough

    Test Steps:
        - Create pci alias for dev type 'gpu' and 'user'
        - Calculate the initial pf used in 'nova device-list'
        - Create flavor with extra spec with PCI_PASSTHROUGH_ALIAS device gpu & usb
        - Boot a vm with created flavor & gpu passthrough specific centos image
        - Verify the pf used increased after vm launch


    Teardown:
        - Delete created vm, flavor, pci_alias

    """

    nova_gpu_alias, nova_usb_alias = setup_alias

    # initialize parameter for basic operation
    name = 'gpu_passthrough'
    guest_os = 'centos_gpu'
    pf = 1

    LOG.tc_step("Create a flavor for GPU Passthrough")
    flavor_id = nova_helper.create_flavor(name=name, root_disk=16)[1]
    ResourceCleanup.add('flavor', flavor_id, scope='module')
    extra_spec = {
        FlavorSpec.PCI_PASSTHROUGH_ALIAS:
        '{}:{},{}:{}'.format(nova_gpu_alias, pf, nova_usb_alias, pf),
        FlavorSpec.CPU_POLICY:
        'dedicated'
    }

    nova_helper.set_flavor(flavor_id, **extra_spec)

    initial_gpu_pfs_used = _calculate_pf_used(nova_gpu_alias)
    initial_usb_pfs_used = _calculate_pf_used(nova_usb_alias)

    LOG.tc_step("Get/Create {} glance image".format(guest_os))
    cleanup = None if re.search(GuestImages.TIS_GUEST_PATTERN,
                                guest_os) else 'module'
    image_id = glance_helper.get_guest_image(guest_os=guest_os,
                                             cleanup=cleanup)

    mgmt_net_id = network_helper.get_mgmt_net_id()
    tenant_net_id = network_helper.get_tenant_net_id()

    mgmt_nic = {'net-id': mgmt_net_id}
    tenant_nic = {'net-id': tenant_net_id}
    nics = [mgmt_nic, tenant_nic]

    LOG.tc_step("Boot a vm  {} with pci-alias and flavor ".format(
        nova_gpu_alias, flavor_id))
    vm_id = vm_helper.boot_vm(name,
                              flavor=flavor_id,
                              source='image',
                              source_id=image_id,
                              nics=nics,
                              cleanup='function')[1]

    actual_gpu_pfs_used = _calculate_pf_used(nova_gpu_alias)
    expected_gpu_pfs_used = initial_gpu_pfs_used + pf
    assert actual_gpu_pfs_used == expected_gpu_pfs_used, "actual gpu pci pfs is not equal to expected pci pfs"

    actual_usb_pfs_used = _calculate_pf_used(nova_usb_alias)
    expected_usb_pfs_used = initial_usb_pfs_used + pf
    assert actual_usb_pfs_used == expected_usb_pfs_used, "actual usb pci pfs is not equal to expected pci pfs"

    LOG.tc_step("Delete vm  {} ".format(vm_id))
    vm_helper.delete_vms(vms=vm_id, stop_first=False)

    actual_gpu_pfs_used = _calculate_pf_used(nova_gpu_alias)
    assert actual_gpu_pfs_used == initial_gpu_pfs_used, \
        "actual gpu pci pfs is not equal to expected pci pfs after vm delete"

    actual_usb_pfs_used = _calculate_pf_used(nova_usb_alias)
    assert actual_usb_pfs_used == initial_usb_pfs_used, \
        "actual usb pci pfs is not equal to expected pci pfs after vm delete"

    LOG.tc_step(
        "Deleting nova service parameters for gpu & usb")
Example #17
def test_migrate_vm(check_system, guest_os, mig_type, cpu_pol):
    """
    Test migrate vms for given guest type
    Args:
        check_system:
        guest_os:
        mig_type:
        cpu_pol:

    Test Steps:
        - Create a glance image from given guest type
        - Create a vm from cinder volume using above image
        - Live/cold migrate the vm
        - Ensure vm moved to other host and in good state (active and
            reachable from NatBox)

    """
    LOG.tc_step("Create a flavor with 1 vcpu")
    flavor_id = \
        nova_helper.create_flavor(name='{}-mig'.format(mig_type), vcpus=1,
                                  root_disk=9, cleanup='function')[1]

    if cpu_pol is not None:
        specs = {FlavorSpec.CPU_POLICY: cpu_pol}
        LOG.tc_step("Add following extra specs: {}".format(specs))
        nova_helper.set_flavor(flavor=flavor_id, **specs)

    LOG.tc_step("Create a volume from {} image".format(guest_os))
    image_id = glance_helper.get_guest_image(guest_os=guest_os)

    vol_id = cinder_helper.create_volume(source_id=image_id, size=9,
                                         guest_image=guest_os)[1]
    ResourceCleanup.add('volume', vol_id)

    LOG.tc_step("Boot a vm from above flavor and volume")
    vm_id = vm_helper.boot_vm(guest_os, flavor=flavor_id, source='volume',
                              source_id=vol_id, cleanup='function')[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

    if guest_os == 'ubuntu_14':
        system_helper.wait_for_alarm_gone(alarm_id=EventLogID.CINDER_IO_CONGEST,
                                          entity_id='cinder_io_monitor',
                                          strict=False, timeout=300,
                                          fail_ok=False)

    LOG.tc_step("{} migrate vm and check vm is moved to different host".format(
        mig_type))
    prev_vm_host = vm_helper.get_vm_host(vm_id)

    if mig_type == 'live':
        code, output = vm_helper.live_migrate_vm(vm_id)
        if code == 1:
            assert False, "No host to live migrate to. System may not be in " \
                          "good state."
    else:
        vm_helper.cold_migrate_vm(vm_id)

    vm_host = vm_helper.get_vm_host(vm_id)
    assert prev_vm_host != vm_host, "vm host did not change after {} " \
                                    "migration".format(mig_type)

    LOG.tc_step("Ping vm from NatBox after {} migration".format(mig_type))
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)