def _setup_vm(vm_ids, hosts_to_boot):
    """
    Set up source and destination vm
    Args:
        vm_ids: List of already booted VMs
        hosts_to_boot: Boot on same compute if same_host is true or in difference host

    Returns:
        vm_ids: append vm_id created
        source_vm_id, dest_vm_id, internal_net_id, mgmt_net_id, mgmt_nic
    """

    internal_net_id = network_helper.get_internal_net_id()
    mgmt_net_id = network_helper.get_mgmt_net_id()
    mgmt_nic = {'net-id': mgmt_net_id}
    internal_nic = {'net-id': internal_net_id}
    nics = [mgmt_nic, internal_nic]

    source_vm_id = vm_helper.boot_vm(name='source_vm',
                                     nics=nics,
                                     cleanup='function',
                                     vm_host=hosts_to_boot[0])[1]
    vm_ids.append(source_vm_id)
    dest_vm_id = vm_helper.boot_vm(name='dest_vm',
                                   nics=nics,
                                   cleanup='function',
                                   vm_host=hosts_to_boot[1])[1]
    vm_ids.append(dest_vm_id)
    LOG.info("Source VM {} and Destination VM {} booted".format(
        source_vm_id, dest_vm_id))

    return vm_ids, source_vm_id, dest_vm_id, internal_net_id, mgmt_net_id, mgmt_nic
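

# A minimal usage sketch (illustrative only, not part of the original module):
# hosts_to_boot is typically two hypervisor names, or the same name repeated
# for the same-host scenario.
def _setup_vm_example(hosts_to_boot):
    vm_ids = []
    vm_ids, source_vm, dest_vm, internal_net, mgmt_net, mgmt_nic = _setup_vm(
        vm_ids, hosts_to_boot)
    return source_vm, dest_vm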
Example No. 2
def test_boot_ge_edge_uefi():
    guest = 'ge_edge'
    LOG.tc_step("Get ge_edge guest image from test server and create glance image with uefi property")
    glance_helper.get_guest_image(guest_os=guest, rm_image=True)
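    # If the image did not already carry the firmware property, a comparable
    # sketch (modelled on the secure-boot example later in this collection, and
    # illustrative only) would set it explicitly at image-creation time:
    # create_image_with_metadata(guest_os=guest,
    #                            property_key=ImageMetadata.FIRMWARE_TYPE,
    #                            values=['uefi'],
    #                            disk_format='qcow2',
    #                            container_format='bare')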

    LOG.tc_step("Create a flavor for ge_edge vm")
    flavor = nova_helper.create_flavor(guest_os=guest)[1]

    LOG.tc_step("Launch a GE_EDGE vm with UEFI boot")
    vm_helper.boot_vm(name='ge_edge_uefi', flavor=flavor, guest_os=guest)
Example No. 3
def test_create_snapshot_using_boot_from_image_vm():
    """
    This test creates a snapshot from a VM that is booted from image using
    nova image-create.  Nova image-create will create a glance image that can
    be used to boot a VM.

    Assumptions:
    * There are some images available on the system

    Test Steps:
    1.  Boot a vm from image
    2.  Run nova image-create <vm-id> <name> to save a snapshot of a vm in the
        form of a glance image
    3.  Run glance image-download --file <snapshot-img-filename>
        <snapshot-img-uuid> to download the snapshot image
    4.  Delete the downloaded image
    5.  Boot a VM using the snapshot that was created

    Teardown:
    1.  Delete VMs
    2.  Delete snapshots in the form of a glance image
    """

    con_ssh = ControllerClient.get_active_controller()

    LOG.tc_step("Boot a VM from image")
    vm_id = vm_helper.boot_vm(source="image", cleanup='function')[1]
    assert vm_id, "Failed to boot VM"
    vm_name = vm_helper.get_vm_name_from_id(vm_id)
    snapshot_name = vm_name + "_snapshot"

    # nova image-create generates a glance image
    LOG.tc_step("Create a snapshot based on that VM")
    image_id = vm_helper.create_image_from_vm(vm_id, cleanup='function')[1]

    image_filename = '{}/images/temp'.format(HostLinuxUser.get_home())
    LOG.tc_step("Download the image snapshot")
    glance_cmd = "image save --file {} {}".format(image_filename, image_id)
    # Throw exception if glance cmd rejected
    cli.openstack(glance_cmd, ssh_client=con_ssh, fail_ok=False)

    # Downloading should be good enough for validation.  If the file is
    # zero-size, download will report failure.
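    # An explicit check is also possible (a sketch; 'test -s' fails when the
    # downloaded file is missing or empty):
    # con_ssh.exec_cmd("test -s {}".format(image_filename), fail_ok=False)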
    LOG.tc_step("Delete the downloaded image")
    con_ssh.exec_cmd("rm {}".format(image_filename), fail_ok=False)

    # Second form of validation is to boot a VM from the snapshot
    LOG.tc_step("Boot a VM from snapshot")
    snapshot_vm = "from_" + snapshot_name
    vm_helper.boot_vm(name=snapshot_vm,
                      source="image",
                      source_id=image_id,
                      cleanup='function',
                      fail_ok=False)
Example No. 4
def _boot_migrable_vms(storage_backing):
    """
    Create vms with specific storage backing that can be live migrated

    Args:
        storage_backing: 'local_image' or 'remote'

    Returns: (vms_info (list), flavors_created (list))
        vms_info : [(vm_id1, block_mig1), (vm_id2, block_mig2), ...]

    """
    vms_to_test = []
    flavors_created = []
    flavor_no_localdisk = nova_helper.create_flavor(
        ephemeral=0, swap=0, storage_backing=storage_backing)[1]
    flavors_created.append(flavor_no_localdisk)

    vm_1 = vm_helper.boot_vm(flavor=flavor_no_localdisk, source='volume')[1]

    block_mig_1 = False
    vms_to_test.append((vm_1, block_mig_1))

    LOG.info(
        "Boot a VM from image if host storage backing is local_image or remote..."
    )
    vm_2 = vm_helper.boot_vm(flavor=flavor_no_localdisk, source='image')[1]
    block_mig_2 = True
    vms_to_test.append((vm_2, block_mig_2))
    if storage_backing == 'remote':
        LOG.info(
            "Boot a VM from volume with local disks if storage backing is remote..."
        )
        ephemeral_swap = random.choice([[0, 512], [1, 512], [1, 0]])
        flavor_with_localdisk = nova_helper.create_flavor(
            ephemeral=ephemeral_swap[0], swap=ephemeral_swap[1])[1]
        flavors_created.append(flavor_with_localdisk)
        vm_3 = vm_helper.boot_vm(flavor=flavor_with_localdisk,
                                 source='volume')[1]
        block_mig_3 = False
        vms_to_test.append((vm_3, block_mig_3))
        LOG.info(
            "Boot a VM from image with volume attached if storage backing is remote..."
        )
        vm_4 = vm_helper.boot_vm(flavor=flavor_no_localdisk, source='image')[1]
        vm_helper.attach_vol_to_vm(vm_id=vm_4)
        block_mig_4 = False
        vms_to_test.append((vm_4, block_mig_4))

    return vms_to_test, flavors_created
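

# Usage sketch (illustrative only; storage_backing is 'local_image' or 'remote'
# per the docstring above):
def _live_migrate_booted_vms(storage_backing):
    vms_info, flavors_created = _boot_migrable_vms(storage_backing)
    for vm, block_mig in vms_info:
        vm_helper.live_migrate_vm(vm_id=vm, block_migrate=block_mig)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm)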
Example No. 5
def test_send_acpi_signal_on_shutdown(nova_action, hard):
    """
    Sample test case: boot an instance and send an acpi signal on shutdown
    Test Steps:
        - Boot a vm with only mgmt interface & tenant interface
        - if hard is set, reboot the vm with the --hard option (stop/start has no --hard option)
        - ssh to vm & modify /etc/acpi/actions/power.sh file to log message
        - perform nova action using arg 'hard'
        - After nova action verify the message logged in '/var/log/messages'

    Teardown:
        - Delete created vm, volume

    """
    nova_action = nova_action.split('_')
    hard = 1 if 'hard' == hard else 0

    LOG.info("hard option: {}".format(hard))
    LOG.tc_step("Boot a vm")
    vm_under_test = vm_helper.boot_vm(name='send_acpi_signal_to_vm',
                                      cleanup='function')[1]

    LOG.tc_step("Modify gyest acpi file file")
    _modify_guest_acpi_file(vm_id=vm_under_test)

    kwargs = {}
    if hard == 1:
        kwargs = {'hard': True}
    for action in nova_action:
        LOG.tc_step("Perform nova action: {}".format(action))
        vm_helper.perform_action_on_vm(vm_under_test, action=action, **kwargs)

    LOG.tc_step("Verify /var/log/messages file")
    _check_log_messages(vm_id=vm_under_test, hard=hard)
Example No. 6
def vms_(volumes_):
    """
    Text fixture to create cinder volume with specific 'display-name',
    and 'size'
    Args:
        volumes_: list of two large volumes dict created by volumes_ fixture

    Returns: volume dict as following:
        {'id': <volume_id>,
         'display_name': <vol_inst1 or vol_inst2>,
         'size': <20 or 40>
        }
    """
    vms = []
    vm_names = ['test_inst1', 'test_inst2']
    for instance_name, vol_params in zip(vm_names, volumes_):
        vm_id = vm_helper.boot_vm(
            name=instance_name,
            source='volume',
            source_id=vol_params['id'],
            cleanup='function')[1]  # , user_data=get_user_data_file())[1]
        vms.append({
            'id': vm_id,
            'display_name': instance_name,
        })
    return vms
Example No. 7
def test_boot_windows_guest():
    """
    Boot a windows guest to assist for manual testing on windows guest
    """
    # Change the following parameters to change the vm type.
    guest = 'win_2012'  # such as tis-centos-guest
    storage = 'local_image'  # local_lvm, local_image, or remote
    boot_source = 'image'  # volume or image

    LOG.tc_step("Get/Create {} glance image".format(guest))
    glance_helper.get_guest_image(guest_os=guest)

    LOG.tc_step("Create flavor with {} storage backing".format(storage))
    flv_id = nova_helper.create_flavor(name='{}-{}'.format(storage, guest),
                                       vcpus=4,
                                       ram=8192,
                                       storage_backing=storage,
                                       guest_os=guest)[1]
    nova_helper.set_flavor(flv_id, **{FlavorSpec.CPU_POLICY: 'dedicated'})

    LOG.tc_step("Boot {} vm".format(guest))
    vm_id = vm_helper.boot_vm(name='{}-{}'.format(guest, storage),
                              flavor=flv_id,
                              guest_os=guest,
                              source=boot_source)[1]

    LOG.tc_step("Ping vm and ssh to it")
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        code, output = vm_ssh.exec_cmd('pwd', fail_ok=False)
        LOG.info(output)

    LOG.info(
        "{} is successfully booted from {} with {} storage backing".format(
            guest, boot_source, storage))
Example No. 8
def test_migrate_stress(check_hypervisors, boot_source, count):

    LOG.tc_step("Launch a VM from {}".format(boot_source))
    vm = vm_helper.boot_vm(name='{}-stress'.format(boot_source), cleanup='function',
                           source=boot_source)[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm)

    block_mig = boot_source == 'image'
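    # Block (live) migration copies the instance's local root disk, so it only
    # applies to image-backed VMs; a boot-from-volume VM has no local root disk
    # and is expected to reject it.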
    if not block_mig:
        LOG.tc_step("Attempt to block migration on boot-from-volume VM and ensure if fails")
        code = vm_helper.live_migrate_vm(vm_id=vm, block_migrate=True)[0]
        assert code > 0, "Block migration passed unexpectedly for boot-from-volume vm"
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm)

    LOG.tc_step("Live migrate and ping vm 1000 times")
    for i in range(count):
        LOG.info('Live migration iter{}'.format(i+1))
        vm_helper.live_migrate_vm(vm)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm)

    LOG.tc_step("Cold migrate vm followed by live migrate {} times".format(count))
    for i in range(count):
        LOG.info('Cold+live migration iter{}'.format(i + 1))
        vm_helper.cold_migrate_vm(vm_id=vm)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm)

        vm_helper.live_migrate_vm(vm)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm)
Example No. 9
def create_rt_vm(hypervisor):
    global testable_hypervisors
    LOG.tc_step('Create/get glance image using rt guest image')
    image_id = glance_helper.get_guest_image(guest_os='tis-centos-guest-rt',
                                             cleanup='module')

    vcpu_count = VM_CPU_NUM
    non_rt_core = 0
    LOG.tc_step(
        'Create a flavor with specified cpu model, cpu policy, realtime mask, and 2M pagesize'
    )
    flavor_id, storage_backing = nova_helper.create_flavor(
        ram=1024, vcpus=vcpu_count, root_disk=2,
        storage_backing='local_image')[1:3]
    cpu_info = dict(testable_hypervisors[hypervisor]['cpu_info'])
    extra_specs = {
        FlavorSpec.VCPU_MODEL: cpu_info['model'],
        FlavorSpec.CPU_POLICY: 'dedicated',
        FlavorSpec.CPU_REALTIME: 'yes',
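        # '^<n>' in the realtime mask excludes vCPU n from the realtime set, so
        # vCPU 0 stays a normal housekeeping core here.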
        FlavorSpec.CPU_REALTIME_MASK: '^{}'.format(non_rt_core),
        FlavorSpec.MEM_PAGE_SIZE: 2048,
    }
    nova_helper.set_flavor(flavor_id, **extra_specs)

    LOG.tc_step(
        'Boot a VM with rt flavor and image on the targeted hypervisor: {}'.
        format(hypervisor))
    vm_id = vm_helper.boot_vm(flavor=flavor_id,
                              source='image',
                              source_id=image_id,
                              vm_host=hypervisor,
                              cleanup='function')[1]
    return vm_id, vcpu_count, non_rt_core
Example No. 10
def test_timing():
    threads = []
    flav_id = nova_helper.create_flavor('thread_testing')[1]
    ResourceCleanup.add(resource_type='flavor', resource_id=flav_id)
    start_1 = time()
    for i in range(0, 6):
        thread = MThread(vm_helper.boot_vm, 'threading_vm', flavor=flav_id)
        thread.start_thread(240)
        threads.append(thread)

    for thread in threads:
        thread.wait_for_thread_end()
    for thread in threads:
        ResourceCleanup.add(resource_type='vm',
                            resource_id=thread.get_output()[1])
    end_1 = time()

    start_2 = time()
    for i in range(0, 2):
        vm_id = vm_helper.boot_vm('loop_vm', flav_id)[1]
        ResourceCleanup.add(resource_type='vm', resource_id=vm_id)
    end_2 = time()

    LOG.info("Time results:\n"
             "Multithreading: {}\n"
             "Single loop: {}".format(end_1 - start_1, end_2 - start_2))
Example No. 11
def test_db_purge():

    end_time = time.time() + 7200

    count = 1
    while time.time() < end_time:

        LOG.tc_step(
            "Iteration-{}: Creating and deleting image, volume, vm".format(
                count))
        LOG.info("------ Creating image, volume, vm")
        image_id = glance_helper.create_image(
            name='glance-purge',
            cleanup='function',
            **{ImageMetadata.AUTO_RECOVERY: 'true'})[1]
        vol_id = cinder_helper.create_volume(name='cinder-purge',
                                             source_id=image_id)[1]
        vm_id = vm_helper.boot_vm(name='nova-purge',
                                  source='volume',
                                  source_id=vol_id)[1]

        time.sleep(60)

        LOG.info("------ Deleting vm, volume, image")
        vm_helper.delete_vms(vms=vm_id)
        cinder_helper.delete_volumes(volumes=vol_id)
        glance_helper.delete_images(images=image_id)

        time.sleep(60)
        count += 1
def test_ea_vm_with_crypto_vfs(_flavors, hosts_pci_device_info):
    """
    Verify guest can be launched with one crypto VF, AVP, VIRTIO, and SRIOV interfaces.
    Verify device cannot be disabled while in use (mainly for labs with two computes).
    Args:
        _flavors:
        hosts_pci_device_info:

    """
    # hosts = list(hosts_pci_device_info.keys())
    vm_name = 'vm_with_pci_device'
    mgmt_net_id = network_helper.get_mgmt_net_id()

    nics = [{'net-id': mgmt_net_id}]

    flavor_id = _flavors['flavor_qat_vf_1']
    LOG.tc_step("Boot a vm  {} with pci-sriov nics and flavor flavor_qat_vf_1".format(vm_name))
    vm_id = vm_helper.boot_vm(vm_name, flavor=flavor_id, nics=nics, cleanup='function')[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    LOG.info("VM {} booted successfully and become active with crypto VF".format(vm_name))

    vm_host = vm_helper.get_vm_host(vm_id)
    pci_dev_info = hosts_pci_device_info[vm_host][0]
    # device_address = pci_dev_info['pci_address']
    host_dev_name = pci_dev_info['device_name']
    expt_qat_devs = {host_dev_name: 1}
    check_helper.check_qat_service(vm_id=vm_id, qat_devs=expt_qat_devs)

    _perform_nova_actions(vms_dict={vm_name: vm_id}, flavors=_flavors)
    check_helper.check_qat_service(vm_id=vm_id, qat_devs=expt_qat_devs)
Example No. 13
def test_boot_and_ping_vm(guest_os, opensuse11_image, opensuse12_image,
                          opensuse13_image, rhel6_image, rhel7_image):

    vm_id = vm_helper.boot_vm(guest_os=guest_os,
                              source='image',
                              cleanup='function')[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
Example No. 14
def _test_check_vm_disk_on_compute(storage, hosts_per_backing):

    """
        Tests that the existence of volumes is properly reported for lvm-backed vms.

        Skip:
            - Skip if no lvm-configured compute nodes available

        Test steps:
            - Create a flavor for lvm-backed vms and boot a vm from that flavor
            - SSH onto the node hosting the VM and do the following:
                - Run ps aux and confirm that there is a qemu process
                - Run sudo lvs and confirm the existence of a thin pool
                - Run sudo lvs and confirm the existence of a volume for the vm
            - Ensure that the "free" space shown for the hypervisor (obtained by running
                "nova hypervisor-show <compute node>" and then checking the "free_disk_gb" field)
                reflects the space available within the thin pool
            - Delete the instance and ensure that space is returned to the hypervisor

        Test Teardown:
            - Delete created VM if not already done

    """

    hosts_with_backing = hosts_per_backing.get(storage, [])
    if not hosts_with_backing:
        skip(SkipStorageBacking.NO_HOST_WITH_BACKING.format(storage))

    LOG.tc_step("Create flavor and boot vm")
    flavor = nova_helper.create_flavor(storage_backing=storage)[1]
    ResourceCleanup.add('flavor', flavor, scope='function')
    vm = vm_helper.boot_vm(source='image', flavor=flavor, cleanup='function')[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm)
    vm_host = vm_helper.get_vm_host(vm)

    with host_helper.ssh_to_host(vm_host) as compute_ssh:
        LOG.tc_step("Look for qemu process")
        compute_ssh.exec_sudo_cmd(cmd="lvs --units g")
        assert check_for_qemu_process(compute_ssh), "qemu process not found when calling ps"

        LOG.tc_step("Look for pool information")
        thin_pool_size = get_initial_pool_space(compute_ssh, vm)

        vm_vol_name = vm + '_disk'
        raw_vm_volume_output = \
            compute_ssh.exec_sudo_cmd(cmd="lvs --units g --noheadings -o lv_size -S lv_name={}".format(vm_vol_name))[1]
        assert raw_vm_volume_output, "created vm volume not found"
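        # lvs reports sizes like '<2.00g'; strip the '<' prefix and 'g' suffix
        # before converting to a float.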
        vm_volume_size = float(raw_vm_volume_output.strip('<g'))

    LOG.tc_step("Calculate compute free disk space and ensure that it reflects thin pool")
    expected_space_left = int(thin_pool_size - vm_volume_size)
    free_disk_space = get_compute_free_disk_gb(vm_host)
    assert expected_space_left - 1 <= free_disk_space <= expected_space_left + 1, \
        'Hypervisor-show does not reflect space within thin pool'

    LOG.tc_step("Calculate free space following vm deletion (ensure volume space is returned)")
    vm_helper.delete_vms(vm)
    free_disk_space = get_compute_free_disk_gb(vm_host)
    assert int(thin_pool_size) == free_disk_space, \
        'Space is not properly returned to the hypervisor or hypervisor info does not properly reflect it'
Example No. 15
    def base_setup(self):

        flavor_id = nova_helper.create_flavor(name='dedicated')[1]
        ResourceCleanup.add('flavor', flavor_id, scope='class')

        extra_specs = {FlavorSpec.CPU_POLICY: 'dedicated'}
        nova_helper.set_flavor(flavor=flavor_id, **extra_specs)

        mgmt_net_id = network_helper.get_mgmt_net_id()
        tenant_net_id = network_helper.get_tenant_net_id()
        internal_net_id = network_helper.get_internal_net_id()

        nics = [{'net-id': mgmt_net_id},
                {'net-id': tenant_net_id},
                {'net-id': internal_net_id}]

        LOG.fixture_step(
            "(class) Boot a base vm with following nics: {}".format(nics))
        base_vm = vm_helper.boot_vm(name='multiports_base',
                                    flavor=flavor_id, nics=nics,
                                    cleanup='class',
                                    reuse_vol=False)[1]

        vm_helper.wait_for_vm_pingable_from_natbox(base_vm)
        vm_helper.ping_vms_from_vm(base_vm, base_vm, net_types='data')

        return base_vm, flavor_id, mgmt_net_id, tenant_net_id, internal_net_id
Example No. 16
def test_something_avoid(modify_system_backing):
    """
    Test to AVOID! Do NOT parametrize module/class level fixture unless you are absolutely sure about the impact and
    intend to do so. Note that when a module level fixture is parametrized, both the setups AND teardowns will be run
    multiple times.

    Args:
        modify_system_backing:

    Setups:
        - Lock host, modify host storage backing to given backing, unlock host      (module)

    Test Steps:
        - Create a flavor with specified storage backing
        - Boot vm from above flavor

    Teardown:
        - Delete created vm, volume, flavor
        - Lock host, modify host storage backing to local_image, unlock host      (module)

    """
    LOG.tc_step("Create a flavor with specified storage backing")
    storage_backing = modify_system_backing
    flv_id = nova_helper.create_flavor(name='test_avoid_flv',
                                       storage_backing=storage_backing)[1]
    ResourceCleanup.add(resource_type='flavor', resource_id=flv_id)

    LOG.tc_step("Boot vm from above flavor")
    vm_id = vm_helper.boot_vm(name='test_avoid_vm', flavor=flv_id)[1]
    ResourceCleanup.add(resource_type='vm', resource_id=vm_id)
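

# For reference, the anti-pattern this test warns about looks roughly like the
# sketch below (illustrative only): parametrizing a module-scoped fixture makes
# pytest run its setup AND teardown once per parameter value.
#
# @pytest.fixture(scope='module', params=['local_image', 'remote'])
# def modify_system_backing(request):
#     ...  # lock host, change storage backing to request.param, unlock host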
Example No. 17
def patch_function_check(request):
    vms = vm_helper.get_vms(name='patch', strict=False)
    boot_vm = len(vms) != 2
    if not boot_vm:
        for vm in vms:
            if vm_helper.get_vm_status(vm) != VMStatus.ACTIVE or not vm_helper.ping_vms_from_natbox(vm, fail_ok=True):
                boot_vm = True
                break

    if boot_vm:
        if vms:
            vm_helper.delete_vms(vms, remove_cleanup='module')
        vms = []
        for source in ('volume', 'image'):
            vms.append(vm_helper.boot_vm(name='patch_{}'.format(source), source=source, cleanup='module')[1])

    def remove_on_teardown():
        LOG.info("Check vm status and delete if in bad state")
        for vm_ in vms:
            if vm_helper.get_vm_status(vm_) != VMStatus.ACTIVE:
                vm_helper.delete_vms(vm_, remove_cleanup='module')

        LOG.fixture_step("Remove test patches")
        remove_test_patches()
    request.addfinalizer(remove_on_teardown)

    return vms
Example No. 18
def _boot_vm_under_test(storage_backing, ephemeral, swap, cpu_pol, vcpus,
                        vm_type):
    LOG.tc_step(
        "Create a flavor with {} vcpus, {}G ephemera disk, {}M swap "
        "disk".format(vcpus, ephemeral, swap))
    flavor_id = nova_helper.create_flavor(
        name='migration_test', ephemeral=ephemeral, swap=swap, vcpus=vcpus,
        storage_backing=storage_backing, cleanup='function')[1]

    if cpu_pol is not None:
        specs = {FlavorSpec.CPU_POLICY: cpu_pol}

        LOG.tc_step("Add following extra specs: {}".format(specs))
        nova_helper.set_flavor(flavor=flavor_id, **specs)

    boot_source = 'volume' if vm_type == 'volume' else 'image'
    LOG.tc_step("Boot a vm from {}".format(boot_source))
    vm_id = vm_helper.boot_vm('migration_test',
                              flavor=flavor_id, source=boot_source,
                              reuse_vol=False,
                              cleanup='function')[1]

    if vm_type == 'image_with_vol':
        LOG.tc_step("Attach volume to vm")
        vm_helper.attach_vol_to_vm(vm_id=vm_id, mount=False)

    return vm_id
Example No. 19
def _boot_vm_to_test(boot_source, vm_host, flavor_id):
    LOG.tc_step('Boot a vm with given flavor')
    vm_id = vm_helper.boot_vm(flavor=flavor_id,
                              avail_zone='cgcsauto',
                              vm_host=vm_host,
                              source=boot_source,
                              cleanup='function')[1]
    return vm_id
Example No. 20
def vif_model_check(request):
    vif_model = request.param

    LOG.fixture_step(
        "Get a network that supports {} to boot vm".format(vif_model))
    pci_net = network_helper.get_pci_vm_network(pci_type=vif_model,
                                                net_name='internal0-net')
    if not pci_net:
        skip(SkipHostIf.PCI_IF_UNAVAIL)

    extra_pcipt_net_name = extra_pcipt_net = None
    if not isinstance(pci_net, str):
        pci_net, extra_pcipt_net_name = pci_net
    LOG.info("PCI network selected to boot vm: {}".format(pci_net))

    LOG.fixture_step("Create a flavor with dedicated cpu policy")
    flavor_id = nova_helper.create_flavor(name='dedicated',
                                          ram=2048,
                                          cleanup='module')[1]
    extra_specs = {FlavorSpec.CPU_POLICY: 'dedicated'}
    nova_helper.set_flavor(flavor=flavor_id, **extra_specs)

    LOG.fixture_step("Boot a base vm with above flavor and virtio nics")

    mgmt_net_id = network_helper.get_mgmt_net_id()
    pci_net_id, seg_id, pnet_name = network_helper.get_network_values(
        network=pci_net,
        fields=('id', 'provider:segmentation_id', 'provider:physical_network'))

    nics = [{'net-id': mgmt_net_id}, {'net-id': pci_net_id}]
    nics_to_test = [{
        'net-id': mgmt_net_id
    }, {
        'net-id': pci_net_id,
        'vif-model': vif_model
    }]
    pcipt_seg_ids = {}
    if vif_model == 'pci-passthrough':
        pcipt_seg_ids[pci_net] = seg_id
        if extra_pcipt_net_name:
            extra_pcipt_net, seg_id = network_helper.get_network_values(
                network=extra_pcipt_net_name,
                fields=('id', 'provider:segmentation_id'))
            nics.append({'net-id': extra_pcipt_net})
            nics_to_test.append({
                'net-id': extra_pcipt_net,
                'vif-model': vif_model
            })
            pcipt_seg_ids[extra_pcipt_net_name] = seg_id

    base_vm = vm_helper.boot_vm(flavor=flavor_id, nics=nics,
                                cleanup='module')[1]
    vm_helper.wait_for_vm_pingable_from_natbox(base_vm)
    vm_helper.ping_vms_from_vm(base_vm,
                               base_vm,
                               net_types=['mgmt', 'internal'])

    return vif_model, base_vm, flavor_id, nics_to_test, pcipt_seg_ids, pnet_name, extra_pcipt_net
Example No. 21
def fip_setups():
    # Create FIP and Associate VM to FIP
    floating_ip = network_helper.create_floating_ip(cleanup='module')[1]
    vm_id = vm_helper.boot_vm(cleanup='module')[1]

    network_helper.associate_floating_ip_to_vm(floating_ip=floating_ip,
                                               vm_id=vm_id)

    return vm_id, floating_ip
Example No. 22
def test_non_primary_tenant():
    vm_1 = vm_helper.boot_vm(cleanup='function',
                             auth_info=Tenant.get('tenant1'))[1]
    vm_2 = vm_helper.launch_vms(vm_type='dpdk',
                                auth_info=Tenant.get('tenant1'))[0][0]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_1)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_2)
    vm_helper.ping_vms_from_natbox(vm_ids=vm_2)
    vm_helper.ping_vms_from_vm(vm_2, vm_1, net_types='mgmt')
Example No. 23
def test_qos_update(setup_qos):
    """
    Tests network QoS update
    Test Setup:
    - create a qos policy
    - get mgmt net id
    - get internal net id
    - record the original qos values for above two networks
    - return qos, mgmt_net, internal_net

    Test Steps:
    - update networks with created qos
    - test ping over networks

    Test teardown:
    - restore the qos settings for both networks
    - delete the qos created by fixture
    """

    internal_net_id, mgmt_net_id, qos_new = setup_qos
    LOG.tc_step("Booting first vm.")
    nics = [{'net-id': mgmt_net_id}, {'net-id': internal_net_id}]

    vm1 = vm_helper.boot_vm(name='vm1', nics=nics, cleanup='function')[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm1)

    LOG.tc_step("Updating mgmt and internal networks to created QoS.")
    network_helper.update_net_qos(net_id=mgmt_net_id, qos_id=qos_new)
    network_helper.update_net_qos(net_id=internal_net_id, qos_id=qos_new)

    LOG.tc_step("Booting second vm.")
    vm2 = vm_helper.boot_vm(name='vm2', nics=nics, cleanup='function')[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm2)

    LOG.tc_step("Pinging vm1 from natbox after updating QoS.")
    vm_helper.wait_for_vm_pingable_from_natbox(vm1)

    LOG.tc_step("Testing ping between vms.")
    vm_helper.ping_vms_from_vm(to_vms=vm2,
                               from_vm=vm1,
                               net_types=['internal', 'mgmt'])
    vm_helper.ping_vms_from_vm(to_vms=vm1,
                               from_vm=vm2,
                               net_types=['internal', 'mgmt'])
def test_vm_meta_data_access_after_delete_add_interfaces_router(_router_info):
    """
    VM meta-data retrieval

    Test Steps:
        - Launch a boot-from-image vm
        - Retrieve vm meta_data within vm from metadata server
        - Ensure vm uuid from metadata server is the same as nova show
        - Delete all Router Interfaces
        - Re-add Router Interfaces
        - Verify metadata access works
        - Delete Router
        - Create Router and Add Interfaces
        - Verify metadata access works

    Test Teardown:
        - Ensure Router exist
        - Verify the external gateway info matches
        - Ensure all interfaces exist
        - Delete created vm and flavor
    """
    router_id, router_name, gateway_ip, ext_gateway_info, router_subnets, \
        ext_gateway_subnet, is_dvr = _router_info
    LOG.tc_step("Launch a boot-from-image vm")
    vm_id = vm_helper.boot_vm(source='image', cleanup='function')[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id, fail_ok=False)

    LOG.tc_step('Retrieve vm meta_data within vm from metadata server '
                'before Interface delete')
    _access_metadata_server_from_vm(vm_id=vm_id)

    LOG.tc_step('Delete Router Interfaces')
    _delete_router_interfaces(router_id, router_subnets, ext_gateway_subnet)
    LOG.tc_step('Re-add Router Interfaces')
    _add_router_interfaces(router_id, router_subnets, ext_gateway_subnet)

    LOG.tc_step('Retrieve vm meta_data within vm from metadata server '
                'after delete/add Router Interfaces')
    _access_metadata_server_from_vm(vm_id=vm_id)

    LOG.tc_step('Delete Router')
    network_helper.delete_router(router=router_id)

    LOG.tc_step('Create Router')
    router_id = network_helper.create_router(name=router_name)[1]

    LOG.tc_step('Set external gateway info for router {}'.format(router_id))
    _set_external_gatewayway_info(router_id, ext_gateway_subnet, gateway_ip,
                                  is_dvr)

    LOG.tc_step('Re-add Router Interfaces')
    _add_router_interfaces(router_id, router_subnets, ext_gateway_subnet)

    LOG.tc_step('Retrieve vm meta_data within vm from metadata server after '
                'delete/create Router')
    _access_metadata_server_from_vm(vm_id=vm_id)
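

# The in-guest metadata check typically boils down to something like the sketch
# below (the helper's actual implementation is not shown here; 169.254.169.254
# is the standard OpenStack metadata service address):
# with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
#     vm_ssh.exec_cmd('curl http://169.254.169.254/openstack/latest/meta_data.json',
#                     fail_ok=False)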
Example No. 25
def test_lock_unlock_secure_boot_vm():
    """
    This is to test host lock with secure boot vm.

    :return:
    """
    guests_os = ['trusty_uefi', 'uefi_shell']
    disk_formats = ['qcow2', 'raw']
    image_ids = []
    volume_ids = []
    for guest_os, disk_format in zip(guests_os, disk_formats):
        image_ids.append(
            create_image_with_metadata(
                guest_os=guest_os,
                property_key=ImageMetadata.FIRMWARE_TYPE,
                values=['uefi'],
                disk_format=disk_format,
                container_format='bare'))
    # create a flavor
    flavor_id = nova_helper.create_flavor(vcpus=2, ram=1024, root_disk=5)[1]
    ResourceCleanup.add('flavor', flavor_id)
    # boot a vm using the above image
    for image_id in image_ids:
        volume_ids.append(
            cinder_helper.create_volume(source_id=image_id[0],
                                        size=5,
                                        cleanup='function')[1])

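    # bootindex 0 marks the primary boot disk in the block_device list;
    # bootindex 1 attaches the other volume as a secondary disk.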
    block_device_dic = [{
        'id': volume_ids[1],
        'source': 'volume',
        'bootindex': 0
    }, {
        'id': volume_ids[0],
        'source': 'volume',
        'bootindex': 1
    }]

    vm_id = vm_helper.boot_vm(name='sec-boot-vm',
                              source='block_device',
                              flavor=flavor_id,
                              block_device=block_device_dic,
                              cleanup='function',
                              guest_os=guests_os[0])[1]

    _check_secure_boot_on_vm(vm_id=vm_id)

    # Lock the compute node with the secure Vms
    compute_host = vm_helper.get_vm_host(vm_id=vm_id)
    host_helper.lock_host(compute_host, timeout=800)
    if not system_helper.is_aio_simplex():
        _check_secure_boot_on_vm(vm_id=vm_id)
    host_helper.unlock_host(compute_host, timeout=800)

    if system_helper.is_aio_simplex():
        _check_secure_boot_on_vm(vm_id=vm_id)
    def create_vms(self):
        LOG.tc_step('Create VMs')

        vm_name_format = 'pve_vm_{}'

        num_priorities = len(self.prioritizing)

        for sn in range(NUM_VM):

            name = vm_name_format.format(sn)
            if self.set_on_boot and sn < num_priorities:
                vm_id = vm_helper.boot_vm(
                    name=name,
                    meta={
                        VMMetaData.EVACUATION_PRIORITY: self.prioritizing[sn]
                    },
                    flavor=self.vms_info[sn]['flavor_id'],
                    source='volume',
                    avail_zone='cgcsauto',
                    vm_host=self.current_host,
                    cleanup='function')[1]
            else:
                vm_id = vm_helper.boot_vm(
                    name,
                    flavor=self.vms_info[sn]['flavor_id'],
                    source='volume',
                    avail_zone='cgcsauto',
                    vm_host=self.current_host,
                    cleanup='function')[1]
                if sn < num_priorities:
                    vm_helper.set_vm(vm_id,
                                     properties={
                                         VMMetaData.EVACUATION_PRIORITY:
                                         self.prioritizing[sn]
                                     })

            LOG.info('OK, VM{} created: id={}\n'.format(sn, vm_id))
            self.vms_info[sn].update(vm_id=vm_id,
                                     vm_name=name,
                                     priority=self.prioritizing[sn])

        LOG.info('OK, VMs created:\n{}\n'.format(
            [vm['vm_id'] for vm in self.vms_info.values()]))
Example No. 27
def create_instances(create_flavors_and_images, create_network_performance):
    LOG.fixture_step("Creating instances")
    net_id_list = list()
    net_id_list.append({"net-id": create_network_performance[0]})
    host = host_helper.get_hypervisors()[1]
    vm_id_1 = vm_helper.boot_vm(flavor=create_flavors_and_images["flavor"],
                                nics=net_id_list, source="image",
                                source_id=create_flavors_and_images["image"],
                                vm_host=host, cleanup="module")[1]
    vm_id_2 = vm_helper.boot_vm(flavor=create_flavors_and_images["flavor"],
                                nics=net_id_list, source="image",
                                source_id=create_flavors_and_images["image"],
                                vm_host=host, cleanup="module")[1]
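    # The 'addresses' field is reported as '<net-name>=<ip>'; keep the part
    # after '=' as the instance IP.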
    vm_ip_1 = vm_helper.get_vm_values(vm_id=vm_id_1, fields='addresses')[0].split("=")[1]
    vm_ip_2 = vm_helper.get_vm_values(vm_id=vm_id_2, fields='addresses')[0].split("=")[1]
    return {"vm_id_1": vm_id_1,
            "vm_id_2": vm_id_2,
            "vm_ip_1": vm_ip_1,
            "vm_ip_2": vm_ip_2}
Example No. 28
def test_system_persist_over_host_reboot(host_type, stx_openstack_required):
    """
    Validate that the inventory summary persists over a reboot of one of the hosts

    Test Steps:
        - capture Inventory summary for list of hosts on system service-list and neutron agent-list
        - reboot the current Controller-Active
        - Wait for reboot to complete
        - Validate key items from inventory persist over reboot

    """
    if host_type == 'controller':
        host = system_helper.get_active_controller_name()
    elif host_type == 'compute':
        if system_helper.is_aio_system():
            skip("No compute host for AIO system")

        host = None
    else:
        hosts = system_helper.get_hosts(personality='storage')
        if not hosts:
            skip(msg="Lab has no storage nodes. Skip rebooting storage node.")

        host = hosts[0]

    LOG.tc_step("Pre-check for system status")
    system_helper.wait_for_services_enable()
    up_hypervisors = host_helper.get_up_hypervisors()
    network_helper.wait_for_agents_healthy(hosts=up_hypervisors)

    LOG.tc_step("Launch a vm")
    vm_id = vm_helper.boot_vm(cleanup='function')[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

    if host is None:
        host = vm_helper.get_vm_host(vm_id)

    LOG.tc_step("Reboot a {} node and wait for reboot completes: {}".format(host_type, host))
    HostsToRecover.add(host)
    host_helper.reboot_hosts(host)
    host_helper.wait_for_hosts_ready(host)

    LOG.tc_step("Check vm is still active and pingable after {} reboot".format(host))
    vm_helper.wait_for_vm_status(vm_id, status=VMStatus.ACTIVE, fail_ok=False)
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id, timeout=VMTimeout.DHCP_RETRY)

    LOG.tc_step("Check neutron agents and system services are in good state after {} reboot".format(host))
    network_helper.wait_for_agents_healthy(up_hypervisors)
    system_helper.wait_for_services_enable()

    if host in up_hypervisors:
        LOG.tc_step("Check {} can still host vm after reboot".format(host))
        if not vm_helper.get_vm_host(vm_id) == host:
            time.sleep(30)
            vm_helper.live_migrate_vm(vm_id, destination_host=host)
Example No. 29
def launch_instances(create_flavour_and_image, create_network_sanity, snapshot_from_instance):
    global VM_IDS
    net_id_list = list()
    net_id_list.append({"net-id": create_network_sanity})
    host = system_helper.get_active_controller_name()
    vm_under_test = vm_helper.boot_vm(flavor=create_flavour_and_image["flavor1"],
                                      nics=net_id_list, source="snapshot",
                                      source_id=snapshot_from_instance,
                                      vm_host=host, cleanup="module")[1]
    VM_IDS.append(vm_under_test)
    return vm_under_test
Example No. 30
def test_force_lock_with_non_mig_vms(add_host_to_zone):
    """
    Test force lock host with non-migrate-able vms on it

    Prerequisites:
        - Minimum of two up hypervisors
    Test Setups:
        - Add admin role to primary tenant
        - Create cgcsauto aggregate
        - Add host_under_test to cgcsauto aggregate
        - Create flavor for vms_to_test with storage_backing support by host_under_test
        - Create vms_to_test on host_under_test that cannot be live migrated
    Test Steps:
        - Force lock target host
        - Verify force lock returns 0
        - Verify VMs cannot find a host to boot and are in error state
        - Unlock locked target host
        - Verify VMs are active on host once it is up and available
        - Verify VMs can be pinged
    Test Teardown:
        - Remove admin role from primary tenant
        - Delete created vms
        - Remove host_under_test from cgcsauto aggregate
    """
    storage_backing, host_under_test = add_host_to_zone

    # Create flavor with storage_backing the host_under_test supports
    flavor_id = nova_helper.create_flavor(storage_backing=storage_backing)[1]

    # Boot VMs on the host using the above flavor.
    LOG.tc_step("Boot VM on {}".format(host_under_test))
    vm_id = vm_helper.boot_vm(vm_host=host_under_test,
                              flavor=flavor_id,
                              avail_zone='cgcsauto',
                              cleanup='function')[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

    # Force lock host that VMs are booted on.
    LOG.tc_step("Force lock {}".format(host_under_test))
    HostsToRecover.add(host_under_test)
    lock_code, lock_output = host_helper.lock_host(host_under_test, force=True)
    assert lock_code == 0, "Failed to lock {}. Details: {}".format(
        host_under_test, lock_output)

    vm_helper.wait_for_vm_values(vm_id, fail_ok=False, **{'status': 'ERROR'})

    host_helper.unlock_host(host_under_test)

    vm_helper.wait_for_vm_values(vm_id,
                                 timeout=300,
                                 fail_ok=False,
                                 **{'status': 'ACTIVE'})
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id,
                                               timeout=VMTimeout.DHCP_RETRY)