Example #1
def volumes_(image_):
    """
    Test fixture to create two large cinder volumes with sizes of 20 GB and 40 GB.
    Args:
        image_: the guest image_id

    Returns: list of volume dicts as follows:
        {'id': <volume_id>,
         'display_name': <vol_inst1 or vol_inst2>,
         'size': <20 or 40>
        }
    """

    volumes = []
    cinder_params = [{
        'name': 'vol_inst1',
        'size': 20
    }, {
        'name': 'vol_inst2',
        'size': 40
    }]

    for param in cinder_params:
        volume_id = \
            cinder_helper.create_volume(name=param['name'], source_id=image_,
                                        size=param['size'])[1]
        volume = {
            'id': volume_id,
            'display_name': param['name'],
            'size': param['size']
        }
        volumes.append(volume)
        ResourceCleanup.add('volume', volume['id'], scope='function')

    return volumes
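For context, a test consuming this fixture might look like the following minimal sketch; the test name and the vm boot/ping steps are illustrative assumptions that reuse helpers appearing elsewhere on this page, not part of the original fixture.

def test_boot_vms_from_large_volumes(volumes_):
    # volumes_ yields dicts with 'id', 'display_name' and 'size' (see fixture above)
    for volume in volumes_:
        vm_id = vm_helper.boot_vm(name=volume['display_name'],
                                  source='volume',
                                  source_id=volume['id'],
                                  cleanup='function')[1]
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)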
Example #2
def test_db_purge():

    end_time = time.time() + 7200

    count = 1
    while time.time() < end_time:

        LOG.tc_step(
            "Iteration-{}: Creating and deleting image, volume, vm".format(
                count))
        LOG.info("------ Creating image, volume, vm")
        image_id = glance_helper.create_image(
            name='glance-purge',
            cleanup='function',
            **{ImageMetadata.AUTO_RECOVERY: 'true'})[1]
        vol_id = cinder_helper.create_volume(name='cinder-purge',
                                             source_id=image_id)[1]
        vm_id = vm_helper.boot_vm(name='nova-purge',
                                  source='volume',
                                  source_id=vol_id)[1]

        time.sleep(60)

        LOG.info("------ Deleting vm, volume, image")
        vm_helper.delete_vms(vms=vm_id)
        cinder_helper.delete_volumes(volumes=vol_id)
        glance_helper.delete_images(images=image_id)

        time.sleep(60)
        count += 1
Example #3
def volume_from_instance(request, create_flavour_and_image):
    return cinder_helper.create_volume(
        name=request.param,
        source_type='image',
        source_id=create_flavour_and_image['image'],
        size=cirros_params['flavor_disk'],
        cleanup="module")[1]
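Since this fixture reads its volume name from request.param, it is intended to be parametrized indirectly. A minimal sketch of how a test could drive it follows; the parameter value, test name and test body are assumptions, not taken from the original suite.

from pytest import mark

@mark.parametrize('volume_from_instance', ['vol_cirros'], indirect=True)
def test_boot_from_cirros_volume(volume_from_instance):
    # The fixture returns a volume id; boot from it in the same style as the other examples
    vm_id = vm_helper.boot_vm(source='volume',
                              source_id=volume_from_instance,
                              cleanup='function')[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)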
Example #4
def prepare_resource(add_admin_role_module):
    hypervisor = random.choice(host_helper.get_up_hypervisors())
    flavor = nova_helper.create_flavor(name='flavor-1g',
                                       ram=1024,
                                       cleanup='module')[1]
    vol_id = cinder_helper.create_volume('vol-mem_page_size',
                                         cleanup='module')[1]
    return hypervisor, flavor, vol_id
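A test built on this fixture would typically unpack the returned tuple and boot a vm pinned to the chosen hypervisor. The sketch below is an assumption suggested by the resource names (the mem_page_size value of '2048' mirrors the launch_vm example further down this page), not the original test.

def test_vm_mem_page_size(prepare_resource):
    hypervisor, flavor, vol_id = prepare_resource
    # Assumed extra spec; FlavorSpec.MEM_PAGE_SIZE is set the same way in the launch_vm example below
    nova_helper.set_flavor(flavor, **{FlavorSpec.MEM_PAGE_SIZE: '2048'})
    vm_id = vm_helper.boot_vm(name='vm-mem_page_size',
                              flavor=flavor,
                              source='volume',
                              source_id=vol_id,
                              avail_zone='nova',
                              vm_host=hypervisor,
                              cleanup='function')[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)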
def test_lock_unlock_secure_boot_vm():
    """
    This is to test host lock with secure boot vm.

    :return:
    """
    guests_os = ['trusty_uefi', 'uefi_shell']
    disk_formats = ['qcow2', 'raw']
    image_ids = []
    volume_ids = []
    for guest_os, disk_format in zip(guests_os, disk_formats):
        image_ids.append(
            create_image_with_metadata(
                guest_os=guest_os,
                property_key=ImageMetadata.FIRMWARE_TYPE,
                values=['uefi'],
                disk_format=disk_format,
                container_format='bare'))
    # create a flavor
    flavor_id = nova_helper.create_flavor(vcpus=2, ram=1024, root_disk=5)[1]
    ResourceCleanup.add('flavor', flavor_id)
    # boot a vm using the above image
    for image_id in image_ids:
        volume_ids.append(
            cinder_helper.create_volume(source_id=image_id[0],
                                        size=5,
                                        cleanup='function')[1])

    block_device_dic = [{
        'id': volume_ids[1],
        'source': 'volume',
        'bootindex': 0
    }, {
        'id': volume_ids[0],
        'source': 'volume',
        'bootindex': 1
    }]

    vm_id = vm_helper.boot_vm(name='sec-boot-vm',
                              source='block_device',
                              flavor=flavor_id,
                              block_device=block_device_dic,
                              cleanup='function',
                              guest_os=guests_os[0])[1]

    _check_secure_boot_on_vm(vm_id=vm_id)

    # Lock the compute node with the secure Vms
    compute_host = vm_helper.get_vm_host(vm_id=vm_id)
    host_helper.lock_host(compute_host, timeout=800)
    if not system_helper.is_aio_simplex():
        _check_secure_boot_on_vm(vm_id=vm_id)
    host_helper.unlock_host(compute_host, timeout=800)

    if system_helper.is_aio_simplex():
        _check_secure_boot_on_vm(vm_id=vm_id)
def test_kpi_cinder_volume_creation(collect_kpi):
    """
    KPI test  - cinder  volume creation
    Args:
        collect_kpi:

    Test Steps:
        - Create a 20g cinder volume using default tis guest
        - Collect duration KPI from the time the cinder create CLI is sent until the volume becomes available

    """
    if not collect_kpi:
        skip("KPI only test. Skip due to kpi collection is not enabled.")

    LOG.tc_step(
        "Create a 20g volume from default tis guest and collect image download rate, "
        "image conversion rate, and total volume creation time")
    # init_time = common.get_date_in_format(date_format=KPI_DATE_FORMAT)
    image = glance_helper.get_guest_image(guest_os='tis-centos-guest-qcow2',
                                          cleanup='function')

    vol_id = cinder_helper.create_volume(name='20g',
                                         source_id=image,
                                         size=20,
                                         cleanup='function')[1]
    vol_updated = cinder_helper.get_volume_show_values(
        vol_id, 'updated_at')[0].split('.')[0]

    # Logs no longer available for image downloading and conversion.
    # code_download, out_download = \
    #     kpi_log_parser.record_kpi(local_kpi_file=collect_kpi, kpi_name=ImageDownload.NAME,
    #                               host=None,
    #                               log_path=ImageDownload.LOG_PATH,
    #                               end_pattern=ImageDownload.GREP_PATTERN,
    #                               python_pattern=ImageDownload.PYTHON_PATTERN,
    #                               init_time=init_time, uptime=1,
    #                               unit=ImageDownload.UNIT)
    # code_conv, out_conv = \
    #     kpi_log_parser.record_kpi(local_kpi_file=collect_kpi, kpi_name=ImageConversion.NAME,
    #                               host=None,
    #                               log_path=ImageConversion.LOG_PATH,
    #                               end_pattern=ImageConversion.GREP_PATTERN,
    #                               python_pattern=ImageConversion.PYTHON_PATTERN,
    #                               init_time=init_time, uptime=1,
    #                               unit=ImageConversion.UNIT)
    code_create, out_create = \
        kpi_log_parser.record_kpi(local_kpi_file=collect_kpi, kpi_name=VolCreate.NAME, host=None,
                                  log_path=VolCreate.LOG_PATH, end_pattern=vol_updated,
                                  start_pattern=VolCreate.START, uptime=1)
    #
    # assert code_download == 0, out_download
    # assert code_conv == 0, out_conv
    assert code_create == 0, out_create
Example #7
def test_modify_timezone_cli_timestamps(stx_openstack_required):
    """
    Test correct timestamps in:
        - ceilometer
        - cinder
        - glance
        - neutron
        - nova
        - sysinv

    Setups
        - Get a random timezone for testing
        - Create cinder volume
        - Boot a vm

    Test Steps
        - Save the pre-timezone-change timestamps from each cli domain
        - Modify the timezone
        - Wait for out_of_date alarms to clear
        - Save the post-timezone-change timestamps from each cli domain
        - Verify the timestamps have changed to be in line with the timezone change

    Teardown
        - Delete cinder volume
        - Delete the vm

    """
    services = ('sysinv', 'openstack')

    prev_timezone = system_helper.get_timezone()
    post_timezone = __select_diff_timezone(current_zone=prev_timezone)

    # CHECK PRE TIMEZONE CHANGE CLI TIMESTAMPS
    LOG.tc_step("Getting CLI timestamps before timezone change for: {}".format(services))
    vol_id = cinder_helper.create_volume('timezone_test', cleanup='function')[1]

    prev_timestamps = get_cli_timestamps(vol_id=vol_id)
    LOG.tc_step("Modify timezone from {} to {}".format(prev_timezone, post_timezone))
    system_helper.modify_timezone(post_timezone)

    # CHECK POST TIMEZONE CHANGE CLI TIMESTAMPS
    time.sleep(10)
    LOG.tc_step("Getting CLI timestamps after timezone change for: {}".format(services))
    post_timestamps = get_cli_timestamps(vol_id=vol_id)

    LOG.tc_step("Comparing timestamps from before and after timezone change for: {}".format(services))
    failed_services = []
    for i in range(len(services) - 1):      # -1 to ignore last item: openstack cli (CGTS-10475)
        if prev_timestamps[i] == post_timestamps[i]:
            failed_services.append(services[i])

    assert not failed_services, "{} timestamp did not update after timezone modify".format(failed_services)
Example #8
def test_boot_vm_cpu_policy_image(flv_vcpus, flv_pol, img_pol, boot_source, expt_err):
    LOG.tc_step("Create flavor with {} vcpus".format(flv_vcpus))
    flavor_id = nova_helper.create_flavor(name='cpu_pol_{}'.format(flv_pol), vcpus=flv_vcpus)[1]
    ResourceCleanup.add('flavor', flavor_id)

    if flv_pol is not None:
        specs = {FlavorSpec.CPU_POLICY: flv_pol}

        LOG.tc_step("Set following extra specs: {}".format(specs))
        nova_helper.set_flavor(flavor_id, **specs)

    if img_pol is not None:
        image_meta = {ImageMetadata.CPU_POLICY: img_pol}
        LOG.tc_step("Create image with following metadata: {}".format(image_meta))
        image_id = glance_helper.create_image(name='cpu_pol_{}'.format(img_pol), cleanup='function', **image_meta)[1]
    else:
        image_id = glance_helper.get_image_id_from_name(GuestImages.DEFAULT['guest'], strict=True)

    if boot_source == 'volume':
        LOG.tc_step("Create a volume from image")
        source_id = cinder_helper.create_volume(name='cpu_pol_img', source_id=image_id)[1]
        ResourceCleanup.add('volume', source_id)
    else:
        source_id = image_id

    prev_cpus = host_helper.get_vcpus_for_computes(field='used_now')

    LOG.tc_step("Attempt to boot a vm from above {} with above flavor".format(boot_source))
    code, vm_id, msg = vm_helper.boot_vm(name='cpu_pol', flavor=flavor_id, source=boot_source,
                                         source_id=source_id, fail_ok=True, cleanup='function')

    # check for negative tests
    if expt_err is not None:
        LOG.tc_step("Check VM failed to boot due to conflict in flavor and image.")
        assert 4 == code, "Expect boot vm cli reject and no vm booted. Actual: {}".format(msg)
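        # expt_err is an eval()-able string expression naming the expected error message; resolved below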
        assert eval(expt_err) in msg, "Expected error message is not found in cli return."
        return  # end the test for negative cases

    # Check for positive tests
    LOG.tc_step("Check vm is successfully booted.")
    assert 0 == code, "Expect vm boot successfully. Actual: {}".format(msg)

    # Calculate expected policy:
    expt_cpu_pol = flv_pol if flv_pol else img_pol
    expt_cpu_pol = expt_cpu_pol if expt_cpu_pol else 'shared'

    vm_host = vm_helper.get_vm_host(vm_id)
    check_helper.check_topology_of_vm(vm_id, vcpus=flv_vcpus, cpu_pol=expt_cpu_pol, vm_host=vm_host,
                                      prev_total_cpus=prev_cpus[vm_host])
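The test above receives its arguments through pytest parametrization. The decorator below only illustrates the shape of those parameters; the concrete values and the expected-error expression are assumptions, not taken from the original suite.

from pytest import mark

@mark.parametrize(('flv_vcpus', 'flv_pol', 'img_pol', 'boot_source', 'expt_err'), [
    (2, 'dedicated', None, 'volume', None),             # positive case: boot expected to succeed
    (2, 'shared', 'dedicated', 'image', "'conflict'"),  # negative case: hypothetical eval()-able expected error
])
def test_boot_vm_cpu_policy_image(flv_vcpus, flv_pol, img_pol, boot_source, expt_err):
    ...  # body as defined in the example above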
def test_ping_vms_from_vm_various_images(vm_image):
    image_id = glance_helper.get_image_id_from_name(name=vm_image,
                                                    strict=False)
    if not image_id:
        skip("No image name has substring: {}.".format(vm_image))

    vol_size = 1
    if vm_image in ['ubuntu', 'centos']:
        vol_size = 8
    vol_id = cinder_helper.create_volume(name='vol_' + vm_image,
                                         source_id=image_id,
                                         size=vol_size)[1]
    vm_id = vm_helper.boot_vm(source='volume', source_id=vol_id)[1]

    vm_helper.ping_vms_from_vm(from_vm=vm_id)
Example #10
def launch_vm(vm_type, num_vcpu, host=None):
    img_id = None
    if vm_type == 'vhost':
        vif_model = 'virtio'
        if num_vcpu > 2:
            img_id = image_with_vif_multiq()
    else:
        vif_model = 'avp'

    LOG.tc_step("Boot a {} vm with {} vcpus on {}".format(
        vm_type, num_vcpu, host if host else "any host"))
    flavor_id = nova_helper.create_flavor(vcpus=num_vcpu,
                                          ram=1024,
                                          root_disk=2)[1]
    ResourceCleanup.add('flavor', flavor_id)
    extra_specs = {
        FlavorSpec.VCPU_MODEL: 'SandyBridge',
        FlavorSpec.CPU_POLICY: 'dedicated',
        FlavorSpec.MEM_PAGE_SIZE: '2048'
    }
    nova_helper.set_flavor(flavor=flavor_id, **extra_specs)

    nic1 = {'net-id': network_helper.get_mgmt_net_id()}
    nic2 = {'net-id': network_helper.get_tenant_net_id()}
    nic3 = {'net-id': network_helper.get_internal_net_id()}
    if vif_model != 'virtio':
        nic2['vif-model'] = vif_model
        nic3['vif-model'] = vif_model

    vol = cinder_helper.create_volume(source_id=img_id, cleanup='function')[1]
    host_info = {'avail_zone': 'nova', 'vm_host': host} if host else {}
    vm_id = vm_helper.boot_vm(name='dpdk-vm',
                              nics=[nic1, nic2, nic3],
                              flavor=flavor_id,
                              user_data=_get_dpdk_user_data(),
                              source='volume',
                              source_id=vol,
                              cleanup='function',
                              **host_info)[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_id)

    if host:
        vm_host = vm_helper.get_vm_host(vm_id)
        assert vm_host == host, "VM is not launched on {} as specified".format(
            host)

    return vm_id
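A caller would typically invoke the helper above along the following lines (the values and the follow-up migration are illustrative):

# Boot a 4-vcpu vhost vm on any available host, then exercise it, e.g. via live migration
vm_id = launch_vm(vm_type='vhost', num_vcpu=4)
vm_helper.live_migrate_vm(vm_id=vm_id)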
Example #11
def _boot_vm_vcpu_model(flv_model=None,
                        img_model=None,
                        boot_source='volume',
                        avail_zone=None,
                        vm_host=None):
    LOG.tc_step(
        "Attempt to launch vm from {} with image vcpu model metadata: {}; flavor vcpu model extra spec: {}"
        .format(boot_source, img_model, flv_model))

    flv_id = nova_helper.create_flavor(name='vcpu_{}'.format(flv_model))[1]
    ResourceCleanup.add('flavor', flv_id)
    if flv_model:
        nova_helper.set_flavor(flavor=flv_id,
                               **{FlavorSpec.VCPU_MODEL: flv_model})

    if img_model:
        image_id = glance_helper.create_image(
            name='vcpu_{}'.format(img_model),
            cleanup='function',
            **{ImageMetadata.CPU_MODEL: img_model})[1]
    else:
        image_id = glance_helper.get_guest_image(
            guest_os=GuestImages.DEFAULT['guest'])

    if boot_source == 'image':
        source_id = image_id
    else:
        source_id = cinder_helper.create_volume(name='vcpu_model',
                                                source_id=image_id)[1]
        ResourceCleanup.add('volume', source_id)

    code, vm, msg = vm_helper.boot_vm(name='vcpu_model',
                                      flavor=flv_id,
                                      source=boot_source,
                                      source_id=source_id,
                                      fail_ok=True,
                                      cleanup='function',
                                      avail_zone=avail_zone,
                                      vm_host=vm_host)
    return code, vm, msg
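Callers of this helper usually assert on the returned code, for example (a sketch; the vcpu model value and assertions are assumptions modelled on the other cpu-policy examples on this page):

code, vm_id, msg = _boot_vm_vcpu_model(flv_model='SandyBridge')
assert 0 == code, "Expect vm with SandyBridge vcpu model to boot successfully. Actual: {}".format(msg)
vm_helper.wait_for_vm_pingable_from_natbox(vm_id)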
Example #12
def test_autorecovery_image_metadata_in_volume(auto_recovery, disk_format, container_format):
    """
    Create image with given metadata/property.

    Args:
        auto_recovery (str): value for sw_wrs_auto_recovery to set in image
        disk_format (str): such as 'raw', 'qcow2'
        container_format (str): such as bare

    Test Steps:
        - Create image with given disk format, container format, property key and value pair
        - Verify property value is correctly set via glance image-show

    Teardown:
        - Delete created images

    """
    property_key = ImageMetadata.AUTO_RECOVERY

    LOG.tc_step("Create an image with property auto_recovery={}, disk_format={}, container_format={}".
                format(auto_recovery, disk_format, container_format))
    image_id = glance_helper.create_image(disk_format=disk_format, container_format=container_format,
                                          cleanup='function', **{property_key: auto_recovery})[1]

    LOG.tc_step("Create a volume from the image")
    vol_id = cinder_helper.create_volume(name='auto_recov', source_id=image_id, cleanup='function')[1]

    LOG.tc_step("Verify image properties are shown in cinder list")
    field = 'volume_image_metadata'
    vol_image_metadata_dict = cinder_helper.get_volume_show_values(vol_id, fields=field)[0]
    LOG.info("vol_image_metadata dict: {}".format(vol_image_metadata_dict))

    assert auto_recovery.lower() == vol_image_metadata_dict[property_key].lower(), \
        "Actual volume image property {} value - {} is different than value set in image - {}".format(
                property_key, vol_image_metadata_dict[property_key], auto_recovery)

    assert disk_format == vol_image_metadata_dict['disk_format']
    assert container_format == vol_image_metadata_dict['container_format']
Example #13
def test_attempt_to_delete_volume_associated_with_snapshot():
    """
    This is a negative test to verify that volumes with associated snapshots
    cannot be deleted.

    Test Steps:
    1.  Create a volume
    2.  Launch a VM with that volume
    3.  Create a snapshot based on that VM
    4.  Delete the VM, leaving behind the volume and snapshot
    5.  Attempt to delete the volume.  Rejected.
    6.  Delete the snapshot.
    7.  Delete the volume.

    Teardown:
    1.  Delete VMs
    2.  Delete volumes
    3.  Delete snapshots
    4.  Delete images

    Possible Improvements:
    1.  Could update test to use non-raw images, but determining the size of the
    image is more complex if the original file is no longer on the filesystem.
    """

    LOG.tc_step("Get available images")
    image_list = glance_helper.get_images()

    if len(image_list) == 0:
        skip("The test requires some images to be present")

    # Filter out zero-sized images and non-raw images (latter is lazy)
    image_uuid = vol_size = None
    for image in image_list:
        image_uuid = image
        image_prop_s, image_prop_d = glance_helper.get_image_values(
            image_uuid, ("size", "disk_format"))
        if str(image_prop_s) == "0" or image_prop_d != "raw":
            continue
        else:
            divisor = 1024 * 1024 * 1024
            image_size = int(image_prop_s)
            vol_size = int(math.ceil(image_size / divisor))
            break
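    # Note: the 'else' below belongs to the 'for' loop; it only runs when no usable image was found (no break)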

    else:
        skip("No usable images found")

    LOG.tc_step("Create a cinder bootable volume")
    # Check if lab has emc-vnx volume types. Use volume type = iscsi;
    # creating snapshots with emc-vnx (EMC SAN) is not supported yet.
    volume_types = cinder_helper.get_volume_types(field='Name')
    vol_type = 'iscsi' if any('emc' in t for t in volume_types) else None
    vol_id = cinder_helper.create_volume(source_id=image_uuid,
                                         vol_type=vol_type,
                                         size=vol_size,
                                         fail_ok=False,
                                         cleanup='function')[1]

    LOG.tc_step("Boot VM using newly created bootable volume")
    vm_id = vm_helper.boot_vm(source="volume",
                              source_id=vol_id,
                              cleanup='function')[1]
    assert vm_id, "Failed to boot VM"

    # nova image-create generates a glance image of 0 size
    # real snapshot is stored in cinder
    LOG.tc_step("Create a snapshot based on that VM")
    vm_name = vm_helper.get_vm_name_from_id(vm_id)
    snapshot_name = vm_name + "_snapshot"
    code, image_id, snapshot_id = vm_helper.create_image_from_vm(
        vm_id,
        image_name=snapshot_name,
        cleanup='function',
        expt_cinder_snapshot=True)

    # We're deleting the VM, but leaving the volume and the snapshot
    LOG.tc_step("Delete the VM")
    vm_helper.delete_vms(vms=vm_id, fail_ok=False)

    LOG.tc_step("Attempting to delete the volume with associated snapshot")
    rc, out = cinder_helper.delete_volumes(vol_id, fail_ok=True)
    assert rc == 1, "Volume deletion was expected to fail but instead succeeded"

    LOG.tc_step("Delete the snapshot")
    cinder_helper.delete_volume_snapshots(snapshot_id, fail_ok=False)

    LOG.tc_step("Re-attempt volume deletion")
    # This step has been failing on ip33-36 and sm-1 due to volume delete
    # rejected. After a minute or so,
    # it was accepted though.
    cinder_helper.delete_volumes(vol_id, fail_ok=False)
def test_attach_cinder_volume_to_instance(vol_vif, check_avs_pattern):
    """
    Validate that cinder volume can be attached to VM created using wrl5_avp and wrl5_virtio image

    Args:
        vol_vif (str)

    Test Steps:
        - Create cinder volume
        - Boot VM using WRL image
        - Attach cinder volume to WRL virtio/avp instance
        - Check VM nics vifs are not changed

    Teardown:
        - Delete VM
        - Delete cinder volume
    """
    mgmt_net_id = network_helper.get_mgmt_net_id()
    tenant_net_id = network_helper.get_tenant_net_id()
    internal_net_id = network_helper.get_internal_net_id()

    vif_model = 'avp' if system_helper.is_avs() else 'virtio'
    nics = [
        {
            'net-id': mgmt_net_id
        },
        {
            'net-id': tenant_net_id
        },
        {
            'net-id': internal_net_id,
            'vif-model': vif_model
        },
    ]

    LOG.tc_step("Boot up VM from default tis image")
    vm_id = vm_helper.boot_vm(name='vm_attach_vol_{}'.format(vol_vif),
                              source='image',
                              nics=nics,
                              cleanup='function')[1]

    prev_ports = network_helper.get_ports(server=vm_id)

    LOG.tc_step(
        "Create an image with vif model metadata set to {}".format(vol_vif))
    img_id = glance_helper.create_image('vif_{}'.format(vol_vif),
                                        cleanup='function',
                                        **{ImageMetadata.VIF_MODEL:
                                           vol_vif})[1]

    LOG.tc_step("Boot a volume from above image")
    volume_id = cinder_helper.create_volume('vif_{}'.format(vol_vif),
                                            source_id=img_id,
                                            cleanup='function')[1]

    # Attach the cinder volume created above to the vm
    LOG.tc_step("Attach cinder Volume to VM")
    vm_helper.attach_vol_to_vm(vm_id, vol_id=volume_id)

    LOG.tc_step("Check vm nics vif models are not changed")
    post_ports = network_helper.get_ports(server=vm_id)

    assert prev_ports == post_ports
Example #15
def test_ping_between_two_vms(stx_openstack_required, guest_os, vm1_vifs, vm2_vifs):
    """
    Ping between two vms with given vif models

    Test Steps:
        - Create a flavor with dedicated cpu policy and proper root disk size
        - Create a volume from guest image under test with proper size
        - Boot two vms with given vif models from above volume and flavor
        - Ping VMs from NatBox and between two vms

    Test Teardown:
        - Delete vms, volumes, flavor, glance image created

    """
    if guest_os == 'default':
        guest_os = GuestImages.DEFAULT['guest']

    reuse = False if 'e1000' in vm1_vifs or 'e1000' in vm2_vifs else True
    cleanup = 'function' if not reuse or 'ubuntu' in guest_os else None
    image_id = glance_helper.get_guest_image(guest_os, cleanup=cleanup,
                                             use_existing=reuse)

    LOG.tc_step("Create a favor dedicated cpu policy")
    flavor_id = nova_helper.create_flavor(name='dedicated', guest_os=guest_os,
                                          cleanup='function')[1]
    nova_helper.set_flavor(flavor_id, **{FlavorSpec.CPU_POLICY: 'dedicated'})

    mgmt_net_id = network_helper.get_mgmt_net_id()
    tenant_net_id = network_helper.get_tenant_net_id()
    internal_net_id = network_helper.get_internal_net_id()
    net_ids = (mgmt_net_id, tenant_net_id, internal_net_id)
    vms = []
    for vifs_for_vm in (vm1_vifs, vm2_vifs):
        # compose vm nics
        nics = _compose_nics(vifs_for_vm, net_ids=net_ids, image_id=image_id,
                             guest_os=guest_os)
        net_types = ['mgmt', 'data', 'internal'][:len(nics)]
        LOG.tc_step("Create a volume from {} image".format(guest_os))
        vol_id = cinder_helper.create_volume(name='vol-{}'.format(guest_os),
                                             source_id=image_id,
                                             guest_image=guest_os,
                                             cleanup='function')[1]

        LOG.tc_step(
            "Boot a {} vm with {} vifs from above flavor and volume".format(
                guest_os, vifs_for_vm))
        vm_id = vm_helper.boot_vm('{}_vifs'.format(guest_os), flavor=flavor_id,
                                  cleanup='function',
                                  source='volume', source_id=vol_id, nics=nics,
                                  guest_os=guest_os)[1]

        LOG.tc_step("Ping VM {} from NatBox(external network)".format(vm_id))
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id, fail_ok=False)

        vms.append(vm_id)

    LOG.tc_step(
        "Ping between two vms over management, data, and internal networks")
    vm_helper.ping_vms_from_vm(to_vms=vms[0], from_vm=vms[1],
                               net_types=net_types)
    vm_helper.ping_vms_from_vm(to_vms=vms[1], from_vm=vms[0],
                               net_types=net_types)
def test_vif_model_from_image(img_vif, check_avs_pattern):
    """
    Test vif model set in image metadata is reflected in vm nics when use normal vnic type.
    Args:
        img_vif (str):
        check_avs_pattern:

    Test Steps:
        - Create a glance image with given img_vif in metadata
        - Create a cinder volume from above image
        - Create a vm with 3 vnics from above cinder volume:
            - nic1 and nic2 with normal vnic type
            - nic3 with avp (if AVS, otherwise normal)
        - Verify nic1 and nic2 vif model is the same as img_vif
        - Verify nic3 vif model is avp (if AVS, otherwise normal)

    """

    LOG.tc_step(
        "Create an image with vif model metadata set to {}".format(img_vif))
    img_id = glance_helper.create_image('vif_{}'.format(img_vif),
                                        cleanup='function',
                                        **{ImageMetadata.VIF_MODEL:
                                           img_vif})[1]

    LOG.tc_step("Boot a volume from above image")
    volume_id = cinder_helper.create_volume('vif_{}'.format(img_vif),
                                            source_id=img_id,
                                            cleanup='function')[1]

    mgmt_net_id = network_helper.get_mgmt_net_id()
    tenant_net_id = network_helper.get_tenant_net_id()
    internal_net_id = network_helper.get_internal_net_id()

    vif_model = 'avp' if system_helper.is_avs() else img_vif
    nics = [{
        'net-id': mgmt_net_id
    }, {
        'net-id': tenant_net_id
    }, {
        'net-id': internal_net_id,
        'vif-model': vif_model
    }]

    LOG.tc_step(
        "Boot a vm from above volume with following nics: {}".format(nics))
    vm_id = vm_helper.boot_vm(name='vif_img_{}'.format(img_vif),
                              nics=nics,
                              source='volume',
                              source_id=volume_id,
                              cleanup='function')[1]

    LOG.tc_step(
        "Verify vnics info from virsh to ensure tenant net vif is as specified in image metadata"
    )
    internal_mac = network_helper.get_ports(server=vm_id,
                                            network=internal_net_id,
                                            field='MAC Address')[0]
    vm_interfaces = vm_helper.get_vm_interfaces_via_virsh(vm_id)
    for vm_if in vm_interfaces:
        if_mac, if_model = vm_if
        if if_mac == internal_mac:
            assert if_model == vif_model
        else:
            assert if_model == img_vif
Example #17
def test_cpu_pol_vm_actions(flv_vcpus, cpu_pol, pol_source, boot_source):
    LOG.tc_step("Create flavor with {} vcpus".format(flv_vcpus))
    flavor_id = nova_helper.create_flavor(name='cpu_pol', vcpus=flv_vcpus)[1]
    ResourceCleanup.add('flavor', flavor_id)

    image_id = glance_helper.get_image_id_from_name(
        GuestImages.DEFAULT['guest'], strict=True)
    if cpu_pol is not None:
        if pol_source == 'flavor':
            specs = {FlavorSpec.CPU_POLICY: cpu_pol}

            LOG.tc_step("Set following extra specs: {}".format(specs))
            nova_helper.set_flavor(flavor_id, **specs)
        else:
            image_meta = {ImageMetadata.CPU_POLICY: cpu_pol}
            LOG.tc_step(
                "Create image with following metadata: {}".format(image_meta))
            image_id = glance_helper.create_image(
                name='cpu_pol_{}'.format(cpu_pol),
                cleanup='function',
                **image_meta)[1]
    if boot_source == 'volume':
        LOG.tc_step("Create a volume from image")
        source_id = cinder_helper.create_volume(name='cpu_pol_{}'.format(cpu_pol),
                                                source_id=image_id)[1]
        ResourceCleanup.add('volume', source_id)
    else:
        source_id = image_id

    prev_cpus = host_helper.get_vcpus_for_computes(field='used_now')

    LOG.tc_step(
        "Boot a vm from {} with above flavor and check vm topology is as "
        "expected".format(boot_source))
    vm_id = vm_helper.boot_vm(name='cpu_pol_{}_{}'.format(cpu_pol, flv_vcpus),
                              flavor=flavor_id,
                              source=boot_source,
                              source_id=source_id,
                              cleanup='function')[1]

    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    vm_host = vm_helper.get_vm_host(vm_id)
    check_helper.check_topology_of_vm(vm_id,
                                      vcpus=flv_vcpus,
                                      cpu_pol=cpu_pol,
                                      vm_host=vm_host,
                                      prev_total_cpus=prev_cpus[vm_host])

    LOG.tc_step("Suspend/Resume vm and check vm topology stays the same")
    vm_helper.suspend_vm(vm_id)
    vm_helper.resume_vm(vm_id)

    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    check_helper.check_topology_of_vm(vm_id,
                                      vcpus=flv_vcpus,
                                      cpu_pol=cpu_pol,
                                      vm_host=vm_host,
                                      prev_total_cpus=prev_cpus[vm_host])

    LOG.tc_step("Stop/Start vm and check vm topology stays the same")
    vm_helper.stop_vms(vm_id)
    vm_helper.start_vms(vm_id)

    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    prev_siblings = check_helper.check_topology_of_vm(
        vm_id,
        vcpus=flv_vcpus,
        cpu_pol=cpu_pol,
        vm_host=vm_host,
        prev_total_cpus=prev_cpus[vm_host])[1]

    LOG.tc_step("Live migrate vm and check vm topology stays the same")
    vm_helper.live_migrate_vm(vm_id=vm_id)

    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    vm_host = vm_helper.get_vm_host(vm_id)
    prev_siblings = prev_siblings if cpu_pol == 'dedicated' else None
    check_helper.check_topology_of_vm(vm_id,
                                      vcpus=flv_vcpus,
                                      cpu_pol=cpu_pol,
                                      vm_host=vm_host,
                                      prev_total_cpus=prev_cpus[vm_host],
                                      prev_siblings=prev_siblings)

    LOG.tc_step("Cold migrate vm and check vm topology stays the same")
    vm_helper.cold_migrate_vm(vm_id=vm_id)

    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    vm_host = vm_helper.get_vm_host(vm_id)
    check_helper.check_topology_of_vm(vm_id,
                                      vcpus=flv_vcpus,
                                      cpu_pol=cpu_pol,
                                      vm_host=vm_host,
                                      prev_total_cpus=prev_cpus[vm_host])
Example #18
def test_dvr_vms_network_connection(vms_num, srv_grp_policy, server_groups,
                                    router_info):
    """
    Test vms' east-west connectivity by pinging the vms' data network from a vm

    Args:
        vms_num (int): number of vms to boot
        srv_grp_policy (str): affinity to boot vms on same host, anti-affinity to boot vms on
            different hosts
        server_groups: test fixture to return affinity and anti-affinity server groups
        router_info (str): id of tenant router

    Skip Conditions:
        - Only one nova host on the system

    Setups:
        - Enable DVR    (module)

    Test Steps
        - Update router to distributed if not already done
        - Boot given number of vms with specific server group policy to schedule vms on
            same or different host(s)
        - Ping vms over data and management networks from one vm to test NS and EW traffic

    Teardown:
        - Delete vms
        - Revert router to its original (pre-test) mode

    """
    # Increase instance quota count if needed
    current_vms = len(vm_helper.get_vms(strict=False))
    quota_needed = current_vms + vms_num
    vm_helper.ensure_vms_quotas(quota_needed)

    if srv_grp_policy == 'anti-affinity' and len(
            host_helper.get_up_hypervisors()) == 1:
        skip("Only one nova host on the system.")

    LOG.tc_step("Update router to distributed if not already done")
    router_id = router_info
    is_dvr = network_helper.get_router_values(router_id,
                                              fields='distributed',
                                              auth_info=Tenant.get('admin'))[0]
    if not is_dvr:
        network_helper.set_router_mode(router_id, distributed=True)

    LOG.tc_step("Boot {} vms with server group policy {}".format(
        vms_num, srv_grp_policy))
    affinity_grp, anti_affinity_grp = server_groups(soft=True)
    srv_grp_id = affinity_grp if srv_grp_policy == 'affinity' else anti_affinity_grp

    vms = []
    tenant_net_id = network_helper.get_tenant_net_id()
    mgmt_net_id = network_helper.get_mgmt_net_id()
    internal_net_id = network_helper.get_internal_net_id()

    internal_vif = {'net-id': internal_net_id}
    if system_helper.is_avs():
        internal_vif['vif-model'] = 'avp'

    nics = [{'net-id': mgmt_net_id}, {'net-id': tenant_net_id}, internal_vif]
    for i in range(vms_num):
        vol = cinder_helper.create_volume()[1]
        ResourceCleanup.add(resource_type='volume', resource_id=vol)
        vm_id = vm_helper.boot_vm('dvr_ew_traffic',
                                  source='volume',
                                  source_id=vol,
                                  nics=nics,
                                  cleanup='function',
                                  hint={'group': srv_grp_id})[1]
        vms.append(vm_id)
        LOG.tc_step("Wait for vm {} pingable from NatBox".format(vm_id))
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id, fail_ok=False)

    from_vm = vms[0]
    LOG.tc_step(
        "Ping vms over management and data networks from vm {}, and verify "
        "ping successful.".format(from_vm))
    vm_helper.ping_vms_from_vm(to_vms=vms,
                               from_vm=from_vm,
                               fail_ok=False,
                               net_types=['data', 'mgmt', 'internal'])
Example #19
def test_migrate_vm(check_system, guest_os, mig_type, cpu_pol):
    """
    Test migrate vms for given guest type
    Args:
        check_system:
        guest_os:
        mig_type:
        cpu_pol:

    Test Steps:
        - Create a glance image from given guest type
        - Create a vm from cinder volume using above image
        - Live/cold migrate the vm
        - Ensure vm moved to other host and in good state (active and
            reachable from NatBox)

    """
    LOG.tc_step("Create a flavor with 1 vcpu")
    flavor_id = \
        nova_helper.create_flavor(name='{}-mig'.format(mig_type), vcpus=1,
                                  root_disk=9, cleanup='function')[1]

    if cpu_pol is not None:
        specs = {FlavorSpec.CPU_POLICY: cpu_pol}
        LOG.tc_step("Add following extra specs: {}".format(specs))
        nova_helper.set_flavor(flavor=flavor_id, **specs)

    LOG.tc_step("Create a volume from {} image".format(guest_os))
    image_id = glance_helper.get_guest_image(guest_os=guest_os)

    vol_id = cinder_helper.create_volume(source_id=image_id, size=9,
                                         guest_image=guest_os)[1]
    ResourceCleanup.add('volume', vol_id)

    LOG.tc_step("Boot a vm from above flavor and volume")
    vm_id = vm_helper.boot_vm(guest_os, flavor=flavor_id, source='volume',
                              source_id=vol_id, cleanup='function')[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

    if guest_os == 'ubuntu_14':
        system_helper.wait_for_alarm_gone(alarm_id=EventLogID.CINDER_IO_CONGEST,
                                          entity_id='cinder_io_monitor',
                                          strict=False, timeout=300,
                                          fail_ok=False)

    LOG.tc_step("{} migrate vm and check vm is moved to different host".format(
        mig_type))
    prev_vm_host = vm_helper.get_vm_host(vm_id)

    if mig_type == 'live':
        code, output = vm_helper.live_migrate_vm(vm_id)
        if code == 1:
            assert False, "No host to live migrate to. System may not be in " \
                          "good state."
    else:
        vm_helper.cold_migrate_vm(vm_id)

    vm_host = vm_helper.get_vm_host(vm_id)
    assert prev_vm_host != vm_host, "vm host did not change after {} " \
                                    "migration".format(mig_type)

    LOG.tc_step("Ping vm from NatBox after {} migration".format(mig_type))
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
Example #20
    def test_evacuate_vms_with_inst_backing(self, hosts_per_backing,
                                            storage_backing):
        """
        Test evacuate vms with various vm storage configs and host instance
        backing configs

        Args:
            storage_backing: storage backing under test

        Skip conditions:
            - Less than two hosts configured with storage backing under test

        Setups:
            - Add admin role to primary tenant (module)

        Test Steps:
            - Create flv_rootdisk without ephemeral or swap disks, and set
            storage backing extra spec
            - Create flv_ephemswap with ephemeral AND swap disks, and set
            storage backing extra spec
            - Boot following vms on same host and wait for them to be
            pingable from NatBox:
                - Boot vm1 from volume with flavor flv_rootdisk
                - Boot vm2 from volume with flavor flv_localdisk
                - Boot vm3 from image with flavor flv_rootdisk
                - Boot vm4 from image with flavor flv_rootdisk, and attach a
                volume to it
                - Boot vm5 from image with flavor flv_localdisk
            - sudo reboot -f on vms host
            - Ensure evacuation for all 5 vms is successful (vm host
            changed, active state, pingable from NatBox)

        Teardown:
            - Delete created vms, volumes, flavors
            - Remove admin role from primary tenant (module)

        """
        hosts = hosts_per_backing.get(storage_backing, [])
        if len(hosts) < 2:
            skip(
                SkipStorageBacking.LESS_THAN_TWO_HOSTS_WITH_BACKING.format(
                    storage_backing))

        target_host = hosts[0]

        LOG.tc_step("Create a flavor without ephemeral or swap disks")
        flavor_1 = nova_helper.create_flavor(
            'flv_rootdisk', storage_backing=storage_backing)[1]
        ResourceCleanup.add('flavor', flavor_1, scope='function')

        LOG.tc_step("Create another flavor with ephemeral and swap disks")
        flavor_2 = nova_helper.create_flavor(
            'flv_ephemswap',
            ephemeral=1,
            swap=512,
            storage_backing=storage_backing)[1]
        ResourceCleanup.add('flavor', flavor_2, scope='function')

        LOG.tc_step("Boot vm1 from volume with flavor flv_rootdisk and wait "
                    "for it pingable from NatBox")
        vm1_name = "vol_root"
        vm1 = vm_helper.boot_vm(vm1_name,
                                flavor=flavor_1,
                                source='volume',
                                avail_zone='nova',
                                vm_host=target_host,
                                cleanup='function')[1]

        vms_info = {
            vm1: {
                'ephemeral': 0,
                'swap': 0,
                'vm_type': 'volume',
                'disks': vm_helper.get_vm_devices_via_virsh(vm1)
            }
        }
        vm_helper.wait_for_vm_pingable_from_natbox(vm1)

        LOG.tc_step("Boot vm2 from volume with flavor flv_localdisk and wait "
                    "for it pingable from NatBox")
        vm2_name = "vol_ephemswap"
        vm2 = vm_helper.boot_vm(vm2_name,
                                flavor=flavor_2,
                                source='volume',
                                avail_zone='nova',
                                vm_host=target_host,
                                cleanup='function')[1]

        vm_helper.wait_for_vm_pingable_from_natbox(vm2)
        vms_info[vm2] = {
            'ephemeral': 1,
            'swap': 512,
            'vm_type': 'volume',
            'disks': vm_helper.get_vm_devices_via_virsh(vm2)
        }

        LOG.tc_step(
            "Boot vm3 from image with flavor flv_rootdisk and wait for "
            "it pingable from NatBox")
        vm3_name = "image_root"
        vm3 = vm_helper.boot_vm(vm3_name,
                                flavor=flavor_1,
                                source='image',
                                avail_zone='nova',
                                vm_host=target_host,
                                cleanup='function')[1]

        vm_helper.wait_for_vm_pingable_from_natbox(vm3)
        vms_info[vm3] = {
            'ephemeral': 0,
            'swap': 0,
            'vm_type': 'image',
            'disks': vm_helper.get_vm_devices_via_virsh(vm3)
        }

        LOG.tc_step("Boot vm4 from image with flavor flv_rootdisk, attach a "
                    "volume to it and wait for it "
                    "pingable from NatBox")
        vm4_name = 'image_root_attachvol'
        vm4 = vm_helper.boot_vm(vm4_name,
                                flavor_1,
                                source='image',
                                avail_zone='nova',
                                vm_host=target_host,
                                cleanup='function')[1]

        vol = cinder_helper.create_volume(bootable=False)[1]
        ResourceCleanup.add('volume', vol, scope='function')
        vm_helper.attach_vol_to_vm(vm4, vol_id=vol, mount=False)

        vm_helper.wait_for_vm_pingable_from_natbox(vm4)
        vms_info[vm4] = {
            'ephemeral': 0,
            'swap': 0,
            'vm_type': 'image_with_vol',
            'disks': vm_helper.get_vm_devices_via_virsh(vm4)
        }

        LOG.tc_step("Boot vm5 from image with flavor flv_localdisk and wait "
                    "for it pingable from NatBox")
        vm5_name = 'image_ephemswap'
        vm5 = vm_helper.boot_vm(vm5_name,
                                flavor_2,
                                source='image',
                                avail_zone='nova',
                                vm_host=target_host,
                                cleanup='function')[1]
        vm_helper.wait_for_vm_pingable_from_natbox(vm5)
        vms_info[vm5] = {
            'ephemeral': 1,
            'swap': 512,
            'vm_type': 'image',
            'disks': vm_helper.get_vm_devices_via_virsh(vm5)
        }

        LOG.tc_step("Check all VMs are booted on {}".format(target_host))
        vms_on_host = vm_helper.get_vms_on_host(hostname=target_host)
        vms = [vm1, vm2, vm3, vm4, vm5]
        assert set(vms) <= set(vms_on_host), "VMs booted on host: {}. " \
                                             "Current vms on host: {}". \
            format(vms, vms_on_host)

        for vm_ in vms:
            LOG.tc_step("Touch files under vm disks {}: "
                        "{}".format(vm_, vms_info[vm_]))
            file_paths, content = touch_files_under_vm_disks(
                vm_, **vms_info[vm_])
            vms_info[vm_]['file_paths'] = file_paths
            vms_info[vm_]['content'] = content

        LOG.tc_step("Reboot target host {}".format(target_host))
        vm_helper.evacuate_vms(host=target_host,
                               vms_to_check=vms,
                               ping_vms=True)

        LOG.tc_step("Check files after evacuation")
        for vm_ in vms:
            LOG.info("--------------------Check files for vm {}".format(vm_))
            check_helper.check_vm_files(vm_id=vm_,
                                        vm_action='evacuate',
                                        storage_backing=storage_backing,
                                        prev_host=target_host,
                                        **vms_info[vm_])
        vm_helper.ping_vms_from_natbox(vms)
def vms_with_upgrade():
    """
    Fixture for test_vms_with_upgrade: creates various vms before the upgrade

    Skip conditions:
        - Less than two hosts configured with storage backing under test

    Setups:
        - Add admin role to primary tenant (module)

    Test Steps:
        - Create flv_rootdisk without ephemeral or swap disks, and set storage backing extra spec
        - Create flv_ephemswap with ephemeral AND swap disks, and set storage backing extra spec
        - Boot following vms  and wait for them to be pingable from NatBox:
            - Boot vm1 from volume with flavor flv_rootdisk
            - Boot vm2 from volume with flavor flv_localdisk
            - Boot vm3 from image with flavor flv_rootdisk
            - Boot vm4 from image with flavor flv_rootdisk, and attach a volume to it
            - Boot vm5 from image with flavor flv_localdisk
            - start upgrade ....Follows upgrade procedure
            - Ping NAT during the upgrade before live migration
            - complete upgrade.

    Teardown:
        -  Not complete ....Delete created vms, volumes, flavors

    """
    ProjVar.set_var(SOURCE_OPENRC=True)
    Tenant.set_primary('tenant2')

    LOG.fixture_step("Create a flavor without ephemeral or swap disks")
    flavor_1 = nova_helper.create_flavor('flv_rootdisk')[1]
    ResourceCleanup.add('flavor', flavor_1)

    LOG.fixture_step("Create another flavor with ephemeral and swap disks")
    flavor_2 = nova_helper.create_flavor('flv_ephemswap',
                                         ephemeral=1,
                                         swap=512)[1]
    ResourceCleanup.add('flavor', flavor_2)

    LOG.fixture_step(
        "Boot vm1 from volume with flavor flv_rootdisk and wait for it pingable from NatBox"
    )
    vm1_name = "vol_root"
    vm1 = vm_helper.boot_vm(vm1_name, flavor=flavor_1, cleanup='function')[1]

    LOG.fixture_step(
        "Boot vm2 from volume with flavor flv_localdisk and wait for it pingable from NatBox"
    )
    vm2_name = "vol_ephemswap"
    vm2 = vm_helper.boot_vm(vm2_name, flavor=flavor_2, cleanup='function')[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm1)
    vm_helper.wait_for_vm_pingable_from_natbox(vm2)

    LOG.fixture_step(
        "Boot vm3 from image with flavor flv_rootdisk and wait for it pingable from NatBox"
    )
    vm3_name = "image_root"
    vm3 = vm_helper.boot_vm(vm3_name, flavor=flavor_1, cleanup='function')[1]

    LOG.fixture_step(
        "Boot vm4 from image with flavor flv_rootdisk, attach a volume to it and wait for it "
        "pingable from NatBox")
    vm4_name = 'image_root_attachvol'
    vm4 = vm_helper.boot_vm(vm4_name, flavor_1, cleanup='function')[1]

    vol = cinder_helper.create_volume(bootable=False)[1]
    ResourceCleanup.add('volume', vol)
    vm_helper.attach_vol_to_vm(vm4, vol_id=vol)

    LOG.fixture_step(
        "Boot vm5 from image with flavor flv_localdisk and wait for it pingable from NatBox"
    )
    vm5_name = 'image_ephemswap'
    vm5 = vm_helper.boot_vm(vm5_name,
                            flavor_2,
                            source='image',
                            cleanup='function')[1]

    vm_helper.wait_for_vm_pingable_from_natbox(vm4)
    vm_helper.wait_for_vm_pingable_from_natbox(vm5)

    vms = [vm1, vm2, vm3, vm4, vm5]
    return vms
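A consumer of this fixture would typically verify that all returned vms stay reachable across the upgrade, for instance (illustrative only):

def test_vms_with_upgrade(vms_with_upgrade):
    # Verify the five pre-upgrade vms respond to ping from the NatBox
    vm_helper.ping_vms_from_natbox(vms_with_upgrade)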
Example #22
def test_nova_actions(guest_os, cpu_pol, actions):
    """

    Args:
        guest_os:
        cpu_pol:
        actions:

    Test Steps:
        - Create a glance image from given guest type
        - Create a vm from cinder volume using above image with specified cpu
        policy
        - Perform given nova actions on vm
        - Ensure nova operation succeeded and vm still in good state (active
        and reachable from NatBox)

    """
    if guest_os == 'opensuse_12':
        if not cinder_helper.is_volumes_pool_sufficient(min_size=40):
            skip(SkipStorageSpace.SMALL_CINDER_VOLUMES_POOL)

    img_id = glance_helper.get_guest_image(guest_os=guest_os)

    LOG.tc_step("Create a flavor with 1 vcpu")
    flavor_id = nova_helper.create_flavor(name=cpu_pol, vcpus=1,
                                          root_disk=9)[1]
    ResourceCleanup.add('flavor', flavor_id)

    if cpu_pol is not None:
        specs = {FlavorSpec.CPU_POLICY: cpu_pol}
        LOG.tc_step("Add following extra specs: {}".format(specs))
        nova_helper.set_flavor(flavor=flavor_id, **specs)

    LOG.tc_step("Create a volume from {} image".format(guest_os))
    vol_id = \
        cinder_helper.create_volume(name='vol-' + guest_os, source_id=img_id,
                                    guest_image=guest_os)[1]
    ResourceCleanup.add('volume', vol_id)

    LOG.tc_step("Boot a vm from above flavor and volume")
    vm_id = vm_helper.boot_vm('nova_actions',
                              flavor=flavor_id,
                              source='volume',
                              source_id=vol_id,
                              cleanup='function')[1]

    LOG.tc_step("Wait for VM pingable from NATBOX")
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

    for action in actions:
        if action == 'auto_recover':
            LOG.tc_step(
                "Set vm to error state and wait for auto recovery complete, "
                "then verify ping from base vm over "
                "management and data networks")
            vm_helper.set_vm_state(vm_id=vm_id,
                                   error_state=True,
                                   fail_ok=False)
            vm_helper.wait_for_vm_values(vm_id=vm_id,
                                         status=VMStatus.ACTIVE,
                                         fail_ok=True,
                                         timeout=600)
        else:
            LOG.tc_step("Perform following action on vm {}: {}".format(
                vm_id, action))
            vm_helper.perform_action_on_vm(vm_id, action=action)

    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
Example #23
def pb_create_volumes(con_ssh,
                      volume_names=None,
                      volume_sizes=None,
                      backup_info=None):
    """
    Create volumes before doing System Backup.

    Args:
        con_ssh:
            - current ssh connection

        volume_names:
            - names of volumes to create

        volume_sizes:
            - sizes of volumes to create

        backup_info:
            - options for doing system backup

    Return:
        a dictionary of information for created volumes, including id, name, and size of volumes
    """
    LOG.info('Create VOLUMEs')

    if not volume_names:
        volume_names = ['vol_2G', 'vol_5G', 'vol_10G', 'vol_20G']

    if not volume_sizes:
        volume_sizes = [nm.split('_')[1][:-1] for nm in volume_names]
        if len(volume_sizes) < len(volume_names):
            volume_sizes = list(range(2, (2 + len(volume_names) * 2), 2))
            volume_sizes = volume_sizes[:len(volume_names) + 1]

    num_volumes, total_volume_size, per_volume_size = adjust_cinder_quota(
        con_ssh, len(volume_names), backup_info)

    volumes = {}
    count_volumes = 0
    if total_volume_size < 0:
        total_volume_size = 1 + sum([int(n) for n in volume_sizes])
    free_space = total_volume_size

    for name, size in zip(volume_names, volume_sizes):
        size = int(size)
        if 0 < per_volume_size < size:
            LOG.warn(
                'The size of requested VOLUME is bigger than allowed, abort, requested:{}, allowed:{}'
                .format(size, per_volume_size))
            continue

        free_space -= size
        if free_space <= 0:
            LOG.warn(
                'No more space in cinder-volumes for requested:{}, limit:{}, left free:{}'
                .format(size, total_volume_size, free_space))
            break

        LOG.info(
            '-OK, attempt to create volume of size:{:05.3f}, free space left:{:05.3f}'
            .format(size, free_space))
        volume_id = cinder_helper.create_volume(name=name,
                                                size=size,
                                                auth_info=Tenant.get('tenant1'))[1]

        volumes.update({volume_id: {'name': name, 'size': size}})

        count_volumes += 1
        if 0 < num_volumes < count_volumes:
            LOG.info('Too many volumes created, abort')
            break

    LOG.info('OK, created {} volumes, total size:{}, volumes:{}'.format(
        count_volumes, total_volume_size, volumes))
    return volumes
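Callers typically create these volumes right before taking the system backup, e.g. (illustrative; con_ssh and backup_info come from the surrounding backup test setup):

volumes = pb_create_volumes(con_ssh, backup_info=backup_info)
LOG.info('Volumes prepared for backup: {}'.format(volumes))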
Example #24
def _test_cpu_pol_dedicated_shared_coexists(vcpus_dedicated, vcpus_shared, pol_source, boot_source):
    """
    Test two vms coexisting on the same host, one with the dedicated cpu property, and one with the shared cpu property.

    Args:
        vcpus_dedicated: Amount of vcpu(s) to allocate for the vm with the dedicated CPU_POLICY.
        vcpus_shared: Amount of vcpu(s) to allocate for the vm with the shared CPU_POLICY.
        pol_source: Where the CPU_POLICY is set from.
        boot_source: The boot media the vm will use to boot.

    Test Setups:
        - Create two flavors, one for each vm.
        - If using 'flavor' for pol_source, set extra specs for the CPU_POLICY.
        - If using 'image' for pol_source, set ImageMetaData for the CPU_POLICY.
        - If using 'volume' for boot_source, create volume from tis image.
        - If using 'image' for boot_source, use tis image.
        - Determine the amount of free vcpu(s) on the compute before testing.

    Test Steps:
        - Boot the first vm with CPU_POLICY: dedicated.
        - Wait until vm is pingable from natbox.
        - Check vm topology for vcpu(s).
        - Determine the amount of free vcpu(s) on the compute.
        - Boot the second vm with CPU_POLICY: shared.
        - Wait until vm is pingable from natbox.
        - Check vm topology for vcpu(s).
        - Delete vms
        - Determine the amount of free vcpu(s) on the compute after testing.
        - Compare free vcpu(s) on the compute before and after testing, ensuring they are the same.

    Test Teardown
        - Delete created volumes and flavors
    """
    LOG.tc_step("Getting host list")
    target_hosts = host_helper.get_hypervisors(state='up')
    target_host = target_hosts[0]
    storage_backing = host_helper.get_host_instance_backing(host=target_host)
    if 'image' in storage_backing:
        storage_backing = 'local_image'
    elif 'remote' in storage_backing:
        storage_backing = 'remote'

    image_id = glance_helper.get_image_id_from_name(GuestImages.DEFAULT['guest'], strict=True)
    pre_test_cpus = host_helper.get_vcpus_for_computes(field='used_now')

    collection = ['dedicated', 'shared']
    vm_ids = []
    for x in collection:
        if x == 'dedicated':
            vcpus = vcpus_dedicated
        else:
            vcpus = vcpus_shared
        LOG.tc_step("Create {} flavor with {} vcpus".format(x, vcpus))
        flavor_id = nova_helper.create_flavor(name=x, vcpus=vcpus, storage_backing=storage_backing)[1]
        ResourceCleanup.add('flavor', flavor_id)

        if pol_source == 'flavor':
            LOG.tc_step("Set CPU_POLICY for {} flavor".format(x))
            specs = {FlavorSpec.CPU_POLICY: x}
            nova_helper.set_flavor(flavor_id, **specs)
        else:
            LOG.tc_step("Create image with CPU_POLICY: {}".format(x))
            image_meta = {ImageMetadata.CPU_POLICY: x}
            image_id = glance_helper.create_image(name='cpu_pol_{}'.format(x), cleanup='function', **image_meta)[1]

        if boot_source == 'volume':
            LOG.tc_step("Create volume from image")
            source_id = cinder_helper.create_volume(name='cpu_pol_{}'.format(x), source_id=image_id)[1]
            ResourceCleanup.add('volume', source_id)
        else:
            source_id = image_id

        pre_boot_cpus = host_helper.get_vcpus_for_computes(field='used_now')
        LOG.tc_step("Booting cpu_pol_{}".format(x))
        vm_id = vm_helper.boot_vm(name='cpu_pol_{}'.format(x), flavor=flavor_id, source=boot_source,
                                  source_id=source_id, avail_zone='nova', vm_host=target_host, cleanup='function')[1]

        vm_ids.append(vm_id)

        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
        check_helper.check_topology_of_vm(vm_id, vcpus=vcpus, cpu_pol=x, vm_host=target_host,
                                          prev_total_cpus=pre_boot_cpus[target_host])

    LOG.tc_step("Deleting both dedicated and shared vms")
    vm_helper.delete_vms(vms=vm_ids)

    post_delete_cpus = host_helper.get_vcpus_for_computes(field='used_now')
    assert post_delete_cpus == pre_test_cpus, "vcpu count after test does not equal vcpu count before test"
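A minimal sketch of the per-host accounting behind the final assert, assuming get_vcpus_for_computes returns a host-to-count mapping; the helper below is illustrative only.

def assert_host_vcpus_restored(pre, post):
    """Compare per-host 'used_now' vCPU counts captured before and after the
    test; deleting both vms should return every host to its original usage."""
    for host, used_before in pre.items():
        used_after = post.get(host, used_before)
        assert used_after == used_before, \
            "used_now changed on {}: {} -> {}".format(host, used_before,
                                                      used_after)

# assert_host_vcpus_restored(pre_test_cpus, post_delete_cpus)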
Beispiel #25
0
def add_external_ceph(dest_filepath, ceph_services):
    """
    This function adds external ceph as a storage backend.  NOTE, this action
    cannot be undone.

    Arguments:
    - dest_filepath: path to the external ceph.conf file
    - ceph_services: list of ceph services (cinder, glance, nova) to provision
      as external

    Returns:
    - Nothing

    Test Steps:
    - Check if external ceph has already been added.  If so, skip.
    - Otherwise, add external ceph
    - Once external ceph is added, controllers will go into configuring state
    - Wait until controllers are configured
    - Check that the cinder service list now includes external ceph
    - Launch a volume and ensure it now defaults to the external ceph backend
    - Volume clean up done via resource clean-up
    - Now system is ready to be used
    """

    LOG.tc_step('Check if external ceph has already been added')
    backend_provisioned = check_external_ceph()
    if backend_provisioned:
        skip('External ceph backend already configured')

    # User may provision all or a subset of ceph services to be external
    ceph_pools = ""
    if 'cinder' in ceph_services:
        ceph_pools += ' cinder_pool=cinder-volumes'
    if 'glance' in ceph_services:
        ceph_pools += ' glance_pool=images'
    if 'nova' in ceph_services:
        ceph_pools += ' ephemeral_pool=ephemeral'
    ceph_name = 'ceph-external'
    ceph_params = ",".join(ceph_services)

    LOG.tc_step('Add the external ceph backend')
    cmd = "storage-backend-add -s {} -n {} -c {} {} {}".format(
        ceph_params, ceph_name, dest_filepath, ceph_name, ceph_pools)
    cli.system(cmd)

    LOG.tc_step('Wait for the storage backend to go into configuring state')
    storage_helper.wait_for_storage_backend_vals(
        ceph_name, **{'state': BackendState.CONFIGURING})

    LOG.tc_step('Wait for the storage backend to become configured')
    storage_helper.wait_for_storage_backend_vals(
        ceph_name, **{'state': BackendState.CONFIGURED})

    # Need to confirm if we actually had a config out-of-date alarm

    LOG.tc_step('Check the expected cinder type is added')
    assert check_cinder_type(cinder_type="ceph-external-ceph-external"
                             ), "External ceph cinder type was not found"

    LOG.tc_step(
        'Launch a volume and ensure it is created in the external ceph backend'
    )
    vol_id = cinder_helper.create_volume(cleanup="function")[1]
    volume_type = cinder_helper.get_volume_show_values(
        vol_id, fields='os-vol-host-attr:host')
    assert volume_type == 'controller@ceph-external#ceph-external', "Volume created in wrong backend"
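For reference, a worked example of the command string composed above for a two-service subset; the ceph.conf path is illustrative, not taken from the test.

services = ['cinder', 'glance']
pool_args = []
if 'cinder' in services:
    pool_args.append('cinder_pool=cinder-volumes')
if 'glance' in services:
    pool_args.append('glance_pool=images')
example_cmd = "storage-backend-add -s {} -n {} -c {} {} {}".format(
    ','.join(services), 'ceph-external', '/home/sysadmin/ceph.conf',
    'ceph-external', ' '.join(pool_args))
# -> storage-backend-add -s cinder,glance -n ceph-external
#    -c /home/sysadmin/ceph.conf ceph-external cinder_pool=cinder-volumes glance_pool=images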
Beispiel #26
0
def test_vm_with_config_drive(hosts_per_stor_backing):
    """
    Skip Condition:
        - no host with local_image backend

    Test Steps:
        - Launch a vm using config drive
        - Add test data to config drive on vm
        - Do some operations (reboot vm for simplex, cold migrate and lock host for non-simplex) and
            check test data persisted in config drive after each operation
    Teardown:
        - Delete created vm, volume, flavor

    """
    guest_os = 'cgcs-guest'
    # guest_os = 'tis-centos-guest'  # CGTS-6782
    img_id = glance_helper.get_guest_image(guest_os)
    hosts_num = len(hosts_per_stor_backing.get('local_image', []))
    if hosts_num < 1:
        skip("No host with local_image storage backing")

    volume_id = cinder_helper.create_volume(name='vol_inst1',
                                            source_id=img_id,
                                            guest_image=guest_os)[1]
    ResourceCleanup.add('volume', volume_id, scope='function')

    block_device = {
        'source': 'volume',
        'dest': 'volume',
        'id': volume_id,
        'device': 'vda'
    }
    vm_id = vm_helper.boot_vm(name='config_drive',
                              config_drive=True,
                              block_device=block_device,
                              cleanup='function',
                              guest_os=guest_os,
                              meta={'foo': 'bar'})[1]

    LOG.tc_step("Confirming the config drive is set to True in vm ...")
    assert str(vm_helper.get_vm_values(vm_id, "config_drive")[0]) == 'True', \
        "vm config-drive not true"

    LOG.tc_step("Add date to config drive ...")
    check_vm_config_drive_data(vm_id)

    vm_host = vm_helper.get_vm_host(vm_id)
    instance_name = vm_helper.get_vm_instance_name(vm_id)
    LOG.tc_step("Check config_drive vm files on hypervisor after vm launch")
    check_vm_files_on_hypervisor(vm_id,
                                 vm_host=vm_host,
                                 instance_name=instance_name)

    if not system_helper.is_aio_simplex():
        LOG.tc_step("Cold migrate VM")
        vm_helper.cold_migrate_vm(vm_id)

        LOG.tc_step("Check config drive after cold migrate VM...")
        check_vm_config_drive_data(vm_id)

        LOG.tc_step("Lock the compute host")
        compute_host = vm_helper.get_vm_host(vm_id)
        HostsToRecover.add(compute_host)
        host_helper.lock_host(compute_host, swact=True)

        LOG.tc_step("Check config drive after locking VM host")
        check_vm_config_drive_data(vm_id, ping_timeout=VMTimeout.DHCP_RETRY)
        vm_host = vm_helper.get_vm_host(vm_id)

    else:
        LOG.tc_step("Reboot vm")
        vm_helper.reboot_vm(vm_id)

        LOG.tc_step("Check config drive after vm rebooted")
        check_vm_config_drive_data(vm_id)

    LOG.tc_step("Check vm files exist after nova operations")
    check_vm_files_on_hypervisor(vm_id,
                                 vm_host=vm_host,
                                 instance_name=instance_name)
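A hedged sketch of the kind of in-guest check check_vm_config_drive_data is expected to do; the run_in_guest callable and the exact mount commands are assumptions, not the framework's implementation.

def check_config_drive_meta(run_in_guest, key='foo', value='bar'):
    """Mount the config drive inside the guest (exposed as a disk labelled
    'config-2') and verify the injected metadata key/value is present.
    run_in_guest is any callable that runs a shell command in the VM and
    returns its stdout."""
    run_in_guest('mkdir -p /mnt/config')
    run_in_guest('mount -L config-2 /mnt/config 2>/dev/null || true')
    out = run_in_guest('cat /mnt/config/openstack/latest/meta_data.json')
    assert '"{}": "{}"'.format(key, value) in out, \
        "metadata {}={} not found on config drive".format(key, value)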
Beispiel #27
0
def test_cpu_realtime_vm_actions(vcpus, cpu_rt, rt_mask, rt_source, shared_vcpu, numa_nodes, cpu_thread, check_hosts):
    """
    Test vm with realtime cpu policy specified in flavor
    Args:
        vcpus (int):
        cpu_rt (str|None):
        rt_source (str): flavor or image
        rt_mask (str):
        shared_vcpu (int|None):
        numa_nodes (int|None): number of numa_nodes to boot vm on
        cpu_thread
        check_hosts (tuple): test fixture

    Setups:
        - check storage backing and whether system has shared cpu configured

    Test Steps:
        - Create a flavor with given cpu realtime, realtime mask and shared vcpu extra spec settings
        - Create a vm with above flavor
        - Verify cpu scheduler policies via virsh dumpxml and ps
        - Perform following nova actions and repeat above step after each action:
            ['suspend', 'resume'],
            ['live_migrate'],
            ['cold_migrate'],
            ['rebuild']

    """
    storage_backing, hosts_with_shared_cpu, ht_hosts = check_hosts

    if cpu_thread == 'require' and len(ht_hosts) < 2:
        skip("Less than two hyperthreaded hosts")

    if shared_vcpu is not None and len(hosts_with_shared_cpu) < 2:
        skip("Less than two up hypervisors configured with shared cpu")

    cpu_rt_flv = cpu_rt
    if rt_source == 'image':
        # rt_mask_flv = cpu_rt_flv = None
        rt_mask_flv = '^0'
        rt_mask_img = rt_mask
    else:
        rt_mask_flv = rt_mask
        rt_mask_img = None

    image_id = None
    if rt_mask_img is not None:
        image_metadata = {ImageMetadata.CPU_RT_MASK: rt_mask_img}
        image_id = glance_helper.create_image(name='rt_mask', cleanup='function', **image_metadata)[1]

    vol_id = cinder_helper.create_volume(source_id=image_id)[1]
    ResourceCleanup.add('volume', vol_id)

    name = 'rt-{}_mask-{}_{}vcpu'.format(cpu_rt, rt_mask_flv, vcpus)
    flv_id = create_rt_flavor(vcpus, cpu_pol='dedicated', cpu_rt=cpu_rt_flv, rt_mask=rt_mask_flv,
                              shared_vcpu=shared_vcpu, numa_nodes=numa_nodes, cpu_thread=cpu_thread,
                              storage_backing=storage_backing)[0]

    LOG.tc_step("Boot a vm with above flavor")
    vm_id = vm_helper.boot_vm(name=name, flavor=flv_id, cleanup='function', source='volume', source_id=vol_id)[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

    expt_rt_cpus, expt_ord_cpus = parse_rt_and_ord_cpus(vcpus=vcpus, cpu_rt=cpu_rt, cpu_rt_mask=rt_mask)

    check_rt_and_ord_cpus_via_virsh_and_ps(vm_id, vcpus, expt_rt_cpus, expt_ord_cpus, shared_vcpu=shared_vcpu)
    vm_host = vm_helper.get_vm_host(vm_id)
    if shared_vcpu:
        assert vm_host in hosts_with_shared_cpu

    numa_num = 1 if numa_nodes is None else numa_nodes
    check_helper.check_topology_of_vm(vm_id, vcpus, cpu_pol='dedicated', cpu_thr_pol=cpu_thread, vm_host=vm_host)

    expt_current_cpu = vcpus
    # if min_vcpus is not None:
    #     GuestLogs.add(vm_id)
    #     LOG.tc_step("Scale down cpu once")
    #     vm_helper.scale_vm(vm_id, direction='down', resource='cpu')
    #     vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    #
    #     LOG.tc_step("Check current vcpus in nova show is reduced after scale down")
    #     expt_current_cpu -= 1
    #     check_helper.check_vm_vcpus_via_nova_show(vm_id, min_vcpus, expt_current_cpu, vcpus)

    for actions in [['suspend', 'resume'], ['stop', 'start'], ['live_migrate'], ['cold_migrate'], ['rebuild']]:
        LOG.tc_step("Perform {} on vm and check realtime cpu policy".format(actions))
        for action in actions:
            kwargs = {}
            if action == 'rebuild':
                kwargs = {'image_id': image_id}
            vm_helper.perform_action_on_vm(vm_id, action=action, **kwargs)

        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
        vm_host_post_action = vm_helper.get_vm_host(vm_id)
        if shared_vcpu:
            assert vm_host_post_action in hosts_with_shared_cpu

        LOG.tc_step("Check cpu thread policy in vm topology and vcpus in nova show after {}".format(actions))
        check_helper.check_topology_of_vm(vm_id, vcpus, cpu_pol='dedicated', cpu_thr_pol=cpu_thread, numa_num=numa_num,
                                          vm_host=vm_host_post_action, current_vcpus=expt_current_cpu)

        check_virsh = True
        offline_cpu = None

        check_rt_and_ord_cpus_via_virsh_and_ps(vm_id, vcpus, expt_rt_cpus, expt_ord_cpus, shared_vcpu=shared_vcpu,
                                               offline_cpus=offline_cpu, check_virsh_vcpusched=check_virsh)
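A minimal sketch of the realtime/ordinary vCPU split that parse_rt_and_ord_cpus is expected to produce, assuming the usual hw:cpu_realtime_mask semantics where '^0' excludes vCPU 0 from the realtime set; the function below is illustrative, not the framework helper.

def split_rt_and_ord(vcpus, rt_mask):
    """Return (realtime_cpus, ordinary_cpus) for masks such as '^0' or '^0-1'."""
    excluded = set()
    for part in rt_mask.lstrip('^').split(','):
        if '-' in part:
            start, end = (int(x) for x in part.split('-'))
            excluded.update(range(start, end + 1))
        elif part:
            excluded.add(int(part))
    all_vcpus = set(range(vcpus))
    realtime = sorted(all_vcpus - excluded)
    ordinary = sorted(all_vcpus & excluded)
    return realtime, ordinary

# split_rt_and_ord(4, '^0')  ->  ([1, 2, 3], [0])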
Beispiel #28
0
def test_create_snapshot_using_boot_from_volume_vm():
    """
    This test creates a snapshot from a VM that is booted from volume using
    nova image-create.  Nova image-create will create a glance image that can
    be used to boot a VM, but the snapshot seen in glance will be empty, since
    the real image is stored in cinder.

    Test Steps:
    1.  Run cinder create --image <img-uuid> --size <size> <bootable_vol>
    2.  Boot a VM using the bootable volume
    3.  Run nova image-create <vm-id> <name> to save a snapshot of the vm
    4.  Run cinder snapshot-list to list the snapshot of the VM
    5.  Run cinder create --snapshot-id <snapshot-from-VM> --name <vol-name> <size>
    6.  Run cinder upload-to-image <vol-uuid> <image-name> to create a image
    7.  Glance image-download to download the snapshot.

    Teardown:
    1.  Delete VMs
    2.  Delete volumes
    3.  Delete snapshots

    Possible Improvements:
    1.  Could update the test to use non-raw images, but determining the size
    of the image is more complex if the original file is no longer on the
    filesystem.
    """

    con_ssh = ControllerClient.get_active_controller()

    LOG.tc_step("Get available images")
    image_list = glance_helper.get_images()

    if len(image_list) == 0:
        skip("The test requires some images to be present")

    # Filter out zero-sized images and non-raw images (skipping non-raw
    # keeps the size calculation simple)
    image_uuid = vol_size = None
    for image in image_list:
        image_uuid = image
        image_prop_s, image_prop_d = glance_helper.get_image_values(
            image_uuid, ("size", "disk_format"))
        if str(image_prop_s) == "0" or image_prop_d != "raw":
            continue
        else:
            divisor = 1024 * 1024 * 1024
            image_size = int(image_prop_s)
            vol_size = int(math.ceil(image_size / divisor))
            break
    else:
        skip("No usable images found")

    LOG.tc_step("Create a cinder bootable volume")
    # Check if the lab has emc-vnx volume types; if so, use volume type =
    # iscsi, since creating a snapshot with emc-vnx (EMC SAN) is not
    # supported yet.
    volume_types = cinder_helper.get_volume_types(field='Name')
    vol_type = 'iscsi' if any('emc' in t for t in volume_types) else None
    vol_id = cinder_helper.create_volume(source_id=image_uuid,
                                         vol_type=vol_type,
                                         size=vol_size,
                                         fail_ok=False,
                                         cleanup='function')[1]

    LOG.tc_step("Boot VM using newly created bootable volume")
    vm_id = vm_helper.boot_vm(source="volume",
                              source_id=vol_id,
                              cleanup='function')[1]
    vm_name = vm_helper.get_vm_name_from_id(vm_id)
    snapshot_name = vm_name + "_snapshot"

    # nova image-create generates a glance image of 0 size
    # real snapshot is stored in cinder
    LOG.tc_step("Create a snapshot based on that VM")
    code, image_id, snapshot_id = vm_helper.create_image_from_vm(
        vm_id,
        image_name=snapshot_name,
        cleanup='function',
        expt_cinder_snapshot=True)

    vol_name = "vol_from_snapshot"
    # Creates volume from snapshot
    LOG.tc_step("Create cinder volume from vm snapshot")
    snapshot_vol_id = cinder_helper.create_volume(name=vol_name,
                                                  source_id=snapshot_id,
                                                  source_type='snapshot',
                                                  cleanup='function')[1]

    # Creates an image
    LOG.tc_step("Upload cinder volume to image")
    image_name = "cinder_upload"
    img_from_vol_id = glance_helper.create_image(name=image_name,
                                                 volume=snapshot_vol_id,
                                                 auth_info=None,
                                                 cleanup='function')[1]

    image_filename = '{}/images/temp'.format(HostLinuxUser.get_home())
    LOG.tc_step("Download the image snapshot")
    cmd = "image save --file {} {}".format(image_filename, image_id)
    cli.openstack(cmd, ssh_client=con_ssh, fail_ok=False)

    # Downloading should be good enough for validation.  If the file is
    # zero-size, download will report failure.
    LOG.tc_step("Delete the downloaded image")
    con_ssh.exec_cmd("rm {}".format(image_filename), fail_ok=False)

    LOG.tc_step('Delete uploaded image')
    glance_helper.delete_images(images=img_from_vol_id)

    LOG.tc_step('Delete created cinder volume from vm snapshot')
    cinder_helper.delete_volumes(snapshot_vol_id)

    LOG.tc_step(
        'Delete cinder snapshot and image snapshot from boot-from-volume vm')
    glance_helper.delete_images(image_id)
    cinder_helper.delete_volume_snapshots(snapshots=snapshot_id)
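The volume size used above is simply the raw image size rounded up to whole gibibytes; a small worked example of that arithmetic (the function name is illustrative):

import math

def min_volume_size_gb(image_size_bytes):
    """Smallest whole-GiB volume size that can hold a raw image of this size."""
    gib = 1024 * 1024 * 1024
    return int(math.ceil(image_size_bytes / float(gib)))

# e.g. a ~2.2 GB raw image:  min_volume_size_gb(2361393152)  ->  3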
Beispiel #29
0
def test_interface_attach_detach_max_vnics(guest_os, if_attach_arg, vifs,
                                           check_avs_pattern, base_vm):
    """
    Sample test case for interface attach/detach to maximum vnics

    Setups:
        - Boot a base vm with mgmt net and internal0-net1   (module)

    Test Steps:
        - Boot a vm with only mgmt interface
        - Attach vifs to the vm with the given if_attach_arg and vif_model
        - Bring up the interface from vm
        - ping between base_vm and vm_under_test over mgmt & tenant network
        - Perform VM action - Cold migrate, live migrate, pause resume, suspend resume
        - Verify ping between base_vm and vm_under_test over mgmt & tenant network after vm operation
        - Detach all the tenant interfaces
        - Repeat attach/detach after performing each vm action

    Teardown:
        - Delete created vm, volume, port (if any)  (func)
        - Delete base vm, volume    (module)

    """
    if guest_os == 'vxworks' and not system_helper.is_avs():
        skip('e1000 vif unsupported by OVS')

    base_vm_id, mgmt_nic, tenant_nic, internal_net_id, tenant_net_id, mgmt_net_id = base_vm

    glance_vif = None
    if not (if_attach_arg == 'port_id' and system_helper.is_avs()):
        for vif in vifs:
            if vif[0] in ('e1000', 'rtl8139'):
                glance_vif = vif[0]
                break

    LOG.tc_step("Get/Create {} glance image".format(guest_os))
    cleanup = None if (not glance_vif and re.search(
        GuestImages.TIS_GUEST_PATTERN, guest_os)) else 'function'
    image_id = glance_helper.get_guest_image(
        guest_os=guest_os,
        cleanup=cleanup,
        use_existing=False if cleanup else True)

    if glance_vif:
        glance_helper.set_image(image_id,
                                hw_vif_model=glance_vif,
                                new_name='{}_{}'.format(guest_os, glance_vif))

    LOG.tc_step("Create a flavor with 2 vcpus")
    flavor_id = nova_helper.create_flavor(vcpus=1,
                                          guest_os=guest_os,
                                          cleanup='function')[1]

    LOG.tc_step("Create a volume from {} image".format(guest_os))
    code, vol_id = cinder_helper.create_volume(name='vol-' + guest_os,
                                               source_id=image_id,
                                               fail_ok=True,
                                               guest_image=guest_os,
                                               cleanup='function')
    assert 0 == code, "Issue occurred when creating volume"
    source_id = vol_id

    LOG.tc_step("Boot a vm with mgmt nic only")
    vm_under_test = vm_helper.boot_vm(name='if_attach_tenant',
                                      nics=[mgmt_nic],
                                      source_id=source_id,
                                      flavor=flavor_id,
                                      guest_os=guest_os,
                                      cleanup='function')[1]
    prev_port_count = 1
    for vm_actions in [['live_migrate'], ['cold_migrate'],
                       ['pause', 'unpause'], ['suspend', 'resume'],
                       ['stop', 'start']]:
        tenant_port_ids = []
        if 'vxworks' not in guest_os:
            LOG.tc_step(
                "Attach specified vnics to the VM before {} and bring up interfaces"
                .format(vm_actions))
            expt_vnics = 1
            for vif in vifs:
                vif_model, vif_count = vif
                expt_vnics += vif_count
                LOG.info("iter {}".format(vif_count))
                for i in range(vif_count):
                    if if_attach_arg == 'port_id':
                        vif_model = vif_model if system_helper.is_avs(
                        ) else None
                        port = network_helper.create_port(
                            net_id=tenant_net_id,
                            wrs_vif=vif_model,
                            cleanup='function',
                            name='attach_{}_{}'.format(vif_model, i))[1]
                        kwargs = {'port_id': port}
                    else:
                        kwargs = {'net_id': tenant_net_id}
                    tenant_port_id = vm_helper.attach_interface(
                        vm_under_test, **kwargs)[1]
                    tenant_port_ids.append(tenant_port_id)
                LOG.info(
                    "Attached new vnics to the VM {}".format(tenant_port_ids))

            vm_ports_count = len(
                network_helper.get_ports(server=vm_under_test))
            LOG.info("vnics attached to VM: {}".format(vm_ports_count))
            assert vm_ports_count == expt_vnics, "vnics attached is not equal to max number."

            LOG.info(
                "Bring up all the attached new vifs {} on tenant net from vm".
                format(vifs))
            _bring_up_attached_interface(vm_under_test,
                                         ports=tenant_port_ids,
                                         guest_os=guest_os,
                                         base_vm=base_vm_id)

            if expt_vnics == 16:
                LOG.tc_step(
                    "Verify no more vnic can be attached after reaching upper limit 16"
                )
                res = vm_helper.attach_interface(vm_under_test,
                                                 net_id=tenant_net_id,
                                                 fail_ok=True)[0]
                assert res == 1, \
                    "Unexpectedly able to attach a vnic beyond the 16 vnic limit"

        if vm_actions[0] == 'auto_recover':
            LOG.tc_step(
                "Set vm to error state and wait for auto recovery complete, then verify ping from "
                "base vm over management and data networks")
            vm_helper.set_vm_state(vm_id=vm_under_test,
                                   error_state=True,
                                   fail_ok=False)
            vm_helper.wait_for_vm_values(vm_id=vm_under_test,
                                         status=VMStatus.ACTIVE,
                                         fail_ok=True,
                                         timeout=600)
            # if 'vxworks' not in guest_os:
            #     _bring_up_attached_interface(vm_under_test, guest_os=guest_os, num=new_vnics)
        else:
            LOG.tc_step("Perform following action(s) on vm {}: {}".format(
                vm_under_test, vm_actions))
            for action in vm_actions:
                vm_helper.perform_action_on_vm(vm_under_test, action=action)
                if action == 'cold_migrate' or action == 'start':
                    LOG.tc_step(
                        "Bring up all the attached tenant interface from vm after {}"
                        .format(vm_actions))
                    # if 'vxworks' not in guest_os:
                    #     _bring_up_attached_interface(vm_under_test, guest_os=guest_os, num=new_vnics)

        vm_helper.wait_for_vm_pingable_from_natbox(vm_under_test)

        if 'vxworks' not in guest_os:
            LOG.tc_step(
                "Verify ping from base_vm to vm_under_test over management networks still works "
                "after {}".format(vm_actions))
            vm_helper.ping_vms_from_vm(to_vms=vm_under_test,
                                       from_vm=base_vm_id,
                                       net_types=['mgmt', 'data'],
                                       retry=10)

            LOG.tc_step("Detach all attached interface {} after {}".format(
                tenant_port_ids, vm_actions))
            for tenant_port_id in tenant_port_ids:
                vm_helper.detach_interface(vm_id=vm_under_test,
                                           port_id=tenant_port_id,
                                           cleanup_route=True)

            vm_ports_count = len(
                network_helper.get_ports(server=vm_under_test))
            assert prev_port_count == vm_ports_count, "VM ports still listed after interface-detach"
            res = vm_helper.ping_vms_from_vm(to_vms=base_vm_id,
                                             from_vm=vm_under_test,
                                             fail_ok=True,
                                             net_types=['data'],
                                             retry=0)[0]
            assert not res, "Detached interface still works"
Beispiel #30
0
def test_instantiate_a_vm_with_multiple_volumes_and_migrate():
    """
    Test a vm with multiple volumes through live migration, cold migration and evacuation:

    Test Setups:
    - get guest image_id
    - get or create 'small' flavor_id
    - get tenant and management network ids

    Test Steps:
    - create volume for boot and another extra size 8GB
    - boot vms from the created volume
    - Validate that VMs boot, and that no timeouts or error status occur.
    - Verify VM status is ACTIVE
    - Attach the second volume to VM
    - Attempt live migrate  VM
    - Login to VM and verify the filesystem is rw mode on both volumes
    - Attempt cold migrate  VM
    - Login to VM and verify the filesystem is rw mode on both volumes
    - Reboot the compute host to initiate evacuation
    - Login to VM and verify the filesystem is rw mode on both volumes
    - Terminate VMs

    Skip conditions:
    - less than two computes
    - less than one storage

    """
    # skip("Currently not working. Centos image doesn't see both volumes")
    LOG.tc_step("Creating a volume size=8GB.....")
    vol_id_0 = cinder_helper.create_volume(size=8)[1]
    ResourceCleanup.add('volume', vol_id_0, scope='function')

    LOG.tc_step("Creating a second volume size=8GB.....")
    vol_id_1 = cinder_helper.create_volume(size=8, bootable=False)[1]
    LOG.tc_step("Volume id is: {}".format(vol_id_1))
    ResourceCleanup.add('volume', vol_id_1, scope='function')

    LOG.tc_step("Booting instance vm_0...")

    vm_id = vm_helper.boot_vm(name='vm_0',
                              source='volume',
                              source_id=vol_id_0,
                              cleanup='function')[1]
    time.sleep(5)

    LOG.tc_step("Verify  VM can be pinged from NAT box...")
    rc, boot_time = check_vm_boot_time(vm_id)
    assert rc, "VM is not pingable after {} seconds ".format(boot_time)

    LOG.tc_step("Login to VM and to check filesystem is rw mode....")
    assert is_vm_filesystem_rw(
        vm_id), 'vol_0 rootfs filesystem is not RW as expected.'

    LOG.tc_step("Attemping to attach a second volume to VM...")
    vm_helper.attach_vol_to_vm(vm_id, vol_id_1)

    LOG.tc_step(
        "Log in to VM and check the filesystem is in rw mode for both volumes....")
    assert is_vm_filesystem_rw(vm_id, rootfs=['vda',
                                              'vdb']), 'volumes rootfs ' \
                                                       'filesystem is not RW ' \
                                                       'as expected.'

    LOG.tc_step("Attemping live migrate VM...")
    vm_helper.live_migrate_vm(vm_id=vm_id)

    LOG.tc_step("Login to VM and to check filesystem is rw mode after live "
                "migration....")
    assert is_vm_filesystem_rw(vm_id, rootfs=['vda',
                                              'vdb']), 'After live migration ' \
                                                       'rootfs filesystem is ' \
                                                       'not RW'

    LOG.tc_step("Attempting  cold migrate VM...")
    vm_helper.cold_migrate_vm(vm_id)

    LOG.tc_step("Login to VM and to check filesystem is rw mode after live "
                "migration....")
    assert is_vm_filesystem_rw(vm_id, rootfs=['vda',
                                              'vdb']), 'After cold migration ' \
                                                       'rootfs filesystem is ' \
                                                       'not RW'
    LOG.tc_step("Testing VM evacuation.....")
    before_host_0 = vm_helper.get_vm_host(vm_id)

    LOG.tc_step("Rebooting compute {} to initiate vm evacuation .....".format(
        before_host_0))
    vm_helper.evacuate_vms(host=before_host_0,
                           vms_to_check=vm_id,
                           ping_vms=True)

    LOG.tc_step("Login to VM and to check filesystem is rw mode after live "
                "migration....")
    assert is_vm_filesystem_rw(vm_id, rootfs=['vda',
                                              'vdb']), 'After evacuation ' \
                                                       'filesystem is not RW'