Example #1
def test_db_purge():

    end_time = time.time() + 7200

    count = 1
    while time.time() < end_time:

        LOG.tc_step(
            "Iteration-{}: Creating and deleting image, volume, vm".format(
                count))
        LOG.info("------ Creating image, volume, vm")
        image_id = glance_helper.create_image(
            name='glance-purge',
            cleanup='function',
            **{ImageMetadata.AUTO_RECOVERY: 'true'})[1]
        vol_id = cinder_helper.create_volume(name='cinder-purge',
                                             source_id=image_id)[1]
        vm_id = vm_helper.boot_vm(name='nova-purge',
                                  source='volume',
                                  source_id=vol_id)[1]

        time.sleep(60)

        LOG.info("------ Deleting vm, volume, image")
        vm_helper.delete_vms(vms=vm_id)
        cinder_helper.delete_volumes(volumes=vol_id)
        glance_helper.delete_images(images=image_id)

        time.sleep(60)
        count += 1
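The two-hour purge loop above is a plain wall-clock pattern. Note also that these project helpers appear to return a (return_code, id) style tuple, hence the pervasive [1] indexing throughout these examples. A minimal stdlib sketch of the loop pattern, with hypothetical names (run_for, step):

import time

def run_for(duration_sec, step, pause=0):
    # Call step(count) repeatedly until duration_sec of wall-clock time elapses.
    end_time = time.time() + duration_sec
    count = 1
    while time.time() < end_time:
        step(count)
        if pause:
            time.sleep(pause)
        count += 1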
Example #2
def __create_image(img_os, scope):
    if not img_os:
        img_os = GuestImages.DEFAULT['guest']

    LOG.fixture_step(
        "({}) Get or create a glance image with {} guest OS".format(
            scope, img_os))
    img_info = GuestImages.IMAGE_FILES[img_os]
    img_id = glance_helper.get_image_id_from_name(img_os, strict=True)
    if not img_id:
        if img_info[0] is not None:
            image_path = glance_helper.scp_guest_image(img_os=img_os)
        else:
            img_dir = GuestImages.DEFAULT['image_dir']
            image_path = "{}/{}".format(img_dir, img_info[2])

        disk_format = 'raw' if img_os in [
            'cgcs-guest', 'tis-centos-guest', 'vxworks'
        ] else 'qcow2'
        img_id = glance_helper.create_image(name=img_os,
                                            source_image_file=image_path,
                                            disk_format=disk_format,
                                            container_format='bare',
                                            cleanup=scope)[1]

    return img_id
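__create_image indexes GuestImages.IMAGE_FILES at [0] (a remote source, None meaning the file is already local) and [2] (the local file name under GuestImages.DEFAULT['image_dir']). A hypothetical shape of that mapping, inferred from the indexing and not taken from the source:

# Hypothetical entries; the (remote_source, size_gib, local_file_name) layout
# is an inference from the indexing in __create_image above.
IMAGE_FILES = {
    'tis-centos-guest': (None, 2, 'tis-centos-guest.img'),
    'ubuntu_14': ('sandbox/images/ubuntu_14.qcow2', 3, 'ubuntu_14.qcow2'),
}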
Example #3
def create_image_for_metrics():
    """
    Create an image to be used by tests in the suite (with or without
    properties) to launch Cirros instances.
    """
    image_id = glance_helper.create_image(
        name=cirros_params['image_name'],
        source_image_file=cirros_params['image_file'],
        disk_format=cirros_params['disk_format'],
        cleanup="module")[1]
    return image_id
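create_image_for_metrics reads everything from a module-level cirros_params dict that is not shown here (the same dict reappears in Example #11). A hypothetical minimal shape, for orientation only:

# Hypothetical values; only the keys are implied by the code above.
cirros_params = {
    'image_name': 'cirros',
    'image_file': 'images/cirros-0.4.0-x86_64-disk.img',
    'disk_format': 'qcow2',
}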
Example #4
def test_heat_template(template_name, revert_quota):
    """
    Basic Heat template testing with various Heat templates.

    Args:
        template_name (str): e.g., OS_Cinder_Volume.
        revert_quota (dict): test fixture to revert network quota.

    =====
    Prerequisites (skip test if not met):
        - at least two hypervisor hosts on the system

    Test Steps:
        - Create a heat stack with the given template
        - Verify heat stack is created successfully
        - Verify heat resources are created
        - Delete Heat stack and verify resource deletion

    """
    if 'QoSPolicy' in template_name:
        if not system_helper.is_avs():
            skip("QoS policy is not supported by OVS")

    elif template_name == 'OS_Neutron_RouterInterface.yaml':
        LOG.tc_step("Increase network quota by 10 for every tenant")
        tenants_quotas = revert_quota
        for tenant_id, quotas in tenants_quotas.items():
            network_quota, subnet_quota = quotas
            vm_helper.set_quotas(tenant=tenant_id,
                                 networks=network_quota + 10,
                                 subnets=subnet_quota + 10)

    elif template_name == 'OS_Nova_Server.yaml':
        # create new image to do update later
        LOG.tc_step("Creating an Image to be used for heat update later")
        glance_helper.create_image(name='tis-centos2', cleanup='function')

    # add test step
    verify_basic_template(template_name)
Example #5
def create_image_with_metadata(guest_os, property_key, values, disk_format,
                               container_format):
    """
    Create image with given metadata/property.

    Args:
        guest_os:
        property_key (str): the key for the property, such as sw_wrs_auto_recovery
        values (list): list of values to test for the specific key
        disk_format (str): such as 'raw', 'qcow2'
        container_format (str): such as bare

    Test Steps:
        - Create image with given disk format, container format, property key and value pair
        - Verify property value is correctly set via glance image-show

    Returns: List of image ids


    """
    image_ids = []

    for value in values:
        LOG.tc_step(
            "Creating image with property {}={}, disk_format={}, container_format={}"
            .format(property_key, value, disk_format, container_format))
        image_name = GuestImages.IMAGE_FILES[guest_os][0]
        image_name = str(image_name) + "_auto"
        img_id = glance_helper.get_image_id_from_name(image_name, strict=True)
        if not img_id:
            image_path = glance_helper.scp_guest_image(img_os=guest_os)

            image_id = glance_helper.create_image(
                source_image_file=image_path,
                cleanup='function',
                disk_format=disk_format,
                container_format=container_format,
                **{property_key: value})[1]
            image_ids.append(image_id)

            LOG.tc_step(
                "Verify image property is set correctly via glance image-show."
            )
            actual_property_val = glance_helper.get_image_properties(
                image_id, property_key)[0]
            assert value.lower() == actual_property_val.lower(), \
                "Actual image property {} value - {} is different than set value - {}".format(
                    property_key, actual_property_val, value)
        else:
            image_ids.append(img_id)

    return image_ids
Example #6
def test_boot_vm_cpu_policy_image(flv_vcpus, flv_pol, img_pol, boot_source, expt_err):
    LOG.tc_step("Create flavor with {} vcpus".format(flv_vcpus))
    flavor_id = nova_helper.create_flavor(name='cpu_pol_{}'.format(flv_pol), vcpus=flv_vcpus)[1]
    ResourceCleanup.add('flavor', flavor_id)

    if flv_pol is not None:
        specs = {FlavorSpec.CPU_POLICY: flv_pol}

        LOG.tc_step("Set following extra specs: {}".format(specs))
        nova_helper.set_flavor(flavor_id, **specs)

    if img_pol is not None:
        image_meta = {ImageMetadata.CPU_POLICY: img_pol}
        LOG.tc_step("Create image with following metadata: {}".format(image_meta))
        image_id = glance_helper.create_image(name='cpu_pol_{}'.format(img_pol), cleanup='function', **image_meta)[1]
    else:
        image_id = glance_helper.get_image_id_from_name(GuestImages.DEFAULT['guest'], strict=True)

    if boot_source == 'volume':
        LOG.tc_step("Create a volume from image")
        source_id = cinder_helper.create_volume(name='cpu_pol_img', source_id=image_id)[1]
        ResourceCleanup.add('volume', source_id)
    else:
        source_id = image_id

    prev_cpus = host_helper.get_vcpus_for_computes(field='used_now')

    LOG.tc_step("Attempt to boot a vm from above {} with above flavor".format(boot_source))
    code, vm_id, msg = vm_helper.boot_vm(name='cpu_pol', flavor=flavor_id, source=boot_source,
                                         source_id=source_id, fail_ok=True, cleanup='function')

    # check for negative tests
    if expt_err is not None:
        LOG.tc_step("Check VM failed to boot due to conflict in flavor and image.")
        assert 4 == code, "Expect boot vm cli reject and no vm booted. Actual: {}".format(msg)
        assert eval(expt_err) in msg, "Expected error message is not found in cli return."
        return  # end the test for negative cases

    # Check for positive tests
    LOG.tc_step("Check vm is successfully booted.")
    assert 0 == code, "Expect vm boot successfully. Actual: {}".format(msg)

    # Calculate expected policy:
    expt_cpu_pol = flv_pol if flv_pol else img_pol
    expt_cpu_pol = expt_cpu_pol if expt_cpu_pol else 'shared'

    vm_host = vm_helper.get_vm_host(vm_id)
    check_helper.check_topology_of_vm(vm_id, vcpus=flv_vcpus, cpu_pol=expt_cpu_pol, vm_host=vm_host,
                                      prev_total_cpus=prev_cpus[vm_host])
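The expected-policy computation above encodes a precedence chain taken straight from the test: the flavor extra spec wins over image metadata, and 'shared' is the default. A compact restatement (a sketch; it assumes policy values are never empty strings):

def effective_cpu_policy(flv_pol, img_pol, default='shared'):
    # Flavor extra spec takes precedence over image metadata, then the default.
    return flv_pol or img_pol or default

assert effective_cpu_policy(None, 'dedicated') == 'dedicated'
assert effective_cpu_policy(None, None) == 'shared'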
Example #7
def _boot_multiports_vm(flavor,
                        mgmt_net_id,
                        vifs,
                        net_id,
                        net_type,
                        base_vm,
                        pcipt_seg_id=None):
    nics = [{'net-id': mgmt_net_id}]

    nics, glance_vif = _append_nics_for_net(vifs, net_id=net_id, nics=nics)
    img_id = None
    if glance_vif:
        img_id = glance_helper.create_image(name=glance_vif,
                                            hw_vif_model=glance_vif,
                                            cleanup='function')[1]

    LOG.tc_step(
        "Boot a test_vm with following nics on same networks as base_vm: {}".
        format(nics))
    vm_under_test = vm_helper.boot_vm(name='multiports',
                                      nics=nics,
                                      flavor=flavor,
                                      cleanup='function',
                                      image_id=img_id)[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_under_test, fail_ok=False)

    if pcipt_seg_id:
        LOG.tc_step("Add vlan to pci-passthrough interface for VM.")
        vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=vm_under_test,
                                                   net_seg_id=pcipt_seg_id,
                                                   init_conf=True)

    LOG.tc_step("Ping test_vm's own {} network ips".format(net_type))
    vm_helper.ping_vms_from_vm(to_vms=vm_under_test,
                               from_vm=vm_under_test,
                               net_types=net_type)

    vm_helper.configure_vm_vifs_on_same_net(vm_id=vm_under_test)

    LOG.tc_step(
        "Ping test_vm from base_vm to verify management and data networks connection"
    )
    vm_helper.ping_vms_from_vm(to_vms=vm_under_test,
                               from_vm=base_vm,
                               net_types=['mgmt', net_type])

    return vm_under_test, nics
Example #8
def admin_images_pg(admin_home_pg_container, request):
    LOG.fixture_step('Go to Project > Compute > Images')
    image_name = helper.gen_resource_name('image')
    images_pg = imagespage.ImagesPage(admin_home_pg_container.driver,
                                      port=admin_home_pg_container.port)
    images_pg.go_to_target_page()
    image_id = glance_helper.create_image(image_name)[1]
    image_name = glance_helper.get_image_values(image_id, 'Name')[0]

    def teardown():
        LOG.fixture_step('Back to Images page')
        images_pg.go_to_target_page()
        LOG.fixture_step('Delete image {}'.format(image_name))
        images_pg.delete_image(image_name)

    request.addfinalizer(teardown)
    return images_pg, image_name, image_id
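The fixture registers its cleanup with request.addfinalizer, which pytest runs even when the test body fails. A self-contained sketch of the pattern with stand-in names:

import pytest

@pytest.fixture
def tracked_resource(request):
    resource = {'name': 'example'}      # stands in for the created image

    def teardown():
        resource.clear()                # delete the real resource here

    request.addfinalizer(teardown)      # runs even if the test fails
    return resource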
Example #9
def create_flavors_and_images(request):
    # TODO need to check with add_default_specs set to True on baremetal
    LOG.fixture_step("Creating flavor and image")
    fl_id = nova_helper.create_flavor(name=request.param['flavor_name'],
                                      vcpus=request.param['flavor_vcpus'],
                                      ram=request.param['flavor_ram'],
                                      root_disk=request.param['flavor_disk'],
                                      properties=request.param['properties'],
                                      is_public=True,
                                      add_default_specs=False,
                                      cleanup="module")[1]
    LOG.error(request.param['image_file'])
    im_id = glance_helper.create_image(
        name=request.param['image_name'],
        source_image_file=request.param['image_file'],
        disk_format=request.param['disk_format'],
        cleanup="module")[1]
    return {"flavor": fl_id, "image": im_id}
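create_flavors_and_images is an indirectly parametrized fixture: each entry supplied via the fixture's params shows up as request.param, one test run per entry. A minimal stand-alone sketch with hypothetical rounds:

import pytest

@pytest.fixture(params=[
    {'flavor_name': 'small', 'flavor_vcpus': 1},    # hypothetical rounds
    {'flavor_name': 'large', 'flavor_vcpus': 4},
])
def flavor_spec(request):
    # request.param is the dict for the current parametrization round.
    return request.param

def test_flavor_name(flavor_spec):
    assert flavor_spec['flavor_name'] in ('small', 'large')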
Example #10
def _boot_vm_vcpu_model(flv_model=None,
                        img_model=None,
                        boot_source='volume',
                        avail_zone=None,
                        vm_host=None):
    LOG.tc_step(
        "Attempt to launch vm from {} with image vcpu model metadata: {}; flavor vcpu model extra spec: {}"
        .format(boot_source, img_model, flv_model))

    flv_id = nova_helper.create_flavor(name='vcpu_{}'.format(flv_model))[1]
    ResourceCleanup.add('flavor', flv_id)
    if flv_model:
        nova_helper.set_flavor(flavor=flv_id,
                               **{FlavorSpec.VCPU_MODEL: flv_model})

    if img_model:
        image_id = glance_helper.create_image(
            name='vcpu_{}'.format(img_model),
            cleanup='function',
            **{ImageMetadata.CPU_MODEL: img_model})[1]
    else:
        image_id = glance_helper.get_guest_image(
            guest_os=GuestImages.DEFAULT['guest'])

    if boot_source == 'image':
        source_id = image_id
    else:
        source_id = cinder_helper.create_volume(name='vcpu_model',
                                                source_id=image_id)[1]
        ResourceCleanup.add('volume', source_id)

    code, vm, msg = vm_helper.boot_vm(name='vcpu_model',
                                      flavor=flv_id,
                                      source=boot_source,
                                      source_id=source_id,
                                      fail_ok=True,
                                      cleanup='function',
                                      avail_zone=avail_zone,
                                      vm_host=vm_host)
    return code, vm, msg
Example #11
def create_flavour_and_image():
    fl_id = nova_helper.create_flavor(name=cirros_params['flavor_name_1'],
                                      vcpus=cirros_params['flavor_vcpus'],
                                      ram=cirros_params['flavor_ram'],
                                      root_disk=cirros_params['flavor_disk'],
                                      properties=cirros_params['properties'], is_public=True,
                                      add_default_specs=False, cleanup="module")[1]
    fl_id_2 = nova_helper.create_flavor(name=cirros_params["flavor_name_2"],
                                        vcpus=cirros_params["flavor_vcpus"],
                                        ram=cirros_params["flavor_ram"],
                                        root_disk=cirros_params["flavor_disk"],
                                        properties=cirros_params["properties"], is_public=True,
                                        add_default_specs=False, cleanup="module")[1]
    im_id = glance_helper.create_image(name=cirros_params['image_name'],
                                       source_image_file=cirros_params['image_file'],
                                       disk_format=cirros_params['disk_format'],
                                       cleanup="module")[1]
    return {
        "flavor1": fl_id,
        "flavor2": fl_id_2,
        "image": im_id
    }
Example #12
def test_autorecovery_image_metadata_in_volume(auto_recovery, disk_format, container_format):
    """
    Create image with given metadata/property.

    Args:
        auto_recovery (str): value for sw_wrs_auto_recovery to set in image
        disk_format (str): such as 'raw', 'qcow2'
        container_format (str): such as bare

    Test Steps:
        - Create image with given disk format, container format, property key and value pair
        - Verify property value is correctly set via glance image-show

    Teardown:
        - Delete created images

    """
    property_key = ImageMetadata.AUTO_RECOVERY

    LOG.tc_step("Create an image with property auto_recovery={}, disk_format={}, container_format={}".
                format(auto_recovery, disk_format, container_format))
    image_id = glance_helper.create_image(disk_format=disk_format, container_format=container_format,
                                          cleanup='function', **{property_key: auto_recovery})[1]

    LOG.tc_step("Create a volume from the image")
    vol_id = cinder_helper.create_volume(name='auto_recov', source_id=image_id, cleanup='function')[1]

    LOG.tc_step("Verify image properties are shown in cinder list")
    field = 'volume_image_metadata'
    vol_image_metadata_dict = cinder_helper.get_volume_show_values(vol_id, fields=field)[0]
    LOG.info("vol_image_metadata dict: {}".format(vol_image_metadata_dict))

    assert auto_recovery.lower() == vol_image_metadata_dict[property_key].lower(), \
        "Actual volume image property {} value - {} is different than value set in image - {}".format(
                property_key, vol_image_metadata_dict[property_key], auto_recovery)

    assert disk_format == vol_image_metadata_dict['disk_format']
    assert container_format == vol_image_metadata_dict['container_format']
Example #13
def test_create_image_with_metadata(property_key, values, disk_format,
                                    container_format):
    """
    Create image with given metadata/property.

    Args:
        property_key (str): the key for the property, such as sw_wrs_auto_recovery
        values (list): list of values to test for the specific key
        disk_format (str): such as 'raw', 'qcow2'
        container_format (str): such as bare

    Test Steps:
        - Create image with given disk format, container format, property key and value pair
        - Verify property value is correctly set via glance image-show

    Teardown:
        - Delete created images

    """
    for value in values:
        LOG.tc_step(
            "Creating image with property {}={}, disk_format={}, container_format={}"
            .format(property_key, value, disk_format, container_format))
        image_id = glance_helper.create_image(
            disk_format=disk_format,
            container_format=container_format,
            cleanup='function',
            **{property_key: value})[1]

        LOG.tc_step(
            "Verify image property is set correctly via glance image-show.")
        actual_property_val = glance_helper.get_image_properties(
            image_id, property_key)[0]
        assert value.lower() == actual_property_val.lower(), \
            "Actual image property {} value - {} is different than set value - {}".format(
                    property_key, actual_property_val, value)
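The **{property_key: value} idiom used above passes a keyword argument whose name is only known at runtime. A tiny self-contained illustration (create_image is stood in by a stub):

def create_image_stub(**properties):
    return properties

property_key = 'sw_wrs_auto_recovery'   # e.g. ImageMetadata.AUTO_RECOVERY
assert create_image_stub(**{property_key: 'true'}) == {'sw_wrs_auto_recovery': 'true'}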
Example #14
def create_flavors_and_images(request):
    # TODO need to check with add_default_specs set to True on baremetal
    fl_id = nova_helper.create_flavor(name=request.param['flavor_name_1'],
                                      vcpus=request.param['flavor_vcpus'],
                                      ram=request.param['flavor_ram'],
                                      root_disk=request.param['flavor_disk'],
                                      properties=request.param['properties'],
                                      is_public=True,
                                      add_default_specs=False,
                                      cleanup="module")[1]
    fl_id_2 = nova_helper.create_flavor(name=request.param["flavor_name_2"],
                                        vcpus=request.param["flavor_vcpus"],
                                        ram=request.param["flavor_ram"],
                                        root_disk=request.param["flavor_disk"],
                                        properties=request.param["properties"],
                                        is_public=True,
                                        add_default_specs=False,
                                        cleanup="module")[1]
    im_id = glance_helper.create_image(
        name=request.param['image_name'],
        source_image_file=request.param['image_file'],
        disk_format=request.param['disk_format'],
        cleanup="module")[1]
    return {"flavor1": fl_id, "flavor2": fl_id_2, "image": im_id}
Example #15
def test_vm_autorecovery(cpu_policy, flavor_auto_recovery, image_auto_recovery, disk_format,
                                           container_format, expt_result):
    """
    Test auto recovery setting in vm with various auto recovery settings in flavor and image.

    Args:
        cpu_policy (str|None): cpu policy to set in flavor
        flavor_auto_recovery (str|None): None (unset) or true or false
        image_auto_recovery (str|None): None (unset) or true or false
        disk_format (str):
        container_format (str):
        expt_result (bool): Expected vm auto recovery behavior. False means disabled, True means enabled.

    Test Steps:
        - Create a flavor with auto recovery and cpu policy set to given values in extra spec
        - Create an image with auto recovery set to given value in metadata
        - Boot a vm with the flavor and from the image
        - Set vm state to error via nova reset-state
        - Verify vm auto recovery behavior is as expected

    Teardown:
        - Delete created vm, volume, image, flavor

    """

    LOG.tc_step("Create a flavor with cpu_policy set to {} and auto_recovery set to {} in extra spec".format(
            cpu_policy, flavor_auto_recovery))
    flavor_id = nova_helper.create_flavor(name='auto_recover_'+str(flavor_auto_recovery), cleanup='function')[1]

    # Add extra specs as specified
    extra_specs = {}
    if cpu_policy is not None:
        extra_specs[FlavorSpec.CPU_POLICY] = cpu_policy
    if flavor_auto_recovery is not None:
        extra_specs[FlavorSpec.AUTO_RECOVERY] = flavor_auto_recovery

    if extra_specs:
        nova_helper.set_flavor(flavor=flavor_id, **extra_specs)

    property_key = ImageMetadata.AUTO_RECOVERY
    LOG.tc_step("Create an image with property auto_recovery={}, disk_format={}, container_format={}".
                format(image_auto_recovery, disk_format, container_format))
    if image_auto_recovery is None:
        image_id = glance_helper.create_image(disk_format=disk_format, container_format=container_format,
                                              cleanup='function')[1]
    else:
        image_id = glance_helper.create_image(disk_format=disk_format, container_format=container_format,
                                              cleanup='function', **{property_key: image_auto_recovery})[1]

    # auto recovery in image metadata will not work if vm booted from volume
    # LOG.tc_step("Create a volume from the image")
    # vol_id = cinder_helper.create_volume(name='auto_recov', image_id=image_id, rtn_exist=False)[1]
    # ResourceCleanup.add('volume', vol_id)

    LOG.tc_step("Boot a vm from image with auto recovery - {} and using the flavor with auto recovery - {}".format(
                image_auto_recovery, flavor_auto_recovery))
    vm_id = vm_helper.boot_vm(name='auto_recov', flavor=flavor_id, source='image', source_id=image_id,
                              cleanup='function')[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

    LOG.tc_step("Verify vm auto recovery is {} by setting vm to error state.".format(expt_result))
    vm_helper.set_vm_state(vm_id=vm_id, error_state=True, fail_ok=False)
    res_bool, actual_val = vm_helper.wait_for_vm_values(vm_id=vm_id, status=VMStatus.ACTIVE, fail_ok=True,
                                                        timeout=600)

    assert expt_result == res_bool, "Expected auto_recovery: {}. Actual vm status: {}".format(
            expt_result, actual_val)

    LOG.tc_step("Ensure vm is pingable after auto recovery")
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
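The expt_result parameter encodes which flavor/image combinations leave auto recovery enabled. A stand-alone sketch of one plausible rule (an assumption, not taken from the source: the flavor extra spec overrides the image metadata, and recovery defaults to enabled when neither is set):

def expect_auto_recovery(flavor_setting, image_setting):
    # Assumption: flavor extra spec overrides image metadata; default enabled.
    setting = flavor_setting if flavor_setting is not None else image_setting
    return setting is None or setting.lower() == 'true'

assert expect_auto_recovery('false', 'true') is False
assert expect_auto_recovery(None, None) is True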
Example #16
def test_vif_model_from_image(img_vif, check_avs_pattern):
    """
    Test vif model set in image metadata is reflected in vm nics when the normal vnic type is used.
    Args:
        img_vif (str):
        check_avs_pattern:

    Test Steps:
        - Create a glance image with given img_vif in metadata
        - Create a cinder volume from above image
        - Create a vm with 3 vnics from above cinder volume:
            - nic1 and nic2 with normal vnic type
            - nic3 with avp (if AVS, otherwise normal)
        - Verify nic1 and nic2 vif model is the same as img_vif
        - Verify nic3 vif model is avp (if AVS, otherwise normal)

    """

    LOG.tc_step(
        "Create an image with vif model metadata set to {}".format(img_vif))
    img_id = glance_helper.create_image('vif_{}'.format(img_vif),
                                        cleanup='function',
                                        **{ImageMetadata.VIF_MODEL:
                                           img_vif})[1]

    LOG.tc_step("Boot a volume from above image")
    volume_id = cinder_helper.create_volume('vif_{}'.format(img_vif),
                                            source_id=img_id,
                                            cleanup='function')[1]

    mgmt_net_id = network_helper.get_mgmt_net_id()
    tenant_net_id = network_helper.get_tenant_net_id()
    internal_net_id = network_helper.get_internal_net_id()

    vif_model = 'avp' if system_helper.is_avs() else img_vif
    nics = [{
        'net-id': mgmt_net_id
    }, {
        'net-id': tenant_net_id
    }, {
        'net-id': internal_net_id,
        'vif-model': vif_model
    }]

    LOG.tc_step(
        "Boot a vm from above volume with following nics: {}".format(nics))
    vm_id = vm_helper.boot_vm(name='vif_img_{}'.format(img_vif),
                              nics=nics,
                              source='volume',
                              source_id=volume_id,
                              cleanup='function')[1]

    LOG.tc_step(
        "Verify vnics info from virsh to ensure tenant net vif is as specified in image metadata"
    )
    internal_mac = network_helper.get_ports(server=vm_id,
                                            network=internal_net_id,
                                            field='MAC Address')[0]
    vm_interfaces = vm_helper.get_vm_interfaces_via_virsh(vm_id)
    for vm_if in vm_interfaces:
        if_mac, if_model = vm_if
        if if_mac == internal_mac:
            assert if_model == vif_model
        else:
            assert if_model == img_vif
Example #17
def test_ipv6_subnet(vif_model, check_avs_pattern):
    """
    Ipv6 Subnet feature test cases

    Test Steps:
        - Create networks
        - Create Ipv6 enabled subnet
        - Boot the first vm with the ipv6 subnet
        - Boot the second vm with ipv6 subnet
        - Configure interfaces to get ipv6 addr
        - Verify connectivity ipv6 interfaces
        - Ping default router

    Test Teardown:
        - Delete vms, subnets, and networks created

    """
    network_names = ['network11']
    net_ids = []
    sub_nets = ["fd00:0:0:21::/64"]
    gateway_ipv6 = "fd00:0:0:21::1"
    subnet_ids = []

    dns_server = "2001:4860:4860::8888"

    LOG.tc_step("Create Networks to setup IPV6 subnet")
    for net in network_names:
        net_ids.append(
            network_helper.create_network(name=net, cleanup='function')[1])

    LOG.tc_step("Create IPV6 Subnet on the Network Created")
    for sub, network in zip(sub_nets, net_ids):
        subnet_ids.append(
            network_helper.create_subnet(network=network,
                                         ip_version=6,
                                         dns_servers=dns_server,
                                         subnet_range=sub,
                                         gateway='none',
                                         cleanup='function')[1])

    LOG.tc_step("Boot a VM with mgmt net and Network with IPV6 subnet")
    mgmt_net_id = network_helper.get_mgmt_net_id()
    nics = [{
        'net-id': mgmt_net_id
    }, {
        'net-id': net_ids[0],
        'vif-model': vif_model
    }]

    image = None
    if vif_model == 'e1000':
        image = glance_helper.create_image(name=vif_model,
                                           hw_vif_model=vif_model,
                                           cleanup='function')[1]

    LOG.tc_step("Boot a vm with created nets")
    vm_id = vm_helper.boot_vm(name='vm-with-ipv6-nic',
                              nics=nics,
                              image_id=image,
                              cleanup='function')[1]
    LOG.tc_step("Setup interface script inside guest and restart network")
    _bring_up_interface(vm_id)

    LOG.tc_step("Boot a second vm with created nets")
    vm_id2 = vm_helper.boot_vm(name='vm2-with-ipv6-nic',
                               nics=nics,
                               cleanup='function')[1]
    LOG.tc_step("Setup interface script inside guest and restart network")
    _bring_up_interface(vm_id2)

    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        ip_addr = _get_ipv6_for_eth(eth_name='eth1', ssh_client=vm_ssh)

        assert ip_addr != '', "Failed to assign ipv6 address"
        LOG.info("Got Ipv6 address: {}".format(ip_addr))

    with vm_helper.ssh_to_vm_from_natbox(vm_id2) as vm_ssh:
        LOG.tc_step("ping b/w vms on the ipv6 net")
        ping = _ping6_vms(ssh_client=vm_ssh, ipv6_addr=ip_addr)
        assert ping == 0, "Ping between VMs failed"
        LOG.tc_step("ping Default Gateway from vms on the ipv6 net")
        ping = _ping6_vms(ssh_client=vm_ssh, ipv6_addr=gateway_ipv6)
        assert ping == 0, "Ping to default router failed"
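The hard-coded subnet, gateway, and DNS constants can be sanity-checked with the stdlib ipaddress module; a quick sketch:

import ipaddress

subnet = ipaddress.ip_network("fd00:0:0:21::/64")
gateway = ipaddress.ip_address("fd00:0:0:21::1")
dns = ipaddress.ip_address("2001:4860:4860::8888")
assert subnet.version == 6 and gateway in subnet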
Example #18
def test_ntfs(stx_openstack_required, host_type="controller"):
    """
    This test will test NTFS mount and NTFS formatted device creation on a TiS
    system.

    Arguments:
    - host_type (string) - host type to be tested, e.g. controller, compute,
      storage

    Returns:
    - Nothing

    Test Steps:
    1.  Check if desired host has USB inserted.  If not, skip
    2.  Wipe USB
    3.  Change label of device
    4.  Create partitions on NTFS device
    5.  Format partitions
    6.  Copy large image to NTFS mount point
    7.  Test mount and big file creation on NTFS mounted device
    """

    # Could pass these in through parametrize instead
    mount_type = "ntfs"
    mount_point = "/media/ntfs/"
    guest_os = 'win_2012'
    boot_source = "image"

    host, usb_device = locate_usb(host_type, min_size=13)
    if not host:
        skip("No USB hardware found on {} host type".format(host_type))

    hosts_with_image_backing = host_helper.get_hosts_in_storage_backing(storage_backing='image')
    if len(hosts_with_image_backing) == 0:
        skip("No hosts with image backing present")

    # if the host with the USB is not the active controller, swact controllers
    con_ssh = ControllerClient.get_active_controller()
    active_controller = system_helper.get_active_controller_name(con_ssh)
    if host != active_controller:
        host_helper.swact_host()

    with host_helper.ssh_to_host(host) as host_ssh:
        wipe_usb(host_ssh, usb_device)
        umount_usb(host_ssh, mount_point=mount_point)
        create_usb_label(host_ssh, usb_device, label="msdos")
        create_usb_partition(host_ssh, usb_device, startpt="0", endpt="2048")
        format_usb(host_ssh, usb_device, partition="1")
        create_usb_partition(host_ssh, usb_device, startpt="2049", endpt="100%")
        format_usb(host_ssh, usb_device, partition="2")
        mount_usb(host_ssh, usb_device, partition="2", mount_type=mount_type, mount_point=mount_point)

    LOG.tc_step("Copy the windows guest image to the mount point")
    src_img = glance_helper.scp_guest_image(img_os=guest_os, dest_dir=mount_point, con_ssh=con_ssh)

    LOG.tc_step("Create flavor for windows guest image")
    flv_id = nova_helper.create_flavor(name=guest_os, vcpus=4, ram=8192, storage_backing="local_image",
                                       guest_os=guest_os)[1]
    nova_helper.set_flavor(flv_id, **{FlavorSpec.CPU_POLICY: "dedicated"})
    ResourceCleanup.add("flavor", flv_id)

    LOG.tc_step("Import image into glance")
    glance_helper.create_image(name=guest_os, source_image_file=src_img, disk_format="qcow2",
                               container_format="bare", con_ssh=con_ssh, cleanup="function")

    LOG.tc_step("Boot VM")
    vm_id = vm_helper.boot_vm(name=guest_os, flavor=flv_id, guest_os=guest_os, source=boot_source, cleanup="function")[1]

    LOG.tc_step("Ping vm and ssh to it")
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    with vm_helper.ssh_to_vm_from_natbox(vm_id) as vm_ssh:
        output = vm_ssh.exec_cmd('pwd', fail_ok=False)[1]
        LOG.info(output)
Example #19
    def test_multiports_on_same_network_pci_vm_actions(self, base_setup_pci,
                                                       vifs):
        """
        Test vm actions on vm with multiple ports with given vif models on
        the same tenant network

        Args:
            base_setup_pci (tuple): base_vm_pci, flavor, mgmt_net_id,
                tenant_net_id, internal_net_id, seg_id
            vifs (list): list of vifs to add to same internal net

        Setups:
            - Create a flavor with dedicated cpu policy (class)
            - Choose management net, one tenant net, and internal0-net1 to be
            used by test (class)
            - Boot a base pci-sriov vm - vm1 with above flavor and networks,
            ping it from NatBox (class)
            - Ping vm1 from itself over data, and internal networks

        Test Steps:
            - Boot a vm under test - vm2 with above flavor and with multiple
            ports on same tenant network with vm1,
                and ping it from NatBox
            - Ping vm2's own data and internal network ips
            - Ping vm2 from vm1 to verify management and data networks
            connection
            - Perform one of the following actions on vm2
                - set to error/ wait for auto recovery
                - suspend/resume
                - cold migration
                - pause/unpause
            - Update vlan interface to proper eth if pci-passthrough device
            moves to different eth
            - Verify ping from vm1 to vm2 over management and data networks
            still works
            - Repeat last 3 steps with different vm actions

        Teardown:
            - Delete created vms and flavor
        """

        base_vm_pci, flavor, base_nics, avail_sriov_net, avail_pcipt_net, \
            pcipt_seg_ids, extra_pcipt_net = base_setup_pci

        pcipt_included = False
        internal_net_id = None
        for vif in vifs:
            if not isinstance(vif, str):
                vif = vif[0]
            if 'pci-passthrough' in vif:
                if not avail_pcipt_net:
                    skip(SkipHostIf.PCIPT_IF_UNAVAIL)
                internal_net_id = avail_pcipt_net
                pcipt_included = True
                continue
            elif 'pci-sriov' in vif:
                if not avail_sriov_net:
                    skip(SkipHostIf.SRIOV_IF_UNAVAIL)
                internal_net_id = avail_sriov_net

        assert internal_net_id, "test script error. Internal net should have " \
                                "been determined."

        nics, glance_vif = _append_nics_for_net(vifs, net_id=internal_net_id,
                                                nics=base_nics)
        if pcipt_included and extra_pcipt_net:
            nics.append(
                {'net-id': extra_pcipt_net, 'vif-model': 'pci-passthrough'})

        img_id = None
        if glance_vif:
            img_id = glance_helper.create_image(name=glance_vif,
                                                hw_vif_model=glance_vif,
                                                cleanup='function')[1]

        LOG.tc_step("Boot a vm with following vifs on same internal net: "
                    "{}".format(vifs))
        vm_under_test = vm_helper.boot_vm(name='multiports_pci',
                                          nics=nics, flavor=flavor,
                                          cleanup='function',
                                          reuse_vol=False, image_id=img_id)[1]
        vm_helper.wait_for_vm_pingable_from_natbox(vm_under_test, fail_ok=False)

        if pcipt_included:
            LOG.tc_step("Add vlan to pci-passthrough interface for VM.")
            vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=vm_under_test,
                                                       net_seg_id=pcipt_seg_ids,
                                                       init_conf=True)

        LOG.tc_step("Ping vm's own data and internal network ips")
        vm_helper.ping_vms_from_vm(to_vms=vm_under_test, from_vm=vm_under_test,
                                   net_types=['data', 'internal'])

        LOG.tc_step(
            "Ping vm_under_test from base_vm over management, data, "
            "and internal networks")
        vm_helper.ping_vms_from_vm(to_vms=vm_under_test, from_vm=base_vm_pci,
                                   net_types=['mgmt', 'data', 'internal'])

        for vm_actions in [['auto_recover'], ['cold_migrate'],
                           ['pause', 'unpause'], ['suspend', 'resume']]:
            if 'auto_recover' in vm_actions:
                LOG.tc_step(
                    "Set vm to error state and wait for auto recovery "
                    "complete, "
                    "then verify ping from base vm over management and "
                    "internal networks")
                vm_helper.set_vm_state(vm_id=vm_under_test, error_state=True,
                                       fail_ok=False)
                vm_helper.wait_for_vm_values(vm_id=vm_under_test,
                                             status=VMStatus.ACTIVE,
                                             fail_ok=False, timeout=600)
            else:
                LOG.tc_step("Perform following action(s) on vm {}: {}".format(
                    vm_under_test, vm_actions))
                for action in vm_actions:
                    vm_helper.perform_action_on_vm(vm_under_test, action=action)

            vm_helper.wait_for_vm_pingable_from_natbox(vm_id=vm_under_test)
            if pcipt_included:
                LOG.tc_step(
                    "Bring up vlan interface for pci-passthrough vm {}.".format(
                        vm_under_test))
                vm_helper.add_vlan_for_vm_pcipt_interfaces(
                    vm_id=vm_under_test, net_seg_id=pcipt_seg_ids)

            LOG.tc_step(
                "Verify ping from base_vm to vm_under_test over management "
                "and internal networks still works "
                "after {}".format(vm_actions))
            vm_helper.ping_vms_from_vm(to_vms=vm_under_test,
                                       from_vm=base_vm_pci,
                                       net_types=['mgmt', 'internal'])
Example #20
def image_with_vif_multiq():
    img_id = glance_helper.create_image(name='vif_multq',
                                        cleanup='function')[1]
    glance_helper.set_image(image=img_id,
                            properties={'hw_vif_multiqueue_enabled': True})
    return img_id
Example #21
def test_attach_cinder_volume_to_instance(vol_vif, check_avs_pattern):
    """
    Validate that cinder volume can be attached to VM created using wrl5_avp and wrl5_virtio images

    Args:
        vol_vif (str)

    Test Steps:
        - Create cinder volume
        - Boot VM using WRL image
        - Attach cinder volume to WRL virtio/avp instance
        - Check VM nics vifs are not changed

    Teardown:
        - Delete VM
        - Delete cinder volume
    """
    mgmt_net_id = network_helper.get_mgmt_net_id()
    tenant_net_id = network_helper.get_tenant_net_id()
    internal_net_id = network_helper.get_internal_net_id()

    vif_model = 'avp' if system_helper.is_avs() else 'virtio'
    nics = [
        {
            'net-id': mgmt_net_id
        },
        {
            'net-id': tenant_net_id
        },
        {
            'net-id': internal_net_id,
            'vif-model': vif_model
        },
    ]

    LOG.tc_step("Boot up VM from default tis image")
    vm_id = vm_helper.boot_vm(name='vm_attach_vol_{}'.format(vol_vif),
                              source='image',
                              nics=nics,
                              cleanup='function')[1]

    prev_ports = network_helper.get_ports(server=vm_id)

    LOG.tc_step(
        "Create an image with vif model metadata set to {}".format(vol_vif))
    img_id = glance_helper.create_image('vif_{}'.format(vol_vif),
                                        cleanup='function',
                                        **{ImageMetadata.VIF_MODEL:
                                           vol_vif})[1]

    LOG.tc_step("Boot a volume from above image")
    volume_id = cinder_helper.create_volume('vif_{}'.format(vol_vif),
                                            source_id=img_id,
                                            cleanup='function')[1]

    # attach the cinder volume created above to the vm
    LOG.tc_step("Attach cinder Volume to VM")
    vm_helper.attach_vol_to_vm(vm_id, vol_id=volume_id)

    LOG.tc_step("Check vm nics vif models are not changed")
    post_ports = network_helper.get_ports(server=vm_id)

    assert prev_ports == post_ports
Example #22
def test_cpu_pol_vm_actions(flv_vcpus, cpu_pol, pol_source, boot_source):
    LOG.tc_step("Create flavor with {} vcpus".format(flv_vcpus))
    flavor_id = nova_helper.create_flavor(name='cpu_pol', vcpus=flv_vcpus)[1]
    ResourceCleanup.add('flavor', flavor_id)

    image_id = glance_helper.get_image_id_from_name(
        GuestImages.DEFAULT['guest'], strict=True)
    if cpu_pol is not None:
        if pol_source == 'flavor':
            specs = {FlavorSpec.CPU_POLICY: cpu_pol}

            LOG.tc_step("Set following extra specs: {}".format(specs))
            nova_helper.set_flavor(flavor_id, **specs)
        else:
            image_meta = {ImageMetadata.CPU_POLICY: cpu_pol}
            LOG.tc_step(
                "Create image with following metadata: {}".format(image_meta))
            image_id = glance_helper.create_image(
                name='cpu_pol_{}'.format(cpu_pol),
                cleanup='function',
                **image_meta)[1]
    if boot_source == 'volume':
        LOG.tc_step("Create a volume from image")
        source_id = cinder_helper.create_volume(name='cpu_pol_{}'.format(cpu_pol),
                                                source_id=image_id)[1]
        ResourceCleanup.add('volume', source_id)
    else:
        source_id = image_id

    prev_cpus = host_helper.get_vcpus_for_computes(field='used_now')

    LOG.tc_step(
        "Boot a vm from {} with above flavor and check vm topology is as "
        "expected".format(boot_source))
    vm_id = vm_helper.boot_vm(name='cpu_pol_{}_{}'.format(cpu_pol, flv_vcpus),
                              flavor=flavor_id,
                              source=boot_source,
                              source_id=source_id,
                              cleanup='function')[1]

    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    vm_host = vm_helper.get_vm_host(vm_id)
    check_helper.check_topology_of_vm(vm_id,
                                      vcpus=flv_vcpus,
                                      cpu_pol=cpu_pol,
                                      vm_host=vm_host,
                                      prev_total_cpus=prev_cpus[vm_host])

    LOG.tc_step("Suspend/Resume vm and check vm topology stays the same")
    vm_helper.suspend_vm(vm_id)
    vm_helper.resume_vm(vm_id)

    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    check_helper.check_topology_of_vm(vm_id,
                                      vcpus=flv_vcpus,
                                      cpu_pol=cpu_pol,
                                      vm_host=vm_host,
                                      prev_total_cpus=prev_cpus[vm_host])

    LOG.tc_step("Stop/Start vm and check vm topology stays the same")
    vm_helper.stop_vms(vm_id)
    vm_helper.start_vms(vm_id)

    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    prev_siblings = check_helper.check_topology_of_vm(
        vm_id,
        vcpus=flv_vcpus,
        cpu_pol=cpu_pol,
        vm_host=vm_host,
        prev_total_cpus=prev_cpus[vm_host])[1]

    LOG.tc_step("Live migrate vm and check vm topology stays the same")
    vm_helper.live_migrate_vm(vm_id=vm_id)

    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    vm_host = vm_helper.get_vm_host(vm_id)
    prev_siblings = prev_siblings if cpu_pol == 'dedicated' else None
    check_helper.check_topology_of_vm(vm_id,
                                      vcpus=flv_vcpus,
                                      cpu_pol=cpu_pol,
                                      vm_host=vm_host,
                                      prev_total_cpus=prev_cpus[vm_host],
                                      prev_siblings=prev_siblings)

    LOG.tc_step("Cold migrate vm and check vm topology stays the same")
    vm_helper.cold_migrate_vm(vm_id=vm_id)

    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    vm_host = vm_helper.get_vm_host(vm_id)
    check_helper.check_topology_of_vm(vm_id,
                                      vcpus=flv_vcpus,
                                      cpu_pol=cpu_pol,
                                      vm_host=vm_host,
                                      prev_total_cpus=prev_cpus[vm_host])
Example #23
def test_create_snapshot_using_boot_from_volume_vm():
    """
    This test creates a snapshot from a VM that is booted from volume using
    nova image-create.  Nova image-create will create a glance image that can
    be used to boot a VM, but the snapshot seen in glance will be empty, since
    the real image is stored in cinder.

    Test Steps:
    1.  Run cinder create --image <img-uuid> --size <size> <bootable_vol>
    2.  Boot a VM using the bootable volume
    3.  Run nova image-create <vm-id> <name> to save a snapshot of the vm
    4.  Run cinder snapshot-list to list the snapshot of the VM
    5.  Run cinder create --snapshot-id <snapshot-from-VM> --name <vol-name> <size>
    6.  Run cinder upload-to-image <vol-uuid> <image-name> to create a image
    7.  Glance image-download to download the snapshot.

    Teardown:
    1.  Delete VMs
    2.  Delete volumes
    3.  Delete snapshots

    Possible Improvements:
    1.  Could update test to use non-raw images, but determining the size of the
    image is more complex if the original file is no longer on the filesystem.
    """

    con_ssh = ControllerClient.get_active_controller()

    LOG.tc_step("Get available images")
    image_list = glance_helper.get_images()

    if len(image_list) == 0:
        skip("The test requires some images to be present")

    # Filter out zero-sized images and non-raw images (latter is lazy)
    image_uuid = vol_size = None
    for image in image_list:
        image_uuid = image
        image_prop_s, image_prop_d = glance_helper.get_image_values(
            image_uuid, ("size", "disk_format"))
        if str(image_prop_s) == "0" or image_prop_d != "raw":
            continue
        else:
            divisor = 1024 * 1024 * 1024
            image_size = int(image_prop_s)
            vol_size = int(math.ceil(image_size / divisor))
            break
    else:
        skip("No usable images found")

    LOG.tc_step("Create a cinder bootable volume")
    # Check if lab has emc-vnx volume types. Use volume type = iscsi;
    # Creating snapshot with emc-vnx(EMS San)
    # is not supported yet.
    volume_types = cinder_helper.get_volume_types(field='Name')
    vol_type = 'iscsi' if any('emc' in t for t in volume_types) else None
    vol_id = cinder_helper.create_volume(source_id=image_uuid,
                                         vol_type=vol_type,
                                         size=vol_size,
                                         fail_ok=False,
                                         cleanup='function')[1]

    LOG.tc_step("Boot VM using newly created bootable volume")
    vm_id = vm_helper.boot_vm(source="volume",
                              source_id=vol_id,
                              cleanup='function')[1]
    vm_name = vm_helper.get_vm_name_from_id(vm_id)
    snapshot_name = vm_name + "_snapshot"

    # nova image-create generates a glance image of 0 size
    # real snapshot is stored in cinder
    LOG.tc_step("Create a snapshot based on that VM")
    code, image_id, snapshot_id = vm_helper.create_image_from_vm(
        vm_id,
        image_name=snapshot_name,
        cleanup='function',
        expt_cinder_snapshot=True)

    vol_name = "vol_from_snapshot"
    # Creates volume from snapshot
    LOG.tc_step("Create cinder volume from vm snapshot")
    snapshot_vol_id = cinder_helper.create_volume(name=vol_name,
                                                  source_id=snapshot_id,
                                                  source_type='snapshot',
                                                  cleanup='function')[1]

    # Creates an image
    LOG.tc_step("Upload cinder volume to image")
    image_name = "cinder_upload"
    img_from_vol_id = glance_helper.create_image(name=image_name,
                                                 volume=snapshot_vol_id,
                                                 auth_info=None,
                                                 cleanup='function')[1]

    image_filename = '{}/images/temp'.format(HostLinuxUser.get_home())
    LOG.tc_step("Download the image snapshot")
    cmd = "image save --file {} {}".format(image_filename, image_id)
    cli.openstack(cmd, ssh_client=con_ssh, fail_ok=False)

    # Downloading should be good enough for validation.  If the file is
    # zero-size, download will report failure.
    LOG.tc_step("Delete the downloaded image")
    con_ssh.exec_cmd("rm {}".format(image_filename), fail_ok=False)

    LOG.tc_step('Delete uploaded image')
    glance_helper.delete_images(images=img_from_vol_id)

    LOG.tc_step('Delete created cinder volume from vm snapshot')
    cinder_helper.delete_volumes(snapshot_vol_id)

    LOG.tc_step(
        'Delete cinder snapshot and image snapshot from boot-from-volume vm')
    glance_helper.delete_images(image_id)
    cinder_helper.delete_volume_snapshots(snapshots=snapshot_id)
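The volume sizing above rounds the raw image size up to whole GiB so the volume is large enough to hold the image. Isolated as a helper (a sketch; assumes Python 3 true division):

import math

def bytes_to_gib_rounded_up(size_bytes):
    # The volume must be at least as large as the raw image.
    return int(math.ceil(size_bytes / (1024 ** 3)))

assert bytes_to_gib_rounded_up(1) == 1
assert bytes_to_gib_rounded_up(1024 ** 3) == 1
assert bytes_to_gib_rounded_up(1024 ** 3 + 1) == 2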
Example #24
def test_cpu_realtime_vm_actions(vcpus, cpu_rt, rt_mask, rt_source, shared_vcpu, numa_nodes, cpu_thread, check_hosts):
    """
    Test vm with realtime cpu policy specified in flavor
    Args:
        vcpus (int):
        cpu_rt (str|None):
        rt_source (str): flavor or image
        rt_mask (str):
        shared_vcpu (int|None):
        numa_nodes (int|None): number of numa_nodes to boot vm on
        cpu_thread:
        check_hosts (tuple): test fixture

    Setups:
        - check storage backing and whether system has shared cpu configured

    Test Steps:
        - Create a flavor with given cpu realtime, realtime mask and shared vcpu extra spec settings
        - Create a vm with above flavor
        - Verify cpu scheduler policies via virsh dumpxml and ps
        - Perform following nova actions and repeat above step after each action:
            ['suspend', 'resume'],
            ['live_migrate'],
            ['cold_migrate'],
            ['rebuild']

    """
    storage_backing, hosts_with_shared_cpu, ht_hosts = check_hosts

    if cpu_thread == 'require' and len(ht_hosts) < 2:
        skip("Less than two hyperthreaded hosts")

    if shared_vcpu is not None and len(hosts_with_shared_cpu) < 2:
        skip("Less than two up hypervisors configured with shared cpu")

    cpu_rt_flv = cpu_rt
    if rt_source == 'image':
        # rt_mask_flv = cpu_rt_flv = None
        rt_mask_flv = '^0'
        rt_mask_img = rt_mask
    else:
        rt_mask_flv = rt_mask
        rt_mask_img = None

    image_id = None
    if rt_mask_img is not None:
        image_metadata = {ImageMetadata.CPU_RT_MASK: rt_mask_img}
        image_id = glance_helper.create_image(name='rt_mask', cleanup='function', **image_metadata)[1]

    vol_id = cinder_helper.create_volume(source_id=image_id)[1]
    ResourceCleanup.add('volume', vol_id)

    name = 'rt-{}_mask-{}_{}vcpu'.format(cpu_rt, rt_mask_flv, vcpus)
    flv_id = create_rt_flavor(vcpus, cpu_pol='dedicated', cpu_rt=cpu_rt_flv, rt_mask=rt_mask_flv,
                              shared_vcpu=shared_vcpu, numa_nodes=numa_nodes, cpu_thread=cpu_thread,
                              storage_backing=storage_backing)[0]

    LOG.tc_step("Boot a vm with above flavor")
    vm_id = vm_helper.boot_vm(name=name, flavor=flv_id, cleanup='function', source='volume', source_id=vol_id)[1]
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)

    expt_rt_cpus, expt_ord_cpus = parse_rt_and_ord_cpus(vcpus=vcpus, cpu_rt=cpu_rt, cpu_rt_mask=rt_mask)

    check_rt_and_ord_cpus_via_virsh_and_ps(vm_id, vcpus, expt_rt_cpus, expt_ord_cpus, shared_vcpu=shared_vcpu)
    vm_host = vm_helper.get_vm_host(vm_id)
    if shared_vcpu:
        assert vm_host in hosts_with_shared_cpu

    numa_num = 1 if numa_nodes is None else numa_nodes
    check_helper.check_topology_of_vm(vm_id, vcpus, cpu_pol='dedicated', cpu_thr_pol=cpu_thread, vm_host=vm_host)

    expt_current_cpu = vcpus
    # if min_vcpus is not None:
    #     GuestLogs.add(vm_id)
    #     LOG.tc_step("Scale down cpu once")
    #     vm_helper.scale_vm(vm_id, direction='down', resource='cpu')
    #     vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    #
    #     LOG.tc_step("Check current vcpus in nova show is reduced after scale down")
    #     expt_current_cpu -= 1
    #     check_helper.check_vm_vcpus_via_nova_show(vm_id, min_vcpus, expt_current_cpu, vcpus)

    for actions in [['suspend', 'resume'], ['stop', 'start'], ['live_migrate'], ['cold_migrate'], ['rebuild']]:
        LOG.tc_step("Perform {} on vm and check realtime cpu policy".format(actions))
        for action in actions:
            kwargs = {}
            if action == 'rebuild':
                kwargs = {'image_id': image_id}
            vm_helper.perform_action_on_vm(vm_id, action=action, **kwargs)

        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
        vm_host_post_action = vm_helper.get_vm_host(vm_id)
        if shared_vcpu:
            assert vm_host_post_action in hosts_with_shared_cpu

        LOG.tc_step("Check cpu thread policy in vm topology and vcpus in nova show after {}".format(actions))
        check_helper.check_topology_of_vm(vm_id, vcpus, cpu_pol='dedicated', cpu_thr_pol=cpu_thread, numa_num=numa_num,
                                          vm_host=vm_host_post_action, current_vcpus=expt_current_cpu)

        check_virsh = True
        offline_cpu = None

        check_rt_and_ord_cpus_via_virsh_and_ps(vm_id, vcpus, expt_rt_cpus, expt_ord_cpus, shared_vcpu=shared_vcpu,
                                               offline_cpus=offline_cpu, check_virsh_vcpusched=check_virsh)
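parse_rt_and_ord_cpus is project code that is not shown here. Nova-style realtime masks such as '^0' exclude the listed vcpus from the realtime set, leaving them as ordinary vcpus. A self-contained sketch of that exclusion-only syntax (the real helper may accept more):

def parse_rt_mask(mask, vcpus):
    # '^N' and '^N-M' entries exclude vcpus from the realtime set
    # (assumption: comma-separated, exclusion-only syntax).
    excluded = set()
    for part in mask.split(','):
        part = part.lstrip('^')
        if '-' in part:
            lo, hi = part.split('-')
            excluded.update(range(int(lo), int(hi) + 1))
        elif part:
            excluded.add(int(part))
    rt_cpus = [i for i in range(vcpus) if i not in excluded]
    ord_cpus = sorted(excluded & set(range(vcpus)))
    return rt_cpus, ord_cpus

assert parse_rt_mask('^0', 4) == ([1, 2, 3], [0])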
Example #25
def refstack_setup(refstack_pre_check, request):

    LOG.fixture_step("Enable Swift if not already done")
    storage_helper.modify_swift(enable=True)

    LOG.fixture_step("Create tenants, users, and update quotas")
    compliance_helper.create_tenants_and_update_quotas(add_swift_role=True)

    LOG.fixture_step("Create test flavors")
    flavors = []
    for i in range(2):
        flavor_id = nova_helper.create_flavor(name='refstack', vcpus=2, ram=2048, root_disk=2, cleanup='session')[1]
        nova_helper.set_flavor(flavor_id, **{FlavorSpec.CPU_POLICY: 'dedicated',
                                             FlavorSpec.MEM_PAGE_SIZE: 2048})
        flavors.append(flavor_id)

    LOG.fixture_step("Get/create test images")
    images = [glance_helper.get_image_id_from_name()]
    image_id = glance_helper.create_image()[1]
    images.append(image_id)
    ResourceCleanup.add('image', image_id, scope='session')

    LOG.fixture_step("Setup public router if not already done.")
    external_net_id = network_helper.get_networks(external=True)[0]
    public_router = 'public-router0'
    pub_routers = network_helper.get_routers(name=public_router, auth_info=Tenant.get('admin'))
    if not pub_routers:
        LOG.info("Create public router and add interfaces")
        public_router_id = network_helper.create_router(name=public_router, project=Tenant.get('admin')['tenant'])[1]
        network_helper.set_router_gateway(router_id=public_router_id, external_net=external_net_id)

        internal_subnet = 'internal0-subnet0-1'
        gateway = '10.1.1.1'
        network_helper.set_subnet(subnet=internal_subnet, gateway=gateway)
        network_helper.add_router_interface(router=public_router_id, subnet=internal_subnet,
                                            auth_info=Tenant.get('admin'))

    keystone_pub = keystone_helper.get_endpoints(field='URL', interface='public', service_name='keystone')[0]
    keystone_pub_url = keystone_pub.split('/v')[0] + '/'
    keystone_pub_url = keystone_pub_url.replace(':', r'\:').replace('/', r'\/')

    params_dict = {
        'image_ref': images[0],
        'image_ref_alt': images[1],
        'flavor_ref': flavors[0],
        'flavor_ref_alt': flavors[1],
        'public_network_id': external_net_id,
        'uri': keystone_pub_url + 'v2.0',
        'uri_v3': keystone_pub_url + 'v3',
        'discoverable_apis': 'tempurl,container_quotas',
        'container_sync': 'false',
        'object_versioning': 'true',
        'discoverability': 'false',
    }

    LOG.fixture_step("Update tempest.conf parameters on cumulus server: \n{}".format(params_dict))
    with compliance_helper.ssh_to_compliance_server() as server_ssh:
        for key, val in params_dict.items():
            server_ssh.exec_cmd('sed -i "s/^{} =.*/{} = {}/g" {}'.format(key, key, val, RefStack.TEMPEST_CONF),
                                fail_ok=False)
            server_ssh.exec_cmd('grep {} {}'.format(val, RefStack.TEMPEST_CONF), fail_ok=False)

        compliance_helper.add_route_for_vm_access(server_ssh)

    def scp_logs():
        LOG.info("scp test results files from refstack test host to local automation dir")
        dest_dir = os.path.join(ProjVar.get_var('LOG_DIR'), 'compliance')
        os.makedirs(dest_dir, exist_ok=True)
        localhost = LocalHostClient()
        localhost.connect()

        for item in RefStack.LOG_FILES:
            source_path = '{}/{}'.format(RefStack.TEST_HISTORY_DIR, item)
            localhost.scp_on_dest(source_ip=ComplianceCreds.get_host(), source_user=ComplianceCreds.get_user(),
                                  source_pswd=ComplianceCreds.get_password(), source_path=source_path,
                                  dest_path=dest_dir, timeout=300, cleanup=False)

        origin_name = ComplianceVar.get_var('REFSTACK_SUITE').rsplit(r'/', maxsplit=1)[-1]
        localhost.exec_cmd('mv {}/test-list.txt {}/{}'.format(dest_dir, dest_dir, origin_name))
    request.addfinalizer(scp_logs)
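The fixture substitutes values into tempest.conf with sed, pre-escaping ':' and '/'. Only '/', '&', and '\' are actually special on the replacement side of a sed s/.../.../ expression with '/' as the delimiter, so a hedged helper for that escaping might look like:

def sed_escape(value):
    # Escape the characters that are special in a sed s/.../.../ replacement
    # when '/' is the delimiter; ':' needs no escaping.
    return ''.join('\\' + ch if ch in '/&\\' else ch for ch in value)

assert sed_escape('http://host:5000/') == 'http:\\/\\/host:5000\\/'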
Example #26
    def image_mempage(self):
        LOG.fixture_step("(class) Create a glance image for mempage testcases")
        image_id = glance_helper.create_image(name='mempage',
                                              cleanup='class')[1]
        return image_id
Example #27
def _test_cpu_pol_dedicated_shared_coexists(vcpus_dedicated, vcpus_shared, pol_source, boot_source):
    """
    Test two vms coexisting on the same host, one with the dedicated cpu property, and one with the shared cpu property.

    Args:
        vcpus_dedicated: Amount of vcpu(s) to allocate for the vm with the dedicated CPU_POLICY.
        vcpus_shared: Amount of vcpu(s) to allocate for the vm with the shared CPU_POLICY.
        pol_source: Where the CPU_POLICY is set from.
        boot_source: The boot media the vm will use to boot.

    Test Setups:
        - Create two flavors, one for each vm.
        - If using 'flavor' for pol_source, set extra specs for the CPU_POLICY.
        - If using 'image' for pol_source, set ImageMetaData for the CPU_POLICY.
        - If using 'volume' for boot_source, create volume from tis image.
        - If using 'image' for boot_source, use tis image.
        - Determine the amount of free vcpu(s) on the compute before testing.

    Test Steps:
        - Boot the first vm with CPU_POLICY: dedicated.
        - Wait until vm is pingable from natbox.
        - Check vm topology for vcpu(s).
        - Determine the amount of free vcpu(s) on the compute.
        - Boot the second vm with CPU_POLICY: shared.
        - Wait until vm is pingable from natbox.
        - Check vm topology for vcpu(s).
        - Delete vms
        - Determine the amount of free vcpu(s) on the compute after testing.
        - Compare free vcpu(s) on the compute before and after testing, ensuring they are the same.

    Test Teardown:
        - Delete created volumes and flavors
    """
    LOG.tc_step("Getting host list")
    target_hosts = host_helper.get_hypervisors(state='up')
    target_host = target_hosts[0]
    storage_backing = host_helper.get_host_instance_backing(host=target_host)
    if 'image' in storage_backing:
        storage_backing = 'local_image'
    elif 'remote' in storage_backing:
        storage_backing = 'remote'

    image_id = glance_helper.get_image_id_from_name(GuestImages.DEFAULT['guest'], strict=True)
    pre_test_cpus = host_helper.get_vcpus_for_computes(field='used_now')

    collection = ['dedicated', 'shared']
    vm_ids = []
    for x in collection:
        if x == 'dedicated':
            vcpus = vcpus_dedicated
        else:
            vcpus = vcpus_shared
        LOG.tc_step("Create {} flavor with {} vcpus".format(x, vcpus))
        flavor_id = nova_helper.create_flavor(name=x, vcpus=vcpus, storage_backing=storage_backing)[1]
        ResourceCleanup.add('flavor', flavor_id)

        if pol_source == 'flavor':
            LOG.tc_step("Set CPU_POLICY for {} flavor".format(x))
            specs = {FlavorSpec.CPU_POLICY: x}
            nova_helper.set_flavor(flavor_id, **specs)
        else:
            LOG.tc_step("Create image with CPU_POLICY: {}".format(x))
            image_meta = {ImageMetadata.CPU_POLICY: x}
            image_id = glance_helper.create_image(name='cpu_pol_{}'.format(x), cleanup='function', **image_meta)[1]

        if boot_source == 'volume':
            LOG.tc_step("Create volume from image")
            source_id = cinder_helper.create_volume(name='cpu_pol_{}'.format(x), source_id=image_id)[1]
            ResourceCleanup.add('volume', source_id)
        else:
            source_id = image_id

        pre_boot_cpus = host_helper.get_vcpus_for_computes(field='used_now')
        LOG.tc_step("Booting cpu_pol_{}".format(x))
        vm_id = vm_helper.boot_vm(name='cpu_pol_{}'.format(x), flavor=flavor_id, source=boot_source,
                                  source_id=source_id, avail_zone='nova', vm_host=target_host, cleanup='function')[1]

        vm_ids.append(vm_id)

        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
        check_helper.check_topology_of_vm(vm_id, vcpus=vcpus, cpu_pol=x, vm_host=target_host,
                                          prev_total_cpus=pre_boot_cpus[target_host])

    LOG.tc_step("Deleting both dedicated and shared vms")
    vm_helper.delete_vms(vms=vm_ids)

    post_delete_cpus = host_helper.get_vcpus_for_computes(field='used_now')
    assert post_delete_cpus == pre_test_cpus, "vcpu count after test does not equal vcpu count before test"