Example 1
def __create_image(img_os, scope):
    if not img_os:
        img_os = GuestImages.DEFAULT['guest']

    LOG.fixture_step(
        "({}) Get or create a glance image with {} guest OS".format(
            scope, img_os))
    img_info = GuestImages.IMAGE_FILES[img_os]
    img_id = glance_helper.get_image_id_from_name(img_os, strict=True)
    if not img_id:
        if img_info[0] is not None:
            image_path = glance_helper.scp_guest_image(img_os=img_os)
        else:
            img_dir = GuestImages.DEFAULT['image_dir']
            image_path = "{}/{}".format(img_dir, img_info[2])

        disk_format = 'raw' if img_os in [
            'cgcs-guest', 'tis-centos-guest', 'vxworks'
        ] else 'qcow2'
        img_id = glance_helper.create_image(name=img_os,
                                            source_image_file=image_path,
                                            disk_format=disk_format,
                                            container_format='bare',
                                            cleanup=scope)[1]

    return img_id
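
A minimal usage sketch, assuming this helper lives alongside the pytest fixtures above; passing None for img_os falls back to the default guest, and 'session' is an illustrative scope value:

# Hypothetical call: get or create the default guest image once per session.
default_img_id = __create_image(None, 'session')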
Example 2
def image_():
    """
    Test fixture to get the guest image.

    Returns: the guest image id

    """
    return glance_helper.get_image_id_from_name()
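
A hedged sketch of consuming this fixture in a test; it assumes image_ is registered as a pytest fixture elsewhere in the module, and the test name is hypothetical:

def test_default_guest_image_exists(image_):
    # image_ resolves to the default guest image id via glance_helper.
    assert image_, "default guest image not found in glance"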
Example 3
def create_image_with_metadata(guest_os, property_key, values, disk_format,
                               container_format):
    """
    Create image with given metadata/property.

    Args:
        guest_os (str): guest OS of the image to create
        property_key (str): the key for the property, such as sw_wrs_auto_recovery
        values (list): list of values to test for the specific key
        disk_format (str): such as 'raw', 'qcow2'
        container_format (str): such as bare

    Test Steps:
        - Create image with given disk format, container format, property key and value pair
        - Verify property value is correctly set via glance image-show

    Returns: List of image ids


    """
    image_ids = []

    for value in values:
        LOG.tc_step(
            "Creating image with property {}={}, disk_format={}, container_format={}"
            .format(property_key, value, disk_format, container_format))
        image_name = GuestImages.IMAGE_FILES[guest_os][0]
        image_name = str(image_name) + "_auto"
        img_id = glance_helper.get_image_id_from_name(image_name, strict=True)
        if not img_id:
            image_path = glance_helper.scp_guest_image(img_os=guest_os)

            image_id = glance_helper.create_image(
                source_image_file=image_path,
                cleanup='function',
                disk_format=disk_format,
                container_format=container_format,
                **{property_key: value})[1]
            image_ids.append(image_id)

            LOG.tc_step(
                "Verify image property is set correctly via glance image-show."
            )
            actual_property_val = glance_helper.get_image_properties(
                image_id, property_key)[0]
            assert value.lower() == actual_property_val.lower(), \
                "Actual image property {} value - {} is different than set value - {}".format(
                    property_key, actual_property_val, value)
        else:
            image_ids.append(img_id)

    return image_ids
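
A minimal usage sketch; the guest_os value is illustrative, while the property key reuses the sw_wrs_auto_recovery example from the docstring:

# Create (or reuse) one image per value and verify the property on each.
image_ids = create_image_with_metadata(guest_os='tis-centos-guest',
                                       property_key='sw_wrs_auto_recovery',
                                       values=['true', 'false'],
                                       disk_format='qcow2',
                                       container_format='bare')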
Example 4
def test_boot_vm_cpu_policy_image(flv_vcpus, flv_pol, img_pol, boot_source, expt_err):
    LOG.tc_step("Create flavor with {} vcpus".format(flv_vcpus))
    flavor_id = nova_helper.create_flavor(name='cpu_pol_{}'.format(flv_pol), vcpus=flv_vcpus)[1]
    ResourceCleanup.add('flavor', flavor_id)

    if flv_pol is not None:
        specs = {FlavorSpec.CPU_POLICY: flv_pol}

        LOG.tc_step("Set following extra specs: {}".format(specs))
        nova_helper.set_flavor(flavor_id, **specs)

    if img_pol is not None:
        image_meta = {ImageMetadata.CPU_POLICY: img_pol}
        LOG.tc_step("Create image with following metadata: {}".format(image_meta))
        image_id = glance_helper.create_image(name='cpu_pol_{}'.format(img_pol), cleanup='function', **image_meta)[1]
    else:
        image_id = glance_helper.get_image_id_from_name(GuestImages.DEFAULT['guest'], strict=True)

    if boot_source == 'volume':
        LOG.tc_step("Create a volume from image")
        source_id = cinder_helper.create_volume(name='cpu_pol_img', source_id=image_id)[1]
        ResourceCleanup.add('volume', source_id)
    else:
        source_id = image_id

    prev_cpus = host_helper.get_vcpus_for_computes(field='used_now')

    LOG.tc_step("Attempt to boot a vm from above {} with above flavor".format(boot_source))
    code, vm_id, msg = vm_helper.boot_vm(name='cpu_pol', flavor=flavor_id, source=boot_source,
                                         source_id=source_id, fail_ok=True, cleanup='function')

    # check for negative tests
    if expt_err is not None:
        LOG.tc_step("Check VM failed to boot due to conflict in flavor and image.")
        assert 4 == code, "Expect boot vm cli reject and no vm booted. Actual: {}".format(msg)
        assert eval(expt_err) in msg, "Expected error message is not found in cli return."
        return  # end the test for negative cases

    # Check for positive tests
    LOG.tc_step("Check vm is successfully booted.")
    assert 0 == code, "Expect vm boot successfully. Actual: {}".format(msg)

    # Calculate expected policy:
    expt_cpu_pol = flv_pol if flv_pol else img_pol
    expt_cpu_pol = expt_cpu_pol if expt_cpu_pol else 'shared'

    vm_host = vm_helper.get_vm_host(vm_id)
    check_helper.check_topology_of_vm(vm_id, vcpus=flv_vcpus, cpu_pol=expt_cpu_pol, vm_host=vm_host,
                                      prev_total_cpus=prev_cpus[vm_host])
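
A hedged sketch of how this test might be parametrized; the mark import, the value combinations, and the CPUPolicyErr constant are illustrative, not from the source (expt_err is passed as a string because the test eval()s it):

@mark.parametrize(('flv_vcpus', 'flv_pol', 'img_pol', 'boot_source', 'expt_err'), [
    (2, None, 'dedicated', 'image', None),  # positive case: policy set via image only
    (3, 'shared', 'dedicated', 'volume', "CPUPolicyErr.CONFLICT_FLV_IMG"),  # hypothetical error constant
])
def test_boot_vm_cpu_policy_image(flv_vcpus, flv_pol, img_pol, boot_source, expt_err):
    ...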
Example 5
def test_ping_vms_from_vm_various_images(vm_image):
    image_id = glance_helper.get_image_id_from_name(name=vm_image,
                                                    strict=False)
    if not image_id:
        skip("No image name has substring: {}.".format(vm_image))

    vol_size = 1
    if vm_image in ['ubuntu', 'centos']:
        vol_size = 8
    vol_id = cinder_helper.create_volume(name='vol_' + vm_image,
                                         source_id=image_id,
                                         size=vol_size)[1]
    vm_id = vm_helper.boot_vm(source='volume', source_id=vol_id)[1]

    vm_helper.ping_vms_from_vm(from_vm=vm_id)
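
A hedged parametrization sketch for the test above; 'ubuntu' and 'centos' are the images given a larger volume in the branch above, 'cgcs-guest' is illustrative:

@mark.parametrize('vm_image', ['ubuntu', 'centos', 'cgcs-guest'])
def test_ping_vms_from_vm_various_images(vm_image):
    ...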
Example 6
def test_cpu_pol_vm_actions(flv_vcpus, cpu_pol, pol_source, boot_source):
    LOG.tc_step("Create flavor with {} vcpus".format(flv_vcpus))
    flavor_id = nova_helper.create_flavor(name='cpu_pol', vcpus=flv_vcpus)[1]
    ResourceCleanup.add('flavor', flavor_id)

    image_id = glance_helper.get_image_id_from_name(
        GuestImages.DEFAULT['guest'], strict=True)
    if cpu_pol is not None:
        if pol_source == 'flavor':
            specs = {FlavorSpec.CPU_POLICY: cpu_pol}

            LOG.tc_step("Set following extra specs: {}".format(specs))
            nova_helper.set_flavor(flavor_id, **specs)
        else:
            image_meta = {ImageMetadata.CPU_POLICY: cpu_pol}
            LOG.tc_step(
                "Create image with following metadata: {}".format(image_meta))
            image_id = glance_helper.create_image(
                name='cpu_pol_{}'.format(cpu_pol),
                cleanup='function',
                **image_meta)[1]
    if boot_source == 'volume':
        LOG.tc_step("Create a volume from image")
        source_id = cinder_helper.create_volume(name='cpu_pol_{}'.format(cpu_pol),
                                                source_id=image_id)[1]
        ResourceCleanup.add('volume', source_id)
    else:
        source_id = image_id

    prev_cpus = host_helper.get_vcpus_for_computes(field='used_now')

    LOG.tc_step(
        "Boot a vm from {} with above flavor and check vm topology is as "
        "expected".format(boot_source))
    vm_id = vm_helper.boot_vm(name='cpu_pol_{}_{}'.format(cpu_pol, flv_vcpus),
                              flavor=flavor_id,
                              source=boot_source,
                              source_id=source_id,
                              cleanup='function')[1]

    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    vm_host = vm_helper.get_vm_host(vm_id)
    check_helper.check_topology_of_vm(vm_id,
                                      vcpus=flv_vcpus,
                                      cpu_pol=cpu_pol,
                                      vm_host=vm_host,
                                      prev_total_cpus=prev_cpus[vm_host])

    LOG.tc_step("Suspend/Resume vm and check vm topology stays the same")
    vm_helper.suspend_vm(vm_id)
    vm_helper.resume_vm(vm_id)

    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    check_helper.check_topology_of_vm(vm_id,
                                      vcpus=flv_vcpus,
                                      cpu_pol=cpu_pol,
                                      vm_host=vm_host,
                                      prev_total_cpus=prev_cpus[vm_host])

    LOG.tc_step("Stop/Start vm and check vm topology stays the same")
    vm_helper.stop_vms(vm_id)
    vm_helper.start_vms(vm_id)

    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    prev_siblings = check_helper.check_topology_of_vm(
        vm_id,
        vcpus=flv_vcpus,
        cpu_pol=cpu_pol,
        vm_host=vm_host,
        prev_total_cpus=prev_cpus[vm_host])[1]

    LOG.tc_step("Live migrate vm and check vm topology stays the same")
    vm_helper.live_migrate_vm(vm_id=vm_id)

    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    vm_host = vm_helper.get_vm_host(vm_id)
    prev_siblings = prev_siblings if cpu_pol == 'dedicated' else None
    check_helper.check_topology_of_vm(vm_id,
                                      vcpus=flv_vcpus,
                                      cpu_pol=cpu_pol,
                                      vm_host=vm_host,
                                      prev_total_cpus=prev_cpus[vm_host],
                                      prev_siblings=prev_siblings)

    LOG.tc_step("Cold migrate vm and check vm topology stays the same")
    vm_helper.cold_migrate_vm(vm_id=vm_id)

    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    vm_host = vm_helper.get_vm_host(vm_id)
    check_helper.check_topology_of_vm(vm_id,
                                      vcpus=flv_vcpus,
                                      cpu_pol=cpu_pol,
                                      vm_host=vm_host,
                                      prev_total_cpus=prev_cpus[vm_host])
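
The test repeats the same ping-then-verify assertion after every lifecycle action; a hedged refactoring sketch of that pattern (the helper name is hypothetical):

def _check_topology_unchanged(vm_id, flv_vcpus, cpu_pol, vm_host, prev_cpus):
    # Wait until the vm is reachable again, then re-check its cpu topology.
    vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
    return check_helper.check_topology_of_vm(vm_id, vcpus=flv_vcpus,
                                             cpu_pol=cpu_pol, vm_host=vm_host,
                                             prev_total_cpus=prev_cpus[vm_host])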
Example 7
def verify_heat_resource(to_verify=None,
                         template_name=None,
                         stack_name=None,
                         auth_info=None,
                         fail_ok=False):
    """
        Verify the heat resource creation/deletion for given resources

        Args:
            to_verify (list): Resources to verify creation or deletion.
            template_name (str): template to be used to create heat stack.
            stack_name(str): stack name used to create the stack
            auth_info (dict|None): auth info to use for the queries
            fail_ok (bool): return 1 instead of asserting when the resource is missing

        Returns (tuple): (rtn_code (int), msg (str)); rtn_code is 0 if the
            resource is found, 1 otherwise (only possible when fail_ok=True)

    """
    LOG.info("Verifying heat resource {}".format(to_verify))

    rtn_code = 0
    msg = "Heat resource {} appeared".format(to_verify)
    item_verified = to_verify

    if to_verify == 'volume':
        LOG.info("Verifying volume")
        vol_name = getattr(Heat, template_name)['vol_name']
        resource_found = cinder_helper.get_volumes(name=vol_name)

    elif to_verify == 'ceilometer_alarm':
        resource_found = ceilometer_helper.get_alarms(name=stack_name,
                                                      strict=False)

    elif to_verify == 'neutron_port':
        port_name = getattr(Heat, template_name)['port_name']
        if port_name is None:
            port_name = stack_name
        resource_found = network_helper.get_ports(port_name=port_name)

    elif to_verify == 'neutron_provider_net_range':
        resource_found = network_helper.get_network_segment_ranges(
            field='name', physical_network='sample_physnet_X')

    elif to_verify == 'nova_server_group':
        resource_found = nova_helper.get_server_groups(name=stack_name)

    elif to_verify == 'vm':
        vm_name = getattr(Heat, template_name)['vm_name']
        resource_found = vm_helper.get_vms(vms=vm_name, strict=False)

    elif to_verify == 'nova_flavor':
        resource_found = nova_helper.get_flavors(name='sample-flavor')

    elif to_verify == 'neutron_net':
        resource_found = network_helper.get_tenant_net_id(
            net_name='sample-net')

    elif to_verify == 'image':
        resource_found = glance_helper.get_image_id_from_name(
            name='sample_image')

    elif to_verify == 'subnet':
        resource_found = network_helper.get_subnets(name='sample_subnet')

    elif to_verify == 'floating_ip':
        resource_found = network_helper.get_floating_ips()

    elif to_verify == 'router':
        resource_found = network_helper.get_tenant_router(
            router_name='sample_router', auth_info=auth_info)

    elif to_verify == 'router_gateway':
        item_verified = 'sample_gateway_router'
        resource_found = network_helper.get_tenant_router(
            router_name='sample_gateway_router', auth_info=auth_info)
        if resource_found:
            item_verified = to_verify
            resource_found = network_helper.get_router_ext_gateway_info(
                router_id=resource_found, auth_info=auth_info)

    elif to_verify == 'router_interface':
        item_verified = 'sample_if_router'
        router_id = network_helper.get_tenant_router(
            router_name='sample_if_router', auth_info=auth_info)
        resource_found = router_id
        if resource_found:
            item_verified = 'sample_if_subnet'
            subnets = network_helper.get_subnets(name='sample_if_subnet',
                                                 auth_info=auth_info)
            resource_found = subnets
            if resource_found:
                item_verified = to_verify
                router_subnets = network_helper.get_router_subnets(
                    router=router_id, auth_info=auth_info)
                resource_found = resource_found[0] in router_subnets

    elif to_verify == 'security_group':
        resource_found = network_helper.get_security_groups(
            name='SecurityGroupDeluxe')
    elif to_verify == 'key_pair':
        kp_name = getattr(Heat, template_name)['key_pair_name']
        resource_found = nova_helper.get_keypairs(name=kp_name)
    elif to_verify == 'neutron_qos':
        resource_found = network_helper.get_qos_policies(name='SampleQoS',
                                                         auth_info=auth_info)
    else:
        raise ValueError("Unknown item to verify: {}".format(to_verify))

    if not resource_found:
        msg = "Heat stack {} resource {} does not exist".format(
            stack_name, item_verified)
        if fail_ok:
            rtn_code = 1
        else:
            assert resource_found, msg

    LOG.info(msg)
    return rtn_code, msg
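
A minimal usage sketch with fail_ok=True, so a missing resource yields rtn_code 1 instead of an assertion failure; the template and stack names are hypothetical:

rtn_code, msg = verify_heat_resource(to_verify='volume',
                                     template_name='SAMPLE_TEMPLATE',
                                     stack_name='sample_stack',
                                     fail_ok=True)
assert rtn_code == 0, msg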
Example 8
def _test_cpu_pol_dedicated_shared_coexists(vcpus_dedicated, vcpus_shared, pol_source, boot_source):
    """
    Test two vms coexisting on the same host, one with the dedicated cpu property, and one with the shared cpu property.

    Args:
        vcpus_dedicated: Amount of vcpu(s) to allocate for the vm with the dedicated CPU_POLICY.
        vcpus_shared: Amount of vcpu(s) to allocate for the vm with the shared CPU_POLICY.
        pol_source: Where the CPU_POLICY is set from.
        boot_source: The boot media the vm will use to boot.

    Test Setups:
        - Create two flavors, one for each vm.
        - If using 'flavor' for pol_source, set extra specs for the CPU_POLICY.
        - If using 'image' for pol_source, set ImageMetaData for the CPU_POLICY.
        - If using 'volume' for boot_source, create volume from tis image.
        - If using 'image' for boot_source, use tis image.
        - Determine the amount of free vcpu(s) on the compute before testing.

    Test Steps:
        - Boot the first vm with CPU_POLICY: dedicated.
        - Wait until vm is pingable from natbox.
        - Check vm topology for vcpu(s).
        - Determine the amount of free vcpu(s) on the compute.
        - Boot the second vm with CPU_POLICY: shared.
        - Wait until vm is pingable from natbox.
        - Check vm topology for vcpu(s).
        - Delete vms
        - Determine the amount of free vcpu(s) on the compute after testing.
        - Compare free vcpu(s) on the compute before and after testing, ensuring they are the same.

    Test Teardown:
        - Delete created volumes and flavors
    """
    LOG.tc_step("Getting host list")
    target_hosts = host_helper.get_hypervisors(state='up')
    target_host = target_hosts[0]
    storage_backing = host_helper.get_host_instance_backing(host=target_host)
    if 'image' in storage_backing:
        storage_backing = 'local_image'
    elif 'remote' in storage_backing:
        storage_backing = 'remote'

    image_id = glance_helper.get_image_id_from_name(GuestImages.DEFAULT['guest'], strict=True)
    pre_test_cpus = host_helper.get_vcpus_for_computes(field='used_now')

    collection = ['dedicated', 'shared']
    vm_ids = []
    for x in collection:
        if x == 'dedicated':
            vcpus = vcpus_dedicated
        else:
            vcpus = vcpus_shared
        LOG.tc_step("Create {} flavor with {} vcpus".format(x, vcpus))
        flavor_id = nova_helper.create_flavor(name=x, vcpus=vcpus, storage_backing=storage_backing)[1]
        ResourceCleanup.add('flavor', flavor_id)

        if pol_source == 'flavor':
            LOG.tc_step("Set CPU_POLICY for {} flavor".format(x))
            specs = {FlavorSpec.CPU_POLICY: x}
            nova_helper.set_flavor(flavor_id, **specs)
        else:
            LOG.tc_step("Create image with CPU_POLICY: {}".format(x))
            image_meta = {ImageMetadata.CPU_POLICY: x}
            image_id = glance_helper.create_image(name='cpu_pol_{}'.format(x), cleanup='function', **image_meta)[1]

        if boot_source == 'volume':
            LOG.tc_step("Create volume from image")
            source_id = cinder_helper.create_volume(name='cpu_pol_{}'.format(x), source_id=image_id)[1]
            ResourceCleanup.add('volume', source_id)
        else:
            source_id = image_id

        pre_boot_cpus = host_helper.get_vcpus_for_computes(field='used_now')
        LOG.tc_step("Booting cpu_pol_{}".format(x))
        vm_id = vm_helper.boot_vm(name='cpu_pol_{}'.format(x), flavor=flavor_id, source=boot_source,
                                  source_id=source_id, avail_zone='nova', vm_host=target_host, cleanup='function')[1]

        vm_ids.append(vm_id)

        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
        check_helper.check_topology_of_vm(vm_id, vcpus=vcpus, cpu_pol=x, vm_host=target_host,
                                          prev_total_cpus=pre_boot_cpus[target_host])

    LOG.tc_step("Deleting both dedicated and shared vms")
    vm_helper.delete_vms(vms=vm_ids)

    post_delete_cpus = host_helper.get_vcpus_for_computes(field='used_now')
    assert post_delete_cpus == pre_test_cpus, "vcpu count after test does not equal vcpu count before test"
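
A hedged parametrization sketch for the (currently disabled) test above; the vcpu counts and value combinations are illustrative:

@mark.parametrize(('vcpus_dedicated', 'vcpus_shared', 'pol_source', 'boot_source'), [
    (2, 1, 'flavor', 'image'),
    (1, 2, 'image', 'volume'),
])
def _test_cpu_pol_dedicated_shared_coexists(vcpus_dedicated, vcpus_shared, pol_source, boot_source):
    ...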
Example 9
def refstack_setup(refstack_pre_check, request):

    LOG.fixture_step("Enable Swift if not already done")
    storage_helper.modify_swift(enable=True)

    LOG.fixture_step("Create tenants, users, and update quotas")
    compliance_helper.create_tenants_and_update_quotas(add_swift_role=True)

    LOG.fixture_step("Create test flavors")
    flavors = []
    for _ in range(2):
        flavor_id = nova_helper.create_flavor(name='refstack', vcpus=2, ram=2048, root_disk=2, cleanup='session')[1]
        nova_helper.set_flavor(flavor_id, **{FlavorSpec.CPU_POLICY: 'dedicated',
                                             FlavorSpec.MEM_PAGE_SIZE: 2048})
        flavors.append(flavor_id)

    LOG.fixture_step("Get/create test images")
    images = [glance_helper.get_image_id_from_name()]
    image_id = glance_helper.create_image()[1]
    images.append(image_id)
    ResourceCleanup.add('image', image_id, scope='session')

    LOG.fixture_step("Setup public router if not already done.")
    external_net_id = network_helper.get_networks(external=True)[0]
    public_router = 'public-router0'
    pub_routers = network_helper.get_routers(name=public_router, auth_info=Tenant.get('admin'))
    if not pub_routers:
        LOG.info("Create public router and add interfaces")
        public_router_id = network_helper.create_router(name=public_router, project=Tenant.get('admin')['tenant'])[1]
        network_helper.set_router_gateway(router_id=public_router_id, external_net=external_net_id)

        internal_subnet = 'internal0-subnet0-1'
        gateway = '10.1.1.1'
        network_helper.set_subnet(subnet=internal_subnet, gateway=gateway)
        network_helper.add_router_interface(router=public_router_id, subnet=internal_subnet,
                                            auth_info=Tenant.get('admin'))

    keystone_pub = keystone_helper.get_endpoints(field='URL', interface='public', service_name='keystone')[0]
    keystone_pub_url = keystone_pub.split('/v')[0] + '/'
    keystone_pub_url = keystone_pub_url.replace(':', r'\:').replace('/', r'\/')

    params_dict = {
        'image_ref': images[0],
        'image_ref_alt': images[1],
        'flavor_ref': flavors[0],
        'flavor_ref_alt': flavors[1],
        'public_network_id': external_net_id,
        'uri': keystone_pub_url + 'v2.0',
        'uri_v3': keystone_pub_url + 'v3',
        'discoverable_apis': 'tempurl,container_quotas',
        'container_sync': 'false',
        'object_versioning': 'true',
        'discoverability': 'false',
    }

    LOG.fixture_step("Update tempest.conf parameters on cumulus server: \n{}".format(params_dict))
    with compliance_helper.ssh_to_compliance_server() as server_ssh:
        for key, val in params_dict.items():
            server_ssh.exec_cmd('sed -i "s/^{} =.*/{} = {}/g" {}'.format(key, key, val, RefStack.TEMPEST_CONF),
                                fail_ok=False)
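            # For example, for key 'container_sync' the sed call above expands to
            # (illustrative): sed -i "s/^container_sync =.*/container_sync = false/g" <tempest.conf>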
            server_ssh.exec_cmd('grep {} {}'.format(val, RefStack.TEMPEST_CONF), fail_ok=False)

        compliance_helper.add_route_for_vm_access(server_ssh)

    def scp_logs():
        LOG.info("scp test results files from refstack test host to local automation dir")
        dest_dir = os.path.join(ProjVar.get_var('LOG_DIR'), 'compliance')
        os.makedirs(dest_dir, exist_ok=True)
        localhost = LocalHostClient()
        localhost.connect()

        for item in RefStack.LOG_FILES:
            source_path = '{}/{}'.format(RefStack.TEST_HISTORY_DIR, item)
            localhost.scp_on_dest(source_ip=ComplianceCreds.get_host(), source_user=ComplianceCreds.get_user(),
                                  source_pswd=ComplianceCreds.get_password(), source_path=source_path,
                                  dest_path=dest_dir, timeout=300, cleanup=False)

        origin_name = ComplianceVar.get_var('REFSTACK_SUITE').rsplit(r'/', maxsplit=1)[-1]
        localhost.exec_cmd('mv {}/test-list.txt {}/{}'.format(dest_dir, dest_dir, origin_name))
    request.addfinalizer(scp_logs)