Example no. 1
def adjust_vm_quota(vm_count, con_ssh, backup_info=None):
    """
    Increase the quotas needed for creating VMs for the tenant under test.
    The following quotas will be changed if needed:
        instances
        cores       - make sure the quota allows 2 cores for each VM
        ram         - make sure the quota allows 2M for each VM

    Args:
        vm_count:
            - number of VMs

        con_ssh:
            - current ssh connection

        backup_info:
            - backup options for doing System Backup

    Return:
        None
    """

    tenant = backup_info['tenant']
    quota_details = vm_helper.get_quota_details_info(
        'compute', resources='instances', tenant=tenant)['instances']
    min_instances_quota = vm_count + quota_details['in use'] + quota_details[
        'reserved']
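    # e.g. booting 3 more VMs with 5 instances in use and 1 reserved requires
    # a limit of at least 9; the quota is only raised below if the current
    # limit is lower than that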

    if min_instances_quota > quota_details['limit']:
        LOG.info('Insufficient quota for instances, increase to: {}'.format(
            min_instances_quota))
        vm_helper.ensure_vms_quotas(vms_num=min_instances_quota,
                                    tenant=tenant,
                                    con_ssh=con_ssh)
Example no. 2
    def test_lock_with_max_vms_simplex(self, simplex_only):
        vms_num = host_helper.get_max_vms_supported(host='controller-0')
        vm_helper.ensure_vms_quotas(vms_num=vms_num)

        LOG.tc_step(
            "Boot {} vms with various storage settings".format(vms_num))
        vms = vm_helper.boot_vms_various_types(cleanup='function',
                                               vms_num=vms_num)

        LOG.tc_step("Lock vm host on simplex system")
        HostsToRecover.add('controller-0')
        host_helper.lock_host('controller-0')

        LOG.tc_step("Ensure vms are in {} state after locked host come "
                    "online".format(VMStatus.STOPPED))
        vm_helper.wait_for_vms_values(vms,
                                      value=VMStatus.STOPPED,
                                      fail_ok=False)

        LOG.tc_step("Unlock host on simplex system")
        host_helper.unlock_host(host='controller-0')

        LOG.tc_step("Ensure vms are Active and Pingable from NatBox")
        vm_helper.wait_for_vms_values(vms,
                                      value=VMStatus.ACTIVE,
                                      fail_ok=False,
                                      timeout=600)
        for vm in vms:
            vm_helper.wait_for_vm_pingable_from_natbox(
                vm, timeout=VMTimeout.DHCP_RETRY)
Example no. 3
def pre_check(request):
    """
    Skip unless at least 3 hypervisors are up, disable remote CLI for these
    testcases (reverted by a finalizer), and adjust the quotas. A second
    finalizer lists heat resources and nova migrations at teardown.
    """
    hypervisors = host_helper.get_up_hypervisors()
    if len(hypervisors) < 3:
        skip('Large heat tests require 3+ hypervisors')

    # disable remote cli for these testcases
    remote_cli = ProjVar.get_var('REMOTE_CLI')
    if remote_cli:
        ProjVar.set_var(REMOTE_CLI=False)

        def revert():
            ProjVar.set_var(REMOTE_CLI=remote_cli)
        request.addfinalizer(revert)

    vm_helper.set_quotas(networks=100)
    vm_helper.ensure_vms_quotas(cores_num=100, vols_num=100, vms_num=100)

    def list_status():
        LOG.fixture_step("Listing heat resources and nova migrations")
        stacks = heat_helper.get_stacks(auth_info=Tenant.get('admin'))
        for stack in stacks:
            heat_helper.get_stack_resources(stack=stack, auth_info=Tenant.get('admin'))

        nova_helper.get_migration_list_table()
    request.addfinalizer(list_status)
Example no. 4
def check_system():
    storage_backing, hosts = host_helper.get_storage_backing_with_max_hosts()
    up_hypervisors = host_helper.get_up_hypervisors()
    if not up_hypervisors:
        skip('No up hypervisor on system')

    vm_helper.ensure_vms_quotas(vms_num=10, cores_num=20, vols_num=10)

    return hosts, storage_backing, up_hypervisors
Example no. 5
def skip_test_if_less_than_two_hosts(no_simplex):
    hypervisors = host_helper.get_up_hypervisors()
    if len(hypervisors) < 2:
        skip(SkipHypervisor.LESS_THAN_TWO_HYPERVISORS)

    LOG.fixture_step(
        "Update instance and volume quota to at least 10 and 20 respectively")
    vm_helper.ensure_vms_quotas(vms_num=10)

    return len(hypervisors)
Example no. 6
def hosts_pci_device_info():
    # get lab host list
    actual_hosts_device_info = {}
    compute_hosts = host_helper.get_up_hypervisors()
    for host in compute_hosts:
        device_info = host_helper.get_host_pci_devices(host, dev_class='Co-processor')
        if device_info:
            actual_hosts_device_info[host] = device_info
    LOG.info("Hosts device info: {}".format(actual_hosts_device_info))

    if not actual_hosts_device_info:
        skip("co-processor PCI device not found")

    hosts_device_info = {}
    sys_host_fields = ('address', 'name', 'vendor id', 'device id')
    for host in actual_hosts_device_info:
        sys_devs = host_helper.get_host_devices(host, field=sys_host_fields)
        actual_pci_devs = actual_hosts_device_info[host]
        hosts_device_info[host] = []
        for dev_info in actual_pci_devs:
            actual_pci_addr, actual_vendor_name, actual_dev_name, vf_dev_id, vf_count = dev_info
            actual_pci_addr = '0000:{}'.format(actual_pci_addr)
            assert actual_pci_addr in sys_devs[0], "Existing Co-processor pci device is not " \
                                                   "listed in system host-device-list"

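            # Only keep a device if the same device name is configured on every
            # host that reported co-processor devices; otherwise skip it.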
            hosts_with_dev = [host_ for host_, devs_ in actual_hosts_device_info.items() if
                              actual_dev_name in [dev[2] for dev in devs_]]
            if len(hosts_with_dev) < len(actual_hosts_device_info):
                LOG.info('QAT dev {} is only configured on {}'.format(actual_dev_name,
                                                                      hosts_with_dev))
                continue

            dev_name = actual_dev_name.split(maxsplit=1)[0].lower()
            index = sys_devs[0].index(actual_pci_addr)
            pci_addr, name, vendor_id, device_id = list(zip(*sys_devs))[index]
            dev_info_dict = {'pci_address': pci_addr,
                             'pci_name': name,
                             'vendor_id': vendor_id,
                             'device_id': device_id,
                             'vf_device_id': vf_dev_id,
                             'vf_count': vf_count,
                             'pci_alias': 'qat-{}-vf'.format(dev_name),
                            }

            hosts_device_info[host].append(dev_info_dict)

    hosts_device_info = {k: v for k, v in hosts_device_info.items() if v}
    if not hosts_device_info:
        skip('No common QAT device configured on computes. Skip test.')

    LOG.info('QAT devices to use for test: {}'.format(hosts_device_info))
    vm_helper.ensure_vms_quotas(vms_num=20)
    return hosts_device_info
Example no. 7
def _vms():
    vm_helper.ensure_vms_quotas(vms_num=8)
    glance_helper.get_guest_image(guest_os=GUEST_OS, cleanup='module')

    LOG.fixture_step("Create a favor with dedicated cpu policy")
    flavor_id = nova_helper.create_flavor(name='dedicated-ubuntu',
                                          guest_os=GUEST_OS)[1]
    ResourceCleanup.add('flavor', flavor_id, scope='module')
    nova_helper.set_flavor(flavor_id, **{FlavorSpec.CPU_POLICY: 'dedicated'})

    mgmt_net_id = network_helper.get_mgmt_net_id()
    internal_net_id = network_helper.get_internal_net_id()
    tenant_net_ids = network_helper.get_tenant_net_ids()
    if len(tenant_net_ids) < VMS_COUNT:
        tenant_net_ids += tenant_net_ids
    assert len(tenant_net_ids) >= VMS_COUNT

    vif = 'avp' if system_helper.is_avs() else 'virtio'
    vm_vif_models = {
        'virtio_vm1': ('virtio', tenant_net_ids[0]),
        '{}_vm1'.format(vif): (vif, tenant_net_ids[1]),
        'virtio_vm2': ('virtio', tenant_net_ids[2]),
        '{}_vm2'.format(vif): (vif, tenant_net_ids[3])
    }

    vms = []
    for vm_name, vifs in vm_vif_models.items():
        vif_model, tenant_net_id = vifs
        nics = [{
            'net-id': mgmt_net_id
        }, {
            'net-id': tenant_net_id,
            'vif-model': vif_model
        }, {
            'net-id': internal_net_id,
            'vif-model': vif_model
        }]

        LOG.fixture_step(
            "Boot a ubuntu14 vm with {} nics from above flavor and volume".
            format(vif_model))
        vm_id = vm_helper.boot_vm(vm_name,
                                  flavor=flavor_id,
                                  source='volume',
                                  cleanup='module',
                                  nics=nics,
                                  guest_os=GUEST_OS)[1]
        vms.append(vm_id)

    return vms
Example no. 8
def add_hosts_to_zone(request, add_admin_role_class, add_cgcsauto_zone, reserve_unreserve_all_hosts_module):
    storage_backing, target_hosts = keywords.host_helper.get_storage_backing_with_max_hosts()
    if len(target_hosts) < 2:
        skip("Less than two up hosts have same storage backing")

    LOG.fixture_step("Update instance and volume quota to at least 10 and 20 respectively")
    vm_helper.ensure_vms_quotas()

    hosts_to_add = target_hosts[:2]
    nova_helper.add_hosts_to_aggregate(aggregate='cgcsauto', hosts=hosts_to_add)

    def remove_hosts_from_zone():
        nova_helper.remove_hosts_from_aggregate(aggregate='cgcsauto', check_first=False)
    request.addfinalizer(remove_hosts_from_zone)

    return storage_backing, hosts_to_add
Example no. 9
def setups(no_simplex):
    vm_helper.ensure_vms_quotas(vms_num=10, cores_num=20, vols_num=10)
    storage_backing, hosts = keywords.host_helper.get_storage_backing_with_max_hosts()
    if len(hosts) < 2:
        skip("Less than two hosts with in same storage aggregate")

    LOG.fixture_step("Create a flavor with server group messaging enabled")
    flavor_id = nova_helper.create_flavor('srv_grp_msg', storage_backing=storage_backing)[1]
    nova_helper.set_flavor(flavor_id, **{FlavorSpec.SRV_GRP_MSG: True})

    LOG.fixture_step("Create affinity and anti-affinity server groups")
    affinity_grp = nova_helper.create_server_group(policy='affinity')[1]

    policy = 'soft_anti_affinity' if len(hosts) < 3 else 'anti_affinity'
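    # with fewer than 3 hosts a strict anti-affinity group could block
    # scheduling, so fall back to the soft policy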
    anti_affinity_grp = nova_helper.create_server_group(policy=policy)[1]

    return hosts, flavor_id, {'affinity': affinity_grp, 'anti_affinity': anti_affinity_grp}
Example no. 10
def get_cpu_count(hosts_with_backing):
    LOG.fixture_step("Find suitable vm host and cpu count and backing of host")
    compute_space_dict = {}

    vm_host = hosts_with_backing[0]
    numa0_used_cpus, numa0_total_cpus = host_helper.get_vcpus_per_proc(
        vm_host)[vm_host][0]
    numa0_avail_cpus = len(numa0_total_cpus) - len(numa0_used_cpus)
    for host in hosts_with_backing:
        free_space = get_disk_avail_least(host)
        compute_space_dict[host] = free_space
        LOG.info("{} space on {}".format(free_space, host))

    # increase quota
    LOG.fixture_step("Increase quota of allotted cores")
    vm_helper.ensure_vms_quotas(cores_num=int(numa0_avail_cpus + 30))

    return vm_host, numa0_avail_cpus, compute_space_dict
Example no. 11
    def setup_quota_and_hosts(self, request, add_admin_role_class,
                              add_cgcsauto_zone):
        vm_helper.ensure_vms_quotas(vms_num=10, cores_num=50, vols_num=20)

        storage_backing, target_hosts = \
            host_helper.get_storage_backing_with_max_hosts()
        if len(target_hosts) < 2:
            skip("Less than two up hosts have same storage backing")

        hosts_to_add = target_hosts[:2]
        nova_helper.add_hosts_to_aggregate(aggregate='cgcsauto',
                                           hosts=hosts_to_add)

        def remove_hosts_from_zone():
            nova_helper.remove_hosts_from_aggregate(aggregate='cgcsauto',
                                                    check_first=False)

        request.addfinalizer(remove_hosts_from_zone)

        return storage_backing, hosts_to_add
Example no. 12
def _test_ea_max_vms_with_crypto_vfs(_flavors, hosts_pci_device_info):
    """
    Verify maximum number of guests with Crypto VFs can be launched and
    stabilized

    Args:
        _flavors:
        hosts_pci_device_info:

    Returns:

    """

    LOG.info("Pci device  {}".format(hosts_pci_device_info))

    flavor_id = _flavors['flavor_qat_vf_4']
    # Assume we only have 1 coleto creek pci device on system
    crypto_hosts = list(hosts_pci_device_info.keys())
    host = crypto_hosts[0]
    vf_info = hosts_pci_device_info[host][0]
    vf_device_id = vf_info['vf_device_id']
    vf_count = vf_info['vf_count']
    LOG.info("Vf_device_id {}, count: {}".format(vf_device_id, vf_count))

    # Number of vms to launch to max out the total configured device VFs.
    # Each VM is launched with 4 VFs, and 4 VFs on each compute are reserved
    # for the resize nova action.

    number_of_vms = int((vf_count - 4 * len(crypto_hosts)) / 4)
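    # e.g. with 32 configured VFs and 2 crypto hosts: (32 - 4*2) / 4 = 6 VMs,
    # each consuming 4 VFs, leaving 4 VFs per compute free for resize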

    mgmt_net_id = network_helper.get_mgmt_net_id()
    tenant_net_id = network_helper.get_tenant_net_id()
    vif_type = get_vif_type()

    nics = [{'net-id': mgmt_net_id},
            {'net-id': tenant_net_id, 'vif-model': vif_type}]

    vm_helper.ensure_vms_quotas(number_of_vms + 10)

    vms = {}
    LOG.tc_step("Launch {} vms using flavor flavor_qat_vf_4 and nics {}".format(number_of_vms, nics))
    for i in range(1, number_of_vms + 1):
        vm_name = 'vm_crypto_{}'.format(i)
        vm_id = vm_helper.boot_vm(cleanup='function', name='vm_crypto_{}'.format(i), nics=nics, flavor=flavor_id)[1]
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id)
        vms[vm_name] = vm_id

    for vm_name_, vm_id_ in vms.items():
        vm_host = vm_helper.get_vm_host(vm_id_)
        host_dev_name = hosts_pci_device_info[vm_host][0]['device_name']
        expt_qat_devs = {host_dev_name: 4}
        check_helper.check_qat_service(vm_id=vm_id_, qat_devs=expt_qat_devs)

        LOG.info("Checking if other host has room for cold migrate vm {}".format(vm_name_))
        for host_ in crypto_hosts:
            if host_ != vm_host:
                total_vfs, used_vfs = network_helper.get_pci_device_vfs_counts_for_host(
                    host_, device_id=vf_device_id, fields=('pci_vfs_configured', 'pci_vfs_used'))

                if int(total_vfs) - int(used_vfs) >= 4:
                    LOG.info("Migrate to other host is possible")
                    expt_res = 0
                    break
        else:
            LOG.info("Migrate to other host is not possible")
            expt_res = 2

        LOG.tc_step("Attempt to cold migrate {} and ensure it {}".format(vm_name_,
                                                                         'succeeds' if expt_res == '0' else 'fails'))
        rc, msg = vm_helper.cold_migrate_vm(vm_id=vm_id_, fail_ok=True)
        assert expt_res == rc, "Expected: {}. Actual: {}".format(expt_res, msg)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id_)

        LOG.tc_step("Suspend/resume VM {} ....".format(vm_name_))
        vm_helper.suspend_vm(vm_id_)
        vm_helper.resume_vm(vm_id_)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id_)

        # vm_host = nova_helper.get_vm_host(vm_id_)
        # total, used = network_helper.get_pci_device_vfs_counts_for_host(vm_host, vf_device_id)[0]
        # if (total - int(used)) >= 4:
        #     expt_res = 0

        flavor_resize_id = _flavors['flavor_resize_qat_vf_4']
        LOG.tc_step("Resize VM {} to new flavor {} with increased memory...".format(vm_name_, flavor_resize_id))
        vm_helper.resize_vm(vm_id_, flavor_resize_id)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id_)

        # else:
        #     expt_res = 1
        #     LOG.info("Resizing of vm {} skipped; host {} max out vfs; used vfs = {}".format(vm_name_, vm_host, used))

        LOG.tc_step("Attempt to live migrate {} and ensure it's rejected".format(vm_name_))
        rc, msg = vm_helper.live_migrate_vm(vm_id=vm_id_, fail_ok=True)
        assert 6 == rc, "Expect live migration to fail on vm with pci alias device. Actual: {}".format(msg)
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id_)

        check_helper.check_qat_service(vm_id=vm_id_, qat_devs=expt_qat_devs)
Example no. 13
    def test_lock_with_vms(self, target_hosts, no_simplex,
                           add_admin_role_func):
        """
        Test lock host with vms on it.

        Args:
            target_hosts (list): targeted host(s) to lock, prepared by the
            target_hosts test fixture.

        Skip Conditions:
            - Less than 2 hypervisor hosts on the system

        Prerequisites:
            - Hosts' storage backing is pre-configured to the storage backing
            under test,
                i.e., 2 or more hosts should support the storage backing under
                test.
        Test Setups:
            - Set instances quota to 10 if it was less than 8
            - Determine storage backing(s) under test. i.e.,storage backings
            supported by at least 2 hosts on the system
            - Create flavors with storage extra specs set based on storage
            backings under test
            - Create vms_to_test that can be live migrated using created flavors
            - Determine target host(s) to perform lock based on which host(s)
            have the most vms_to_test
            - Live migrate vms to target host(s)
        Test Steps:
            - Lock target host
            - Verify lock succeeded and vms status unchanged
            - Repeat above steps if more than one target host
        Test Teardown:
            - Delete created vms and volumes
            - Delete created flavors
            - Unlock locked target host(s)

        """
        storage_backing, host = target_hosts
        vms_num = 5
        vm_helper.ensure_vms_quotas(vms_num=vms_num)

        LOG.tc_step(
            "Boot {} vms with various storage settings".format(vms_num))
        vms = vm_helper.boot_vms_various_types(cleanup='function',
                                               vms_num=vms_num,
                                               storage_backing=storage_backing,
                                               target_host=host)

        LOG.tc_step("Attempt to lock target host {}...".format(host))
        HostsToRecover.add(host)
        host_helper.lock_host(host=host,
                              check_first=False,
                              fail_ok=False,
                              swact=True)

        LOG.tc_step("Verify lock succeeded and vms still in good state")
        vm_helper.wait_for_vms_values(vms=vms, fail_ok=False)
        for vm in vms:
            vm_host = vm_helper.get_vm_host(vm_id=vm)
            assert vm_host != host, "VM is still on {} after lock".format(host)

            vm_helper.wait_for_vm_pingable_from_natbox(
                vm_id=vm, timeout=VMTimeout.DHCP_RETRY)
Example no. 14
def update_instances_quota():
    vm_helper.ensure_vms_quotas()
Example no. 15
def test_dvr_vms_network_connection(vms_num, srv_grp_policy, server_groups,
                                    router_info):
    """
    Test vms' east-west connectivity by pinging vms' data network from one vm

    Args:
        vms_num (int): number of vms to boot
        srv_grp_policy (str): affinity to boot vms on same host, anti-affinity to boot vms on
            different hosts
        server_groups: test fixture to return affinity and anti-affinity server groups
        router_info (str): id of tenant router

    Skip Conditions:
        - Only one nova host on the system

    Setups:
        - Enable DVR    (module)

    Test Steps:
        - Update router to distributed if not already done
        - Boot given number of vms with specific server group policy to schedule vms on
            same or different host(s)
        - Ping vms over data and management networks from one vm to test NS and EW traffic

    Teardown:
        - Delete vms
        - Revert router to its pre-test mode

    """
    # Increase instance quota count if needed
    current_vms = len(vm_helper.get_vms(strict=False))
    quota_needed = current_vms + vms_num
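    # e.g. with 4 vms already present and vms_num=2, make sure the instances
    # quota allows at least 6 vms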
    vm_helper.ensure_vms_quotas(quota_needed)

    if srv_grp_policy == 'anti-affinity' and len(
            host_helper.get_up_hypervisors()) == 1:
        skip("Only one nova host on the system.")

    LOG.tc_step("Update router to distributed if not already done")
    router_id = router_info
    is_dvr = network_helper.get_router_values(router_id,
                                              fields='distributed',
                                              auth_info=Tenant.get('admin'))[0]
    if not is_dvr:
        network_helper.set_router_mode(router_id, distributed=True)

    LOG.tc_step("Boot {} vms with server group policy {}".format(
        vms_num, srv_grp_policy))
    affinity_grp, anti_affinity_grp = server_groups(soft=True)
    srv_grp_id = affinity_grp if srv_grp_policy == 'affinity' else anti_affinity_grp

    vms = []
    tenant_net_id = network_helper.get_tenant_net_id()
    mgmt_net_id = network_helper.get_mgmt_net_id()
    internal_net_id = network_helper.get_internal_net_id()

    internal_vif = {'net-id': internal_net_id}
    if system_helper.is_avs():
        internal_vif['vif-model'] = 'avp'

    nics = [{'net-id': mgmt_net_id}, {'net-id': tenant_net_id}, internal_vif]
    for i in range(vms_num):
        vol = cinder_helper.create_volume()[1]
        ResourceCleanup.add(resource_type='volume', resource_id=vol)
        vm_id = vm_helper.boot_vm('dvr_ew_traffic',
                                  source='volume',
                                  source_id=vol,
                                  nics=nics,
                                  cleanup='function',
                                  hint={'group': srv_grp_id})[1]
        vms.append(vm_id)
        LOG.tc_step("Wait for vm {} pingable from NatBox".format(vm_id))
        vm_helper.wait_for_vm_pingable_from_natbox(vm_id, fail_ok=False)

    from_vm = vms[0]
    LOG.tc_step(
        "Ping vms over management and data networks from vm {}, and verify "
        "ping successful.".format(from_vm))
    vm_helper.ping_vms_from_vm(to_vms=vms,
                               from_vm=from_vm,
                               fail_ok=False,
                               net_types=['data', 'mgmt', 'internal'])
Example no. 16
    def add_shared_cpu(self, no_simplex, config_host_class, request):
        """
        This fixture ensures at least two hypervisors are configured with shared cpu on proc0 and proc1
        It also reverts the configs at the end.

        Args:
            no_simplex:
            config_host_class:
            request:

        Returns:

        """
        storage_backing, hosts = keywords.host_helper.get_storage_backing_with_max_hosts()
        if len(hosts) < 2:
            skip("Less than two hypervisors with same storage backend")

        LOG.fixture_step("Ensure at least two hypervisors has shared cpu cores on both p0 and p1")
        shared_cpu_hosts = []
        shared_disabled_hosts = {}
        modified_hosts = []
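        # shared_cpu_hosts: hosts already configured with shared cores on both procs
        # shared_disabled_hosts: candidate hosts whose cpu/memory config needs modifying
        # modified_hosts: hosts actually modified, reverted by the finalizer below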

        for host_ in hosts:
            shared_cores_for_host = host_helper.get_host_cpu_cores_for_function(hostname=host_, func='shared')
            if 1 not in shared_cores_for_host:
                LOG.info("{} has only 1 processor. Ignore.".format(host_))
                continue

            if shared_cores_for_host[0] and shared_cores_for_host[1]:
                shared_cpu_hosts.append(host_)
                if len(shared_cpu_hosts) == 2:
                    break
            else:
                shared_disabled_hosts[host_] = shared_cores_for_host
        else:
            if len(shared_disabled_hosts) + len(shared_cpu_hosts) < 2:
                skip("Less than two up hypervisors with 2 processors")

            def _modify(host_to_modify):
                host_helper.modify_host_cpu(host_to_modify, 'shared', p0=1, p1=1)
                host_helper.modify_host_memory(host_to_modify, proc=0, gib_1g=4)

            for host_to_config in shared_disabled_hosts:
                config_host_class(host=host_to_config, modify_func=_modify)
                host_helper.wait_for_hypervisors_up(host_to_config)
                host_helper.wait_for_memory_update(host_to_config)
                check_host_cpu_and_memory(host_to_config, expt_shared_cpu={0: 1, 1: 1}, expt_1g_page={0: 4})
                shared_cpu_hosts.append(host_to_config)
                modified_hosts.append(host_to_config)
                if len(shared_cpu_hosts) >= 2:
                    break

            def revert():
                for host_to_revert in modified_hosts:
                    check_host_cpu_and_memory(host_to_revert, expt_shared_cpu={0: 1, 1: 1}, expt_1g_page={0: 4})
                    p0_shared = len(shared_disabled_hosts[host_to_revert][0])
                    p1_shared = len(shared_disabled_hosts[host_to_revert][1])
                    try:
                        LOG.fixture_step("Revert {} shared cpu and memory setting".format(host_to_revert))
                        host_helper.lock_host(host_to_revert)
                        host_helper.modify_host_cpu(host_to_revert, 'shared', p0=p0_shared, p1=p1_shared)
                        host_helper.modify_host_memory(host_to_revert, proc=0, gib_1g=0)
                    finally:
                        host_helper.unlock_host(host_to_revert)
                        host_helper.wait_for_memory_update(host_to_revert)

                    check_host_cpu_and_memory(host_to_revert,
                                              expt_shared_cpu={0: p0_shared, 1: p1_shared}, expt_1g_page={0: 0})
            request.addfinalizer(revert)

        max_vcpus_proc0 = 0
        max_vcpus_proc1 = 0
        host_max_proc0 = None
        host_max_proc1 = None

        LOG.fixture_step("Get VMs cores for each host")
        for host in shared_cpu_hosts:
            vm_cores_per_proc = host_helper.get_host_cpu_cores_for_function(host, func='Application', thread=None)
            if len(vm_cores_per_proc[0]) > max_vcpus_proc0:
                max_vcpus_proc0 = len(vm_cores_per_proc[0])
                host_max_proc0 = host
            if len(vm_cores_per_proc.get(1, [])) > max_vcpus_proc1:
                max_vcpus_proc1 = len(vm_cores_per_proc.get(1, []))
                host_max_proc1 = host

        LOG.fixture_step("Increase quota of allotted cores")
        vm_helper.ensure_vms_quotas(cores_num=(max(max_vcpus_proc0, max_vcpus_proc1) + 1))

        return storage_backing, shared_cpu_hosts, [(max_vcpus_proc0, host_max_proc0), (max_vcpus_proc1, host_max_proc1)]
Example no. 17
def update_quotas(add_admin_role_module):
    LOG.fixture_step("Update instance and volume quota to at least 10 and "
                     "20 respectively")
    vm_helper.ensure_vms_quotas()
Example no. 18
def _test_pci_resource_usage(vif_model_check):
    """
    Boot vms with the specified vif model for the tenant network, and verify
    that pci resource usage on the providernet increases when vms are booted
    and decreases when they are deleted.

    """
    vif_model, base_vm, flavor_id, nics_to_test, seg_id, net_type, pnet_name, extra_pcipt_net_name, extra_pcipt_net = \
        vif_model_check

    LOG.tc_step("Ensure core/vm quota is sufficient")

    if 'sriov' in vif_model:
        vm_type = 'sriov'
        resource_param = 'pci_vfs_used'
        max_resource = 'pci_vfs_configured'
    else:
        vm_type = 'pcipt'
        resource_param = 'pci_pfs_used'
        max_resource = 'pci_pfs_configured'

    LOG.tc_step(
        "Get resource usage for {} interface before booting VM(s)".format(
            vif_model))
    LOG.info("provider net for {} interface: {}".format(vif_model, pnet_name))

    assert pnet_name, "provider network for {} interface is not found".format(
        vif_model)

    total_val, pre_resource_value = nova_helper.get_pci_interface_stats_for_providernet(
        pnet_name, fields=(max_resource, resource_param))
    LOG.info("Resource Usage {} for {}. Resource configured: {}".format(
        pre_resource_value, vif_model, total_val))

    expt_change = 2 if vif_model == 'pci-passthrough' and extra_pcipt_net else 1
    vm_limit = int((total_val - pre_resource_value) /
                   expt_change) if vif_model == 'pci-passthrough' else 5
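    # e.g. 8 PFs configured, 0 in use, expt_change of 2 -> limit of 4
    # pci-passthrough vms; other vif models are capped at 5 vms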
    vm_helper.ensure_vms_quotas(vm_limit + 5)
    vms_under_test = []
    for i in range(vm_limit):
        LOG.tc_step("Boot a vm with {} vif model on {} net".format(
            vif_model, net_type))
        vm_id = vm_helper.boot_vm(name=vif_model,
                                  flavor=flavor_id,
                                  cleanup='function',
                                  nics=nics_to_test)[1]
        vms_under_test.append(vm_id)

        vm_helper.wait_for_vm_pingable_from_natbox(vm_id, fail_ok=False)

        if vm_type == 'pcipt':
            LOG.tc_step("Add vlan to pci-passthrough interface for VM.")
            vm_helper.add_vlan_for_vm_pcipt_interfaces(vm_id=vm_id,
                                                       net_seg_id=seg_id)

        LOG.tc_step(
            "Ping vm over mgmt and {} nets from itself".format(net_type))
        vm_helper.ping_vms_from_vm(to_vms=vm_id,
                                   from_vm=vm_id,
                                   net_types=['mgmt', net_type])

        LOG.tc_step(
            "Check resource usage for {} interface increased by {}".format(
                vif_model, expt_change))
        resource_value = nova_helper.get_provider_net_info(
            pnet_name, field=resource_param)
        assert pre_resource_value + expt_change == resource_value, "Resource usage for {} is not increased by {}". \
            format(vif_model, expt_change)

        pre_resource_value = resource_value

    for vm_to_del in vms_under_test:
        LOG.tc_step(
            "Check resource usage for {} interface reduced by {} after "
            "deleting a vm".format(vif_model, expt_change))
        vm_helper.delete_vms(vm_to_del, check_first=False, stop_first=False)
        resource_val = common.wait_for_val_from_func(
            expt_val=pre_resource_value - expt_change,
            timeout=30,
            check_interval=3,
            func=nova_helper.get_provider_net_info,
            providernet_id=pnet_name,
            field=resource_param)[1]

        assert pre_resource_value - expt_change == resource_val, "Resource usage for {} is not reduced by {}". \
            format(vif_model, expt_change)
        pre_resource_value = resource_val