    def boot_vms_to_allocate_hp(self, os_conn, env, host, page_size, net,
                                ram_left_free=0):
        """Boot vms to allocate required count of huge pages

        :param os_conn: os_conn
        :param env: env
        :param host: fqdn hostname
        :param page_size: size of huge pages. Usually it's 1048576 or 2048
        :param net: network to vm
        :param ram_left_free: ram in MBs required to be free
        :return:
        """
        hps = get_hp_distribution_per_numa_node(env)[host]
        # free huge pages per numa node, converted to MB (page_size is KiB)
        flv_sizes = [(numa, hp[page_size]['free'] * page_size // 1024)
                     for numa, hp in hps.items() if hp[page_size]['free'] != 0]
        flv_sizes.sort(key=lambda i: i[1], reverse=True)

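        # Boot one vm per numa node: each flavor is sized to use all free
        # huge pages of the requested size on that node, minus ram_left_free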
        for numa, size in flv_sizes:
            flv = os_conn.nova.flavors.create(
                name='flv_{0}_{1}'.format(numa, page_size),
                ram=size - ram_left_free, vcpus=1, disk=1)
            self.created_flvs.append(flv)
            flv.set_keys({'hw:mem_page_size': page_size})
            os_conn.create_server(name='vm_{0}'.format(numa), flavor=flv.id,
                                  nics=[{'net-id': net}],
                                  availability_zone='nova:{}'.format(host),
                                  wait_for_avaliable=False)
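
    # A minimal usage sketch (an illustration, not taken from these tests):
    # consume all free 2Mb pages on a compute node, leaving 512 MB of them
    # unused, from a test method that has the usual fixtures available:
    #
    #     self.boot_vms_to_allocate_hp(os_conn, env, host, page_2mb,
    #                                  net=networks[0], ram_left_free=512)
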
def computes_for_mixed_hp_and_numa(os_conn, env, computes_with_mixed_hp,
                                   computes_with_numa_nodes):
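    """Return hosts suitable for tests that mix huge pages and numa pinning

    A host qualifies if each of its two numa nodes has at least 4 cpus,
    1024 x 2Mb huge pages and 2 x 1Gb huge pages. The test is skipped if
    fewer than two such hosts are found.
    """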
    hosts = list(
        set(computes_with_mixed_hp) & set(computes_with_numa_nodes))
    conf_cpu = get_cpu_distribition_per_numa_node(env)
    conf_hp = get_hp_distribution_per_numa_node(env, numa_count=2)
    # iterate over copies, since hosts are removed from the list in-place
    for host in list(hosts):
        cpu0 = len(conf_cpu[host]['numa0'])
        cpu1 = len(conf_cpu[host]['numa1'])
        if cpu0 < 4 or cpu1 < 4:
            hosts.remove(host)
    for host in list(hosts):
        hp2mb_0 = conf_hp[host]['numa0'][page_2mb]['total']
        hp1gb_0 = conf_hp[host]['numa0'][page_1gb]['total']
        hp2mb_1 = conf_hp[host]['numa1'][page_2mb]['total']
        hp1gb_1 = conf_hp[host]['numa1'][page_1gb]['total']
        if hp2mb_0 < 1024 or hp2mb_1 < 1024 or hp1gb_0 < 2 or hp1gb_1 < 2:
            hosts.remove(host)
    if len(hosts) < 2:
        pytest.skip("Insufficient count of computes")
    return hosts
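

# The helpers above rely on get_cpu_distribition_per_numa_node() and
# get_hp_distribution_per_numa_node() returning per-host mappings shaped
# roughly like this sketch (an illustration inferred from how the values
# are indexed here, not from the helpers' implementation):
#
#     conf_cpu = {'node-1.domain.tld': {'numa0': [0, 1, 2, 3],
#                                       'numa1': [4, 5, 6, 7]}}
#     conf_hp = {'node-1.domain.tld': {
#         'numa0': {2048: {'total': 1024, 'free': 1024},
#                   1048576: {'total': 2, 'free': 2}},
#         'numa1': {2048: {'total': 1024, 'free': 1024},
#                   1048576: {'total': 2, 'free': 2}}}}

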
    def test_negative_lack_of_resources_on_pci_device(
            self, os_conn, env, ubuntu_image_id, keypair, mixed_hosts,
            vf_ports, flavors, networks, cleanup):
        """This test checks error state for vm when resources are not enough
            on pci device.

            Steps:
            1. Create network net1 with subnet
            2. Create router, set gateway and add interface for the network
            3. Create flavor for 2Mb huge pages
            4. Create flavor for cpu pinning with hw:numa_nodes=1
            5. Boot vm with the 1st flavor with vf_port on numa without pci
            device (usually it's numa1)
            6. Check that vms are in error state since no pci device found
            7. Redo for the 2nd flavor
        """
        host = mixed_hosts[0]
        cpus = get_cpu_distribition_per_numa_node(env)[host]
        hps = get_hp_distribution_per_numa_node(env)[host]

        # Occupy all vcpus and huge pages on both numa nodes, one vm per
        # node. The pci device is usually on numa1, so removing the vm from
        # numa0 afterwards leaves a numa node with free huge pages and
        # pinned cpus, but without sr-iov.
        vms = {}

        # boot vms in decreasing order of cpus per numa node (larger first)
        sorted_cpus = sorted(cpus.items(), key=lambda x: len(x[1]),
                             reverse=True)
        for numa, cpu_list in sorted_cpus:
            free_2mb = hps[numa][page_2mb]['free']
            flv = os_conn.nova.flavors.create(name='flavor_{}'.format(numa),
                                              ram=free_2mb * 2, disk=5,
                                              vcpus=len(cpu_list))
            self.created_flvs.append(flv)
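            # Pin the vm's cpus, restrict it to a single numa node and back
            # its ram with 2Mb huge pages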
            flv.set_keys({'hw:cpu_policy': 'dedicated',
                          'aggregate_instance_extra_specs:pinned': 'true',
                          'hw:numa_nodes': 1,
                          'hw:mem_page_size': page_2mb})

            vm = os_conn.create_server(
                name='vm_to_{0}'.format(numa), image_id=ubuntu_image_id,
                key_name=keypair.name, nics=[{'net-id': networks[0]}],
                availability_zone='nova:{}'.format(host), flavor=flv.id,
                wait_for_avaliable=False)
            nodeset = self.get_nodesets_for_vm(os_conn, vm)[0]
            assert numa == "numa{0}".format(nodeset), (
                "Nodeset used for {0} should be {1}, but it's {2}. "
                "It's critical for this test since pci device is on numa1 only"
                .format(vm, numa, "numa{0}".format(nodeset)))
            vms[numa] = vm

        # Remove the vm from numa0 to free huge pages and pinned cpus on the
        # numa node that has no pci device
        vms['numa0'].delete()
        os_conn.wait_servers_deleted([vms['numa0']])

        # Boot vms with a pci device: the request should fail since the only
        # numa node with free resources has no pci device
        for i, flavor in enumerate(flavors):
            with pytest.raises(InstanceError) as e:
                os_conn.create_server(
                    name='vm', image_id=ubuntu_image_id,
                    key_name=keypair.name, flavor=flavor.id,
                    availability_zone='nova:{}'.format(host),
                    nics=[{'port-id': vf_ports[i]}])
            expected_message = ("Insufficient compute resources: "
                                "Requested instance NUMA topology together "
                                "with requested PCI devices cannot fit the "
                                "given host NUMA topology")
            logger.info("Instance status is error:\n{0}".format(str(e.value)))
            assert expected_message in str(e.value), (
                "Unexpected instance error message: {0}".format(e.value))