    def test_negative_distribution_less_resources(self, env, os_conn, networks,
                                                  security_group, resource,
                                                  aggregate, keypair, cleanup):
        """This test checks that vm is in error state if at least one numa node
        has insufficient resources
        Steps:
            1. Create flavor with numa_cpu and numa_mem distribution
            2. Create net1 with subnet, net2 with subnet and router1 with
                interfaces to both nets
            3. Launch vm using created flavor
            4. Check that vm in error state
        """
        host = aggregate.hosts[0]
        host_cpus = get_cpu_distribition_per_numa_node(env)[host]
        host_mem = get_memory_distribition_per_numa_node(env)[host]
        total_cpu = len(host_cpus['numa0']) + len(host_cpus['numa1'])
        host_mem0 = math.ceil(host_mem['numa0'] / 1024)
        host_mem1 = math.ceil(host_mem['numa1'] / 1024)

        # Calculate flavor metadata values that would lead to the error by
        # allocating more resources than available for numa node
        if resource == 'cpu':
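            # Request one more vcpu on numa0 than the node actually provides;
            # the memory request stays within the limits of each node.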
            cnt_to_exceed = len(host_cpus['numa0']) + 1
            cpu_numa0 = self.get_flavor_cpus(range(total_cpu)[:cnt_to_exceed])
            cpu_numa1 = self.get_flavor_cpus(range(total_cpu)[cnt_to_exceed:])
            mem_numa0 = int(host_mem0 / 2)
            mem_numa1 = int(host_mem1 / 2)
        else:
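            # Keep the cpu split valid but request more memory for numa0 than
            # any single numa node provides.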
            correct_cnt = len(host_cpus['numa0'])
            cpu_numa0 = self.get_flavor_cpus(range(total_cpu)[:correct_cnt])
            cpu_numa1 = self.get_flavor_cpus(range(total_cpu)[correct_cnt:])
            mem_numa0 = int(max(host_mem0, host_mem1) +
                            min(host_mem0, host_mem1) / 2)
            mem_numa1 = int(host_mem1 - mem_numa0)

        # Create flavor with params and metadata depending on resources
        flv = os_conn.nova.flavors.create(name='flv',
                                          ram=mem_numa0 + mem_numa1,
                                          vcpus=total_cpu, disk=1)
        self.created_flvs.append(flv)
        flv.set_keys({
            'aggregate_instance_extra_specs:pinned': 'true',
            'hw:cpu_policy': 'dedicated', 'hw:numa_nodes': 2,
            'hw:numa_cpus.0': cpu_numa0, 'hw:numa_cpus.1': cpu_numa1,
            'hw:numa_mem.0': mem_numa0, 'hw:numa_mem.1': mem_numa1})

        # Boot instance
        with pytest.raises(InstanceError) as e:
            os_conn.create_server(name='vm', flavor=flv.id,
                                  nics=[{'net-id': networks[0]}],
                                  key_name=keypair.name,
                                  security_groups=[security_group.id],
                                  availability_zone='nova:{}'.format(host),
                                  wait_for_avaliable=False)
        expected_message = ("Insufficient compute resources: "
                            "Requested instance NUMA topology cannot fit the "
                            "given host NUMA topology")
        logger.info("Instance status is error:\n{0}".format(str(e.value)))
        assert expected_message in str(e.value), (
            "Unexpected reason of instance error")
    def test_lm_cinder_lvm_for_cpu_pinning(self, env, os_conn, networks,
                                           volume, keypair, flavors,
                                           security_group, aggregate,
                                           ubuntu_image_id):
        """This test checks that live migration executed successfully for
            instances created on computes with cinder and 2 Numa nodes
            Steps:
                1. Create net1 with subnet, net2 with subnet and router1 with
                   interfaces to both nets
                2. Launch instance vm1 with volume vol1 on compute-1 in net1
                   with flavor m1.small.performance_1
                3. Launch instance vm2 on compute-2 in net2 with flavor
                   m1.small.performance_1
                4. Live migrate vm1 with block-migrate parameter on compute-2
                   and check that vm moved to compute-2 with Active state
                5. Live migrate vm2 with block-migrate parameter on compute-1
                   and check that vm moved to compute-1 with Active state
                6. Check vms connectivity
                7. Run CPU load on vm2
                8. Live migrate vm2 with block-migrate parameter on compute-2
                   and check that vm moved to compute-2 with Active state
                9. Check vms connectivity
                10. Remove vm1 and vm2
                11. Repeat actions for flavor m1.small.performance_2
        """
        hosts = aggregate.hosts
        cpus = get_cpu_distribition_per_numa_node(env)

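        # The flavors fixture is assumed to provide one flavor per numa count:
        # the first requests 1 numa node, the second requests 2.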
        for numa_count, cpu_flavor in enumerate(flavors, start=1):
            vm_1 = os_conn.create_server(
                name='vm1', flavor=cpu_flavor.id,
                nics=[{'net-id': networks[0]}], key_name=keypair.name,
                availability_zone='nova:{}'.format(hosts[0]),
                security_groups=[security_group.id],
                block_device_mapping={'vda': volume.id})
            vm_2 = os_conn.create_server(
                name='vm2', image_id=ubuntu_image_id,
                flavor=cpu_flavor.id,
                key_name=keypair.name, userdata=userdata,
                availability_zone='nova:{}'.format(hosts[1]),
                security_groups=[security_group.id],
                nics=[{'net-id': networks[1]}])
            vms = [vm_1, vm_2]
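            # `marker` is assumed to be written to the console log by the
            # cloud-init userdata once vm2 has finished booting.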
            os_conn.wait_marker_in_servers_log([vm_2], marker)

            check_vm_connectivity_cirros_ubuntu(
                env, os_conn, keypair, cirros=vms[0], ubuntu=vms[1])
            self.live_migrate(os_conn, vms[0], hosts[1], block_migration=False)
            self.live_migrate(os_conn, vms[1], hosts[0])
            check_vm_connectivity_cirros_ubuntu(
                env, os_conn, keypair, cirros=vms[0], ubuntu=vms[1])
            self.cpu_load(env, os_conn, vms[1], vm_keypair=keypair)
            self.live_migrate(os_conn, vms[1], hosts[1])
            check_vm_connectivity_cirros_ubuntu(
                env, os_conn, keypair, cirros=vms[0], ubuntu=vms[1])
            self.cpu_load(env, os_conn, vms[1], vm_keypair=keypair,
                          action='stop')
            self.check_cpu_for_vm(os_conn, vms[0], numa_count, cpus[hosts[1]])
            self.check_cpu_for_vm(os_conn, vms[1], numa_count, cpus[hosts[1]])
            self.delete_servers(os_conn)
    def test_cpu_pinning_one_numa_cell(
            self, env, os_conn, networks, flavors, security_group,
            aggregate):
        """This test checks that cpu pinning executed successfully for
        instances created on computes with 1 NUMA
        Steps:
            1. Create net1 with subnet, net2 with subnet and router1 with
            interfaces to both nets
            2. Launch instances vm1, vm3 in net1 with m1.small.performance on
            compute-1, vm2 on compute-2.
            3. Check numa nodes for all vms
            4. Check parameter in /etc/default/grub
            5. Check vms connectivity
        """
        hosts = aggregate.hosts
        vms = []
        network_for_instances = [networks[0], networks[1], networks[0]]
        hosts_for_instances = [hosts[0], hosts[1], hosts[0]]
        cpus = get_cpu_distribition_per_numa_node(env)

        for i in range(3):
            vms.append(os_conn.create_server(
                name='vm{}'.format(i),
                flavor=flavors[0].id,
                nics=[{'net-id': network_for_instances[i]}],
                availability_zone='nova:{}'.format(hosts_for_instances[i]),
                security_groups=[security_group.id]))

        for vm in vms:
            host = getattr(vm, "OS-EXT-SRV-ATTR:host")
            assert host in hosts
            self.check_cpu_for_vm(os_conn, vm, 1, cpus[host])

        network_checks.check_vm_connectivity(env, os_conn)
    def test_cpu_pinning_resize(
            self, env, os_conn, networks, flavors, security_group,
            aggregate, aggregate_n):
        """This test checks that cpu pinning executed successfully for
        instances created on computes with 1 NUMA
        Steps:
            1. Create net1 with subnet, net2 with subnet and router1 with
            interfaces to both nets
            2. Launch vm1 using m1.small.performance-1 flavor on compute-1 and
            vm2 on compute-2 with m1.small.old flavor.
            3. Resize vm1 to m1.small.performance-2
            4. Ping vm1 from vm2
            5. Resize vm1 to m1.small.performance-3
            6. Ping vm1 from vm2
            7. Resize vm1 to m1.small.performance-1
            8. Ping vm1 from vm2
            9. Resize vm1 to m1.small.old
            10. Ping vm1 from vm2
            11. Resize vm1 to m1.small.performance-4
            12. Ping vm1 from vm2
            13. Resize vm1 to m1.small.performance-1
            14. Ping vm1 from vm2
        """
        hosts = aggregate.hosts
        vms = []
        cpus = get_cpu_distribition_per_numa_node(env)
        flavors_for_resize = ['m1.small.perfomance-2',
                              'm1.small.perfomance-3',
                              'm1.small.perfomance-1',
                              'm1.small.old', 'm1.small.perfomance-4',
                              'm1.small.perfomance-1']

        for i in range(2):
            vms.append(os_conn.create_server(
                name='vm{}'.format(i),
                flavor=flavors[i].id,
                nics=[{'net-id': networks[i]}],
                availability_zone='nova:{}'.format(hosts[i]),
                security_groups=[security_group.id]))
        vm = vms[0]

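        # The names below must match flavors created by the fixture; if a name
        # is not found, the resize step for it is silently skipped.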
        for flavor in flavors_for_resize:
            numas = 2
            for object_flavor in flavors:
                if object_flavor.name == flavor:
                    vm = self.resize(os_conn, vm, object_flavor.id)
                    break
            if flavor != 'm1.small.old':
                if flavor in ['m1.small.perfomance-4',
                              'm1.small.perfomance-1']:
                    numas = 1
                host = getattr(vm, "OS-EXT-SRV-ATTR:host")
                assert host in hosts
                self.check_cpu_for_vm(os_conn,
                                      os_conn.get_instance_detail(vm),
                                      numas, cpus[host])
            os_conn.wait_servers_ssh_ready(vms)
            network_checks.check_vm_connectivity(env, os_conn)
    def test_vms_connectivity_after_evacuation(self, env, os_conn, networks,
                                               flavors, aggregate,
                                               security_group, devops_env):
        """This test checks vms connectivity for vms with cpu pinning with 1
        NUMA after evacuation

        Steps:
            1. Create net1 with subnet, net2 with subnet and router1 with
            interfaces to both nets
            2. Boot vm0 with cpu flavor on host0 and net0
            3. Boot vm1 with old flavor on host1 and net1
            4. Check vms connectivity
            5. Kill compute0 and evacuate vm0 to compute1 with
            --on-shared-storage parameter
            6. Check vms connectivity
            7. Check numa nodes for vm0
            8. Make compute0 alive
            9. Check that resources for vm0 were deleted from compute0
        """
        cpus = get_cpu_distribition_per_numa_node(env)
        hosts = aggregate.hosts
        vms = []

        for i in range(2):
            vm = os_conn.create_server(
                name='vm{}'.format(i), flavor=flavors[i].id,
                nics=[{'net-id': networks[i]}],
                availability_zone='nova:{}'.format(hosts[i]),
                security_groups=[security_group.id])
            vms.append(vm)
        network_checks.check_vm_connectivity(env, os_conn)
        self.check_cpu_for_vm(os_conn, vms[0], 1, cpus[hosts[0]])

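        # Emulate a failure of the first compute so that vm0 can be evacuated.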
        self.compute_change_state(os_conn, devops_env, hosts[0], state='down')
        vm0_new = self.evacuate(os_conn, devops_env, vms[0])
        vm0_new.get()
        new_host = getattr(vm0_new, "OS-EXT-SRV-ATTR:host")
        assert new_host in hosts, "Unexpected host after evacuation"
        assert new_host != hosts[0], "Host didn't change after evacuation"
        os_conn.wait_servers_ssh_ready(vms)
        network_checks.check_vm_connectivity(env, os_conn)
        self.check_cpu_for_vm(os_conn, vm0_new, 1, cpus[new_host])

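        # Bring the failed compute back and check that it no longer reports
        # any running vms or the evacuated instance.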
        self.compute_change_state(os_conn, devops_env, hosts[0], state='up')
        old_hv = os_conn.nova.hypervisors.find(hypervisor_hostname=hosts[0])
        assert old_hv.running_vms == 0, (
            "Old hypervisor {0} shouldn't have running vms").format(hosts[0])

        instance_name = getattr(vm0_new, "OS-EXT-SRV-ATTR:instance_name")
        assert instance_name in self.get_instances(os_conn, new_host), (
            "Instance should be in the list of instances on the new host")
        assert instance_name not in self.get_instances(os_conn, hosts[0]), (
            "Instance shouldn't be in the list of instances on the old host")
    def test_vms_connectivity(self, env, os_conn, sriov_hosts,
                              computes_for_mixed_hp_and_numa, networks,
                              vf_ports, security_group, flavors,
                              ubuntu_image_id, keypair):
        """This test checks vms connectivity with all features
            Steps:
            1. Create net1 with subnet, router1 with interface to net1
            2. Create vm1 on vf port with flavor m2.small.hpgs_n-1 on host1
            3. Create vm2 on vf port with old flavor on host1
            4. Create vm3 with flavor m1.small.hpgs-1_n-2 on host1
            5. Create vm4 on vf port with m1.small.hpgs-1 on host2
            6. Create vm5 on vf port with old flavor on host2
            7. Create vm6 with m1.small.hpgs on host2
            8. Check that vms are on right numa-node
            9. Check page size for all vms
            10. Check vms connectivity
        """
        hosts = list(set(computes_for_mixed_hp_and_numa) & set(sriov_hosts))
        if len(hosts) < 2:
            pytest.skip("At least 2 hosts with all features are required")
        cpus = get_cpu_distribition_per_numa_node(env)
        vms = {}
        net = networks[0]
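        # vms_params entries: (flavor, host, vf port or None, expected numa
        # count or None, expected page size or None).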
        vms_params = [(flavors[0], hosts[0], vf_ports[0], 1, page_2mb),
                      (flavors[4], hosts[0], vf_ports[1], None, None),
                      (flavors[1], hosts[0], None, 2, page_1gb),
                      (flavors[2], hosts[1], vf_ports[2], None, page_1gb),
                      (flavors[4], hosts[1], vf_ports[3], None, None),
                      (flavors[3], hosts[1], None, None, page_2mb)]
        for i, (flv, host, port, numa_count, size) in enumerate(vms_params):
            nics = [{'net-id': net}]
            if port is not None:
                nics = [{'port-id': port}]
            vm = os_conn.create_server(
                name='vm{}'.format(i), image_id=ubuntu_image_id,
                flavor=flv.id, nics=nics, key_name=keypair.name,
                availability_zone='nova:{}'.format(host),
                security_groups=[security_group.id], wait_for_active=False,
                wait_for_avaliable=False)
            vms.update({vm: {'numa': numa_count, 'size': size}})

        os_conn.wait_servers_active(vms.keys())
        os_conn.wait_servers_ssh_ready(vms.keys())
        for vm, param in vms.items():
            act_size = self.get_instance_page_size(os_conn, vm)
            assert act_size == param['size'], (
                "Unexpected package size. Should be {0} instead of {1}".format(
                    param['size'], act_size))
            if param['numa'] is not None:
                host = getattr(vm, 'OS-EXT-SRV-ATTR:host')
                self.check_cpu_for_vm(os_conn, vm, param['numa'], cpus[host])
        self.check_vm_connectivity_ubuntu(env, os_conn, keypair, vms.keys())
    def test_vms_with_isolate_cpu_thread_policy_less_resources(
            self, env, os_conn, hosts_hyper_threading, flavors, networks,
            keypair, security_group):
        """This test checks vms with cpu_thread_policy isolate parameter with
        less resources

        Steps:
            1. Create net1 with subnet and router1 with interface to net1
            2. Create cpu pinning flavor with hw:numa_nodes=1 and required
            cpu_thread_policy
            3. Boot vms to have no ability to create vm on different cores
            4. Boot vm with cpu pinning flavor (cpu_thread_policy = isolate)
            5. Check that vm is in error state
        """
        host = hosts_hyper_threading[0]
        cpus = get_cpu_distribition_per_numa_node(env)[host]
        zone = 'nova:{}'.format(host)

        # Get total pairs of cpus
        numa_nodes_count = len(cpus.keys())
        ts = set()
        for i in range(numa_nodes_count):
            ts.update(self.get_thread_siblings_lists(os_conn, host, i))
        ts_lsts = list(ts)
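        # ts_lsts is assumed to contain one entry per physical core, each
        # listing the sibling hardware threads of that core.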

        # Boot vms to allocate vcpus in order to have no chance to use cpus
        # from different cores: N-1 vms with 'require' flavor if N is total
        # count of vcpu pairs
        count_require = len(ts_lsts) - 1
        for i in range(count_require):
            os_conn.create_server(name='vm{0}'.format(i),
                                  flavor=flavors[2].id,
                                  key_name=keypair.name,
                                  nics=[{'net-id': networks[0]}],
                                  security_groups=[security_group.id],
                                  wait_for_avaliable=False,
                                  availability_zone=zone)

        with pytest.raises(InstanceError) as e:
            os_conn.create_server(name='vm_isolate',
                                  flavor=flavors[1].id,
                                  key_name=keypair.name,
                                  nics=[{'net-id': networks[0]}],
                                  security_groups=[security_group.id],
                                  wait_for_avaliable=False,
                                  availability_zone=zone)
        exp_message = ("Insufficient compute resources: "
                       "Requested instance NUMA topology cannot fit the "
                       "given host NUMA topology")
        logger.info("Instance status is error:\n{0}".format(str(e.value)))
        assert exp_message in str(e.value), "Unexpected reason of error"
    def test_cpu_pinning_migration(
            self, env, os_conn, networks, flavors, security_group,
            aggregate):
        """This test checks that cpu pinning executed successfully for
        instances created on computes with 1 NUMA
        Steps:
            1. Create net1 with subnet, net2 with subnet and router1 with
            interfaces to both nets
            2. Launch vm1 using m1.small.performance flavor on compute-1 and
            vm2 on compute-2.
            3. Migrate vm1 from compute-1
            4. Check CPU Pinning
        """
        hosts = aggregate.hosts

        vms = []
        cpus = get_cpu_distribition_per_numa_node(env)

        for i in range(2):
            vms.append(os_conn.create_server(
                name='vm{}'.format(i),
                flavor=flavors[0].id,
                nics=[{'net-id': networks[0]}],
                availability_zone='nova:{}'.format(hosts[i]),
                security_groups=[security_group.id]))
        for i in range(5):
            vm_host = getattr(vms[0], "OS-EXT-SRV-ATTR:host")

            vm_0_new = self.migrate(os_conn, vms[0])
            vm_host_0_new = getattr(vm_0_new, "OS-EXT-SRV-ATTR:host")

            assert vm_host_0_new != vm_host

            for vm in vms:
                host = getattr(vm, "OS-EXT-SRV-ATTR:host")
                self.check_cpu_for_vm(os_conn,
                                      os_conn.get_instance_detail(vm), 2,
                                      cpus[host])

            os_conn.wait_servers_ssh_ready(vms)
            network_checks.check_vm_connectivity(env, os_conn)
    def test_vms_connectivity_sriov_numa(self, env, os_conn, sriov_hosts,
                                         aggregate, vf_ports, flavors,
                                         ubuntu_image_id, keypair):
        """This test checks vms connectivity with all features
            Steps:
            1. Create net1 with subnet, router1 with interface to net1
            2. Create vm1 on vf port with m1.small.performance on 1 NUMA-node
            3. Check that vm is on one numa-node
            4. Check Ping 8.8.8.8 from vm1
        """
        hosts = list(set(sriov_hosts) & set(aggregate.hosts))
        if len(hosts) < 1:
            pytest.skip(
                "At least one host is required with SR-IOV and 2 numa nodes")
        vm = self.create_vm(os_conn, hosts[0], flavors[0], keypair,
                            vf_ports[0], ubuntu_image_id)
        cpus = get_cpu_distribition_per_numa_node(env)
        self.check_cpu_for_vm(os_conn, vm, 1, cpus[hosts[0]])
        network_checks.check_ping_from_vm(env, os_conn, vm,
                                          vm_keypair=keypair,
                                          vm_login='******')
def computes_for_mixed_hp_and_numa(os_conn, env, computes_with_mixed_hp,
                                   computes_with_numa_nodes):
    hosts = list(
        set(computes_with_mixed_hp) & set(computes_with_numa_nodes))
    conf_cpu = get_cpu_distribition_per_numa_node(env)
    conf_hp = get_hp_distribution_per_numa_node(env, numa_count=2)
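    # Keep only hosts that have at least 4 cpus and enough 2Mb/1Gb pages per
    # numa node; iterate over copies so that removing hosts is safe.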
    for host in hosts[:]:
        cpu0 = len(conf_cpu[host]['numa0'])
        cpu1 = len(conf_cpu[host]['numa1'])
        if cpu0 < 4 or cpu1 < 4:
            hosts.remove(host)
    for host in hosts[:]:
        hp2mb_0 = conf_hp[host]['numa0'][page_2mb]['total']
        hp1gb_0 = conf_hp[host]['numa0'][page_1gb]['total']
        hp2mb_1 = conf_hp[host]['numa1'][page_2mb]['total']
        hp1gb_1 = conf_hp[host]['numa1'][page_1gb]['total']
        if hp2mb_0 < 1024 or hp2mb_1 < 1024 or hp1gb_0 < 2 or hp1gb_1 < 2:
            hosts.remove(host)
    if len(hosts) < 2:
        pytest.skip("Insufficient count of computes")
    return hosts
    def test_vms_with_custom_threading_policy(self, env, os_conn,
                                              hosts_hyper_threading,
                                              flavors, networks, keypair,
                                              security_group, policy,
                                              expected_count):
        """This test checks vcpu allocation for vms with different values of
        flavor cpu_thread_policy

        Steps:
            1. Create net1 with subnet and router1 with interface to net1
            2. Create cpu pinning flavor with hw:numa_nodes=1 and required
            cpu_thread_policy
            3. Boot vm
            4. Check that both cpus are on different cores in case of
            cpu_thread_policy = isolate and on the same core in case of prefer
            or require
            5. Check ping 8.8.8.8 from vm
        """

        host = hosts_hyper_threading[0]
        cpus = get_cpu_distribition_per_numa_node(env)[host]

        flavors[0].set_keys({'hw:cpu_thread_policy': policy})
        vm = os_conn.create_server(name='vm', flavor=flavors[0].id,
                                   key_name=keypair.name,
                                   nics=[{'net-id': networks[0]}],
                                   security_groups=[security_group.id],
                                   availability_zone='nova:{}'.format(host),
                                   wait_for_avaliable=False)
        self.check_cpu_for_vm(os_conn, vm, 1, cpus)

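        # 'isolate' is expected to spread the vcpus over different cores (two
        # thread sibling lists), while 'prefer' and 'require' pack them onto
        # one core (a single list).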
        used_ts = self.get_vm_thread_siblings_lists(os_conn, vm)
        assert len(used_ts) == expected_count, (
            "Unexpected count of used cores. It should be {0} for '{1}' "
            "threading policy, but actual it's {2}").format(
            expected_count, policy, len(used_ts))

        network_checks.check_ping_from_vm(env, os_conn, vm, vm_keypair=keypair)
    def test_cpu_and_memory_distribution(self, env, os_conn, networks, flavors,
                                         security_group, aggregate, keypair):
        """This test checks distribution of cpu for vm with cpu pinning
        Steps:
            1. Create flavor with custom numa_cpu and numa_mem distribution
            2. Create net1 with subnet, net2 with subnet and router1 with
                interfaces to both nets
            3. Launch vm using created flavor
            4. Check memory allocation per numa node
            5. Check CPU allocation
            6. Ping 8.8.8.8 from vm1
        """

        host = aggregate.hosts[0]
        numa_count = 2
        cpus = get_cpu_distribition_per_numa_node(env)

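        # cpu_numa0/cpu_numa1 and mem_numa0/mem_numa1 are assumed to be class
        # attributes: comma-separated vcpu lists and memory sizes in MiB.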
        flavors[0].set_keys({'hw:numa_nodes': numa_count,
                             'hw:numa_cpus.0': self.cpu_numa0,
                             'hw:numa_cpus.1': self.cpu_numa1,
                             'hw:numa_mem.0': self.mem_numa0,
                             'hw:numa_mem.1': self.mem_numa1})

        exp_mem = {'0': self.mem_numa0, '1': self.mem_numa1}
        exp_pin = {'numa0': [int(cpu) for cpu in self.cpu_numa0.split(',')],
                   'numa1': [int(cpu) for cpu in self.cpu_numa1.split(',')]}
        vm = os_conn.create_server(name='vm', flavor=flavors[0].id,
                                   nics=[{'net-id': networks[0]}],
                                   key_name=keypair.name,
                                   security_groups=[security_group.id],
                                   availability_zone='nova:{}'.format(host))

        self.check_cpu_for_vm(os_conn, vm, numa_count, cpus[host], exp_pin)
        act_mem = self.get_memory_allocation_per_numa(os_conn, vm, numa_count)
        assert act_mem == exp_mem, "Actual memory allocation is not OK"
        network_checks.check_ping_from_vm(env, os_conn, vm, vm_keypair=keypair)
    def test_lm_ceph_for_cpu_pinning_and_hp(self, env, os_conn, nova_ceph,
                                            computes_with_mixed_hp, keypair,
                                            networks, aggregate, flavors,
                                            security_group, volume):
        """This test checks that live migration executed successfully for
            instances created on computes with ceph and 2 Numa nodes & mixed
            2Mb and 1Gb huge page.
            Required configuration: at least 2 hosts with mixed features; at
            most 3 vms are placed on one host
            Steps:
                1. Create net1 with subnet, net2 with subnet and router1 with
                   interfaces to both nets
                2. Launch instance vm1 with volume vol1 on compute-1 in net1
                   with flavor m1.small.hpgs_numa
                3. Launch instance vm2 on compute-2 in net2 with flavor
                   m1.small.hpgs_numa
                4. Make volume from vm2 volume_vm2
                5. Launch instance vm3 on compute-2 in net2 with volume_vm2
                   with flavor m1.small.hpgs_numa
                6. Live migrate vm1 on compute-2 and check that vm moved to
                   compute-2 with Active state
                7. Live migrate vm2 with block-migrate parameter on compute-1
                   and check that vm moved to compute-1 with Active state
                8. Live migrate vm3 on compute-1 and check that vm moved to
                   compute-1 with Active state
                9. Check vms connectivity
                10. Check page size for all instances
                11. Check allocated vcpus for all instances
                12. Check count of free huge pages
                13. Remove all vms and repeat actions for m1.small.hpgs_numa-2
        """
        hosts_to_use = list(set(computes_with_mixed_hp) & set(aggregate.hosts))
        if len(hosts_to_use) < 2:
            pytest.skip("At least 2 hosts with mixed features are required.")

        cpus = get_cpu_distribition_per_numa_node(env)
        initial_conf_hp = computes_configuration(env)

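        # flavors_param entries: (expected numa count, page size, flavor);
        # page_2mb/page_1gb are assumed to be sizes in KiB.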
        flavors_param = [(1, page_2mb, flavors[0]),
                         (2, page_1gb, flavors[1])]

        for numa, page_size, flavor in flavors_param:
            vm1 = os_conn.create_server(
                name='vm1', flavor=flavor.id,
                nics=[{'net-id': networks[0]}], key_name=keypair.name,
                availability_zone='nova:{}'.format(hosts_to_use[0]),
                security_groups=[security_group.id],
                block_device_mapping={'vda': volume.id})
            vm2 = os_conn.create_server(
                name='vm2', flavor=flavor.id,
                nics=[{'net-id': networks[1]}], key_name=keypair.name,
                availability_zone='nova:{}'.format(hosts_to_use[1]),
                security_groups=[security_group.id])
            volume_vm2 = self.create_volume_from_vm(os_conn, vm2)
            vm3 = os_conn.create_server(
                name='vm3', flavor=flavor.id,
                nics=[{'net-id': networks[1]}], key_name=keypair.name,
                availability_zone='nova:{}'.format(hosts_to_use[1]),
                security_groups=[security_group.id],
                block_device_mapping={'vda': volume_vm2})

            self.live_migrate(
                os_conn, vm1, hosts_to_use[1], block_migration=False)
            network_checks.check_vm_connectivity(env, os_conn)

            self.live_migrate(os_conn, vm2, hosts_to_use[0])
            network_checks.check_vm_connectivity(env, os_conn)

            self.live_migrate(
                os_conn, vm3, hosts_to_use[0], block_migration=False)
            network_checks.check_vm_connectivity(env, os_conn)

            expected_hosts = [(hosts_to_use[1], vm1),
                              (hosts_to_use[0], vm2),
                              (hosts_to_use[0], vm3)]
            for host, vm in expected_hosts:
                self.check_instance_page_size(os_conn, vm, page_size)
                self.check_cpu_for_vm(os_conn, vm, numa, cpus[host])

            expected_hosts_usage = [(hosts_to_use[0], 2), (hosts_to_use[1], 1)]
            final_conf_hp = computes_configuration(env)
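            # flavor.ram is in MiB and page_size is assumed to be in KiB, so
            # every instance consumes ram * 1024 / page_size huge pages.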
            for host, nr in expected_hosts_usage:
                exp_free = (initial_conf_hp[host][page_size]['total'] -
                            nr * (flavor.ram * 1024 / page_size))
                act_free = final_conf_hp[host][page_size]['free']
                assert exp_free == act_free, (
                    'Unexpected count of {0} huge pages are free:'
                    '{1} instead of {2}'.format(page_size, act_free, exp_free))

            self.delete_servers(os_conn)
    def test_lm_cinder_for_cpu_pinning_and_hp(self, env, os_conn, keypair,
                                              computes_with_mixed_hp, volume,
                                              networks, aggregate, flavors,
                                              security_group, ubuntu_image_id):
        """This test checks that live migration executed successfully for
             instances created on computes with cinder and 2 Numa nodes and
             mixed 2Mb and 1Gb huge pages
             Steps:
                 1. Create net1 with subnet, net2 with subnet and router1 with
                    interfaces to both nets
                 2. Launch instance vm1 with volume vol1 on compute-1 in net1
                    with flavor m1.small.hpgs_numa
                 3. Launch instance vm2 on compute-2 in net2 with the same
                    flavor
                 4. Live migrate vm1 with block-migrate parameter on compute-2
                    and check that vm moved to compute-2 with Active state
                 5. Run CPU load on vm2
                 6. Live migrate vm2 on compute-1 and check that vm moved to
                    compute-1 with Active state
                 7. Check vms connectivity
                 8. Check size of huge pages for each vm
                 9. Check vcpus allocation for each vm
                 10. Check count of free huge pages is correct for each host
                 11. Delete all vms
                 12. Repeat actions for flavor m1.small.hpgs_numa-2
        """
        hosts = list(set(computes_with_mixed_hp) & set(aggregate.hosts))
        if len(hosts) < 2:
            pytest.skip("At least 2 hosts with mixed features are required.")

        cpus = get_cpu_distribition_per_numa_node(env)
        initial_conf_hp = computes_configuration(env)

        flavors_param = [(1, page_2mb, flavors[0]),
                         (2, page_1gb, flavors[1])]

        for numa, page_size, flavor in flavors_param:
            vm1 = os_conn.create_server(
                name='vm1', flavor=flavor.id,
                nics=[{'net-id': networks[0]}], key_name=keypair.name,
                availability_zone='nova:{}'.format(hosts[0]),
                security_groups=[security_group.id],
                block_device_mapping={'vda': volume.id},
                wait_for_active=False, wait_for_avaliable=False)
            vm2 = os_conn.create_server(
                name='vm2', image_id=ubuntu_image_id,
                flavor=flavor.id,
                key_name=keypair.name, userdata=userdata,
                availability_zone='nova:{}'.format(hosts[1]),
                security_groups=[security_group.id],
                nics=[{'net-id': networks[1]}],
                wait_for_active=False, wait_for_avaliable=False)
            os_conn.wait_servers_active([vm1, vm2])
            os_conn.wait_servers_ssh_ready([vm1, vm2])
            os_conn.wait_marker_in_servers_log([vm2], marker)

            self.live_migrate(os_conn, vm1, hosts[1], block_migration=False)
            check_vm_connectivity_cirros_ubuntu(env, os_conn, keypair,
                                                cirros=vm1, ubuntu=vm2)

            self.cpu_load(env, os_conn, vm2, vm_keypair=keypair)
            self.live_migrate(os_conn, vm2, hosts[0])
            check_vm_connectivity_cirros_ubuntu(env, os_conn, keypair,
                                                cirros=vm1, ubuntu=vm2)
            self.cpu_load(env, os_conn, vm2, vm_keypair=keypair, action='stop')

            expected_hosts = [(hosts[1], vm1), (hosts[0], vm2)]
            for host, vm in expected_hosts:
                self.check_instance_page_size(os_conn, vm, page_size)
                self.check_cpu_for_vm(os_conn, vm, numa, cpus[host])

            final_conf_hp = computes_configuration(env)
            for host in hosts:
                exp_free = (initial_conf_hp[host][page_size]['total'] -
                            1 * (flavor.ram * 1024 / page_size))
                act_free = final_conf_hp[host][page_size]['free']
                assert exp_free == act_free, (
                    'Unexpected count of {0} huge pages are free: '
                    '{1} instead of {2}'.format(page_size, act_free, exp_free))
            self.delete_servers(os_conn)
    def test_vms_with_custom_cpu_thread_policy_less_resources(
            self, env, os_conn, hosts_hyper_threading, flavors, networks,
            keypair, security_group, policy):
        """This test checks vms with cpu_thread_policy prefer/require parameter
         with less resources

        Steps:
            1. Create net1 with subnet and router1 with interface to net1
            2. Create cpu pinning flavor with hw:numa_nodes=1 and required
            cpu_thread_policy
            3. Boot vms to have no ability to create vm on one core
            4. Boot vm with cpu pinning flavor with required cpu_thread_policy
            5. For 'require' policy check that vm is in error state, for
            policy 'prefer' vm should be active and available
        """

        host = hosts_hyper_threading[0]
        cpus = get_cpu_distribition_per_numa_node(env)[host]
        zone = 'nova:{}'.format(host)
        flv_prefer, flv_isolate, flv_require = flavors

        numa_count = len(cpus.keys())
        ts_lists = [list(set(self.get_thread_siblings_lists(os_conn, host, i)))
                    for i in range(numa_count)]
        if len(ts_lists[0]) <= 1 and len(ts_lists[1]) <= 1:
            pytest.skip("Configuration is NOK: the instance should be on one "
                        "numa node and use cpus from different cores")
        # Vms boot order depends on current environment
        #
        # If only 1 thread siblings list is on numa0 => we're not able to boot
        # vm on different cores anyway. Steps are:
        # 1) allocate whole numa0 by vm with flavor_require => numa0 is busy
        # 2) boot N-2 vms with the same flavor (N=count of thread siblings list
        #  is on numa1) => 2 cores are free
        # 3) Boot 1 vm with flavor_isolate => 2 vcpus from different cores to
        #  be allocated => 2 vcpus from different cores are free
        #
        # If 2 thread siblings list is on numa0 steps are:
        # 1) Boot 1 vm with flavor_isolate => 2 vcpus from different cores to
        #  be allocated => 2 vcpus from different cores are free
        # 2) Boot N vms with flavor_require (N=count of thread siblings list
        #  is on numa1)
        #
        # If more than 2 thread siblings list is on numa0 steps are:
        # 1) boot N-2 vms with flavor_require (N=count of thread siblings list
        #  is on numa0) => 2 cores are free
        # 2) boot vm with flavor_isolate => 2 vcpus from different cores are
        #  free
        # 3) Boot N vms with flavor_require (N=count of thread siblings list
        #  is on numa1)
        if len(ts_lists[0]) == 1:
            boot_order = [(flv_require, 1),
                          (flv_require, len(ts_lists[1]) - 2),
                          (flv_isolate, 1)]
        elif len(ts_lists[0]) == 2:
            boot_order = [(flv_isolate, 1),
                          (flv_require, len(ts_lists[1]))]
        else:
            boot_order = [(flv_require, len(ts_lists[0]) - 2),
                          (flv_isolate, 1),
                          (flv_require, len(ts_lists[1]))]

        for (flavor, count) in boot_order:
            for i in range(count):
                os_conn.create_server(name='vm{0}_{1}'.format(i, flavor.name),
                                      flavor=flavor.id,
                                      key_name=keypair.name,
                                      nics=[{'net-id': networks[0]}],
                                      security_groups=[security_group.id],
                                      wait_for_avaliable=False,
                                      availability_zone=zone)
        if policy == 'prefer':
            vm = os_conn.create_server(name='vm_{0}'.format(flv_prefer.name),
                                       flavor=flv_prefer.id,
                                       key_name=keypair.name,
                                       nics=[{'net-id': networks[0]}],
                                       security_groups=[security_group.id],
                                       wait_for_avaliable=False,
                                       availability_zone=zone)
            network_checks.check_ping_from_vm(env, os_conn, vm,
                                              vm_keypair=keypair)
        else:
            with pytest.raises(InstanceError) as e:
                os_conn.create_server(name='vm', flavor=flv_require.id,
                                      nics=[{'net-id': networks[0]}],
                                      key_name=keypair.name,
                                      security_groups=[security_group.id],
                                      availability_zone='nova:{}'.format(host),
                                      wait_for_avaliable=False)
            expected_message = ("Insufficient compute resources: "
                                "Requested instance NUMA topology cannot fit "
                                "the given host NUMA topology")
            logger.info("Instance status is error:\n{0}".format(str(e.value)))
            assert expected_message in str(e.value), (
                "Unexpected reason of instance error")
    def test_lm_ceph_for_cpu_pinning(self, env, os_conn, networks, nova_ceph,
                                     volume, flavors, security_group,
                                     aggregate):
        """This test checks that live migration executed successfully for
            instances created on computes with ceph and 2 Numa nodes
            Steps:
                1. Create net1 with subnet, net2 with subnet and  router1 with
                   interfaces to both nets
                2. Launch instance vm1 with volume vol1 on compute-1 in net1
                   with flavor m1.medium.performance_1
                3. Launch instance vm2 on compute-2 in net2 with flavor
                   m1.medium.performance_1
                4. Make volume from vm2 volume_vm2
                5. Launch instance vm3 on compute-2 in net2 with volume_vm2
                   with flavor m1.medium.performance_1
                6. Live migrate vm1 on compute-2 and check that vm moved to
                   compute-2 with Active state
                7. Live migrate vm2 with block-migrate parameter on compute-1
                   and check that vm moved to compute-1 with Active state
                8. Live migrate vm3 on compute-1 and check that vm moved to
                   compute-1 with Active state
                9. Check vms connectivity
                10. Remove vm1, vm2 and vm3
                11. Repeat actions for flavor m1.medium.performance_2
        """
        hosts = aggregate.hosts
        cpus = get_cpu_distribition_per_numa_node(env)

        for numa_count, cpu_flavor in enumerate(flavors, start=1):
            vm_1 = os_conn.create_server(
                name='vm1', flavor=cpu_flavor.id,
                nics=[{'net-id': networks[0]}],
                availability_zone='nova:{}'.format(hosts[0]),
                security_groups=[security_group.id],
                block_device_mapping={'vda': volume.id})
            vm_2 = os_conn.create_server(
                name='vm2', flavor=cpu_flavor.id,
                availability_zone='nova:{}'.format(hosts[1]),
                security_groups=[security_group.id],
                nics=[{'net-id': networks[1]}])
            volume_vm2 = self.create_volume_from_vm(os_conn, vm_2, size=20)
            vm_3 = os_conn.create_server(
                name='vm3', flavor=cpu_flavor.id,
                nics=[{'net-id': networks[1]}],
                availability_zone='nova:{}'.format(hosts[1]),
                security_groups=[security_group.id],
                block_device_mapping={'vda': volume_vm2})
            vms = [vm_1, vm_2, vm_3]
            network_checks.check_vm_connectivity(env, os_conn)

            self.live_migrate(os_conn, vms[0], hosts[1], block_migration=False)
            self.check_cpu_for_vm(os_conn, vms[0], numa_count, cpus[hosts[1]])

            self.live_migrate(os_conn, vms[1], hosts[0])
            self.check_cpu_for_vm(os_conn, vms[1], numa_count, cpus[hosts[0]])

            self.live_migrate(os_conn, vms[2], hosts[0], block_migration=False)
            self.check_cpu_for_vm(os_conn, vms[2], numa_count, cpus[hosts[0]])

            network_checks.check_vm_connectivity(env, os_conn)
            self.delete_servers(os_conn)
    def test_vms_with_custom_cpu_thread_policy_less_resources(
            self, env, os_conn, hosts_hyper_threading, flavors, networks,
            keypair, security_group, policy):
        """This test checks vms with cpu_thread_policy prefer/require parameter
         with less resources

        Steps:
            1. Create net1 with subnet and router1 with interface to net1
            2. Create cpu pinning flavor with hw:numa_nodes=1 and required
            cpu_thread_policy
            3. Boot vms to have no ability to create vm on one core
                Steps are:
                1) boot M + N - 1 vms with flavor_require
                    N = count of thread siblings lists on numa0
                    M = count of thread siblings lists on numa1
                    As a result 1 core is free
                2) create 2 vms with 1 vcpu and 'prefer' policy
                3) delete 1 vm with 2 vcpu from step 1
                4) create 1 vm with 1 vcpu and 'prefer' policy
                5) delete 1 vm with 1 vcpu from step 2
            4. Boot vm with cpu pinning flavor with required cpu_thread_policy
            5. For 'require' policy check that vm is in error state, for
            policy 'prefer' vm should be active and available
        """

        host = hosts_hyper_threading[0]
        cpus = get_cpu_distribition_per_numa_node(env)[host]
        zone = 'nova:{}'.format(host)
        flv_prefer, _, flv_require, flv_prefer_1_vcpu = flavors

        numa_count = len(cpus.keys())
        ts_lists = [list(set(self.get_thread_siblings_lists(os_conn, host, i)))
                    for i in range(numa_count)]
        if len(ts_lists[0]) <= 1 and len(ts_lists[1]) <= 1:
            pytest.skip("Configuration is NOK: the instance should be on one "
                        "numa node and use cpus from different cores")

        def create_server_with_flavor(prefix, flavor):
            return os_conn.create_server(
                name='vm{0}_{1}'.format(prefix, flavor.name),
                flavor=flavor.id,
                key_name=keypair.name,
                nics=[{'net-id': networks[0]}],
                security_groups=[security_group.id],
                wait_for_avaliable=False,
                availability_zone=zone)

        # Boot vms to have no ability to create vm on one core
        for i in range(len(ts_lists[0]) + len(ts_lists[1]) - 1):
            vm_2_vcpu = create_server_with_flavor(prefix=i, flavor=flv_require)

        for i in range(2):
            vm_1_vcpu = create_server_with_flavor(prefix="{0}_vcpu1".format(i),
                                                  flavor=flv_prefer_1_vcpu)
        vm_2_vcpu.delete()
        os_conn.wait_servers_deleted([vm_2_vcpu])
        create_server_with_flavor(prefix="_vcpu1_prefer",
                                  flavor=flv_prefer_1_vcpu)
        vm_1_vcpu.delete()
        os_conn.wait_servers_deleted([vm_1_vcpu])
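        # Two vcpus are free now, but they belong to different cores, so a
        # 'require' flavor cannot fit while 'prefer' still can.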

        # Boot vm with cpu pinning flavor with required cpu_thread_policy
        if policy == 'prefer':
            vm = os_conn.create_server(name='vm_{0}'.format(flv_prefer.name),
                                       flavor=flv_prefer.id,
                                       key_name=keypair.name,
                                       nics=[{'net-id': networks[0]}],
                                       security_groups=[security_group.id],
                                       wait_for_avaliable=False,
                                       availability_zone=zone)
            os_conn.wait_servers_ssh_ready(os_conn.get_servers())
            network_checks.check_ping_from_vm(env, os_conn, vm,
                                              vm_keypair=keypair)
        else:
            with pytest.raises(InstanceError) as e:
                os_conn.create_server(name='vm', flavor=flv_require.id,
                                      nics=[{'net-id': networks[0]}],
                                      key_name=keypair.name,
                                      security_groups=[security_group.id],
                                      availability_zone='nova:{}'.format(host),
                                      wait_for_avaliable=False)
            expected_message = ("Insufficient compute resources: "
                                "Requested instance NUMA topology cannot fit "
                                "the given host NUMA topology")
            logger.info("Instance status is error:\n{0}".format(str(e.value)))
            assert expected_message in str(e.value), (
                "Unexpected reason of instance error")
    def test_vms_connectivity_with_hp_and_numa(self, env, os_conn,
                                               computes_for_mixed_hp_and_numa,
                                               aggregate, networks,
                                               security_group, flavors):
        """This test checks vms connectivity with huge pages and cpu pinning.

            At least 2 computes with mixed huge pages and cpu pinning are
            required. 2 numa nodes should be available per compute. 512 x 2Mb
            and 1 x 1Gb huge pages should be available for each numa node.
            Specific distribution of cpus per numa nodes is required for each
            compute: at least 4 cpus for one node and at least 2 cpus for
            another one.

            Steps:
            1. Create net1 with subnet, net2 with subnet and router1 with
            interfaces to both nets
            2. Create vm1 in net1 on host1 using flavor m1.small.hpgs_n-2
             (hw:mem_page_size=2048 and hw:numa_nodes=2)
            3. Create vm2 in net1 on host1 using flavor m1.small.performance-2
             (hw:numa_nodes=2, without huge pages)
            4. Create vm3 in net2 on host2 using flavor m1.small.hpgs_n-1
             (hw:mem_page_size=2048 and hw:numa_nodes=1)
            5. Create vm4 in net2 on host2 using flavor m1.small.hpgs
             (hw:mem_page_size=1048576 without cpu pinning)
            6. Create vm5 in net1 on host1 using flavor m1.small.old
             (without features)
            7. Check cpus allocation for all vms
            8. Check page size for all vms
            9. Check free huge pages when all vms are running
            10. Check connectivity between all vms
        """
        hosts = computes_for_mixed_hp_and_numa
        initial_conf_hp = computes_configuration(env)
        cpus = get_cpu_distribition_per_numa_node(env)

        vms = {}
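        # vms_params entries: (flavor, host, network, expected numa count or
        # None, expected page size or None).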
        vms_params = [(flavors[3], hosts[0], networks[0], 2, page_2mb),
                      (flavors[4], hosts[0], networks[0], 2, None),
                      (flavors[2], hosts[1], networks[1], 1, page_2mb),
                      (flavors[1], hosts[1], networks[1], None, page_1gb),
                      (flavors[0], hosts[0], networks[0], None, None)]

        for i, (flv, host, net, numa_count, size) in enumerate(vms_params):
            vm = os_conn.create_server(
                name='vm{}'.format(i), flavor=flv.id,
                nics=[{'net-id': net}],
                availability_zone='nova:{}'.format(host),
                security_groups=[security_group.id], wait_for_active=False,
                wait_for_avaliable=False)
            vms.update({vm: {'numa': numa_count, 'size': size}})
        os_conn.wait_servers_active(vms)
        os_conn.wait_servers_ssh_ready(vms)

        for vm, param in vms.items():
            self.check_instance_page_size(os_conn, vm, param['size'])
            if param['numa'] is not None:
                host = getattr(vm, 'OS-EXT-SRV-ATTR:host')
                self.check_cpu_for_vm(os_conn, vm, param['numa'], cpus[host])

        network_checks.check_vm_connectivity(env, os_conn)

        final_conf = computes_configuration(env)
        vms_distribution = [(hosts[0], 0, 1), (hosts[1], 1, 1), ]
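        # Each 1Gb vm is assumed to consume one 1Gb page and each 2Mb vm
        # 512 x 2Mb pages (i.e. 1024 MiB of RAM per instance).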
        for (host, nr_1gb, nr_2mb) in vms_distribution:
            exp_free_1g = initial_conf_hp[host][page_1gb]['total'] - nr_1gb * 1
            exp_free_2m = (
                initial_conf_hp[host][page_2mb]['total'] - nr_2mb * 512)
            assert exp_free_1g == final_conf[host][page_1gb]['free']
            assert exp_free_2m == final_conf[host][page_2mb]['free']
    def test_negative_lack_of_resources_on_pci_device(
            self, os_conn, env, ubuntu_image_id, keypair, mixed_hosts,
            vf_ports, flavors, networks, cleanup):
        """This test checks error state for vm when resources are not enough
            on pci device.

            Steps:
            1. Create network net1 with subnet
            2. Create router, set gateway and add interface for the network
            3. Create flavor for 2Mb huge pages
            4. Create flavor for cpu pinning with hw:numa_nodes=1
            5. Boot vm with the 1st flavor with vf_port on numa without pci
            device (usually it's numa1)
            6. Check that vms are in error state since no pci device found
            7. Redo for the 2nd flavor
        """
        host = mixed_hosts[0]
        cpus = get_cpu_distribition_per_numa_node(env)[host]
        hps = get_hp_distribution_per_numa_node(env)[host]

        # Calculate the number of vcpus/huge pages for each numa node in order
        # to occupy all of them. Usually the pci device is on numa1 => the
        # next step (i.e. removing the vm from numa0) leaves a numa node with
        # huge pages and cpu pinning, but without sr-iov
        vms = {}

        # Order numa nodes so that the one with more cpus is occupied first
        sorted_cpus = sorted(cpus.items(), key=lambda x: len(x[1]),
                             reverse=True)
        for numa, cpu_list in sorted_cpus:
            free_2mb = hps[numa][page_2mb]['free']
            flv = os_conn.nova.flavors.create(name='flavor_{}'.format(numa),
                                              ram=free_2mb * 2, disk=5,
                                              vcpus=len(cpu_list))
            self.created_flvs.append(flv)
            flv.set_keys({'hw:cpu_policy': 'dedicated',
                          'aggregate_instance_extra_specs:pinned': 'true',
                          'hw:numa_nodes': 1,
                          'hw:mem_page_size': page_2mb})

            vm = os_conn.create_server(
                name='vm_to_{0}'.format(numa), image_id=ubuntu_image_id,
                key_name=keypair.name, nics=[{'net-id': networks[0]}],
                availability_zone='nova:{}'.format(host), flavor=flv.id,
                wait_for_avaliable=False)
            nodeset = self.get_nodesets_for_vm(os_conn, vm)[0]
            assert numa == "numa{0}".format(nodeset), (
                "Nodeset used for {0} should be {1}, but it's {2}. "
                "It's critical for this test since pci device is on numa1 only"
                .format(vm, numa, "numa{0}".format(nodeset)))
            vms[numa] = vm

        # Remove vm from numa0
        vms['numa0'].delete()
        os_conn.wait_servers_deleted([vms['numa0']])

        # Boot vms with pci device
        for i, flavor in enumerate(flavors):
            with pytest.raises(InstanceError) as e:
                os_conn.create_server(
                    name='vm', image_id=ubuntu_image_id,
                    key_name=keypair.name, flavor=flavor.id,
                    availability_zone='nova:{}'.format(host),
                    nics=[{'port-id': vf_ports[i]}])
            expected_message = ("Insufficient compute resources: "
                                "Requested instance NUMA topology together "
                                "with requested PCI devices cannot fit the "
                                "given host NUMA topology")
            logger.info("Instance status is error:\n{0}".format(str(e.value)))
            assert expected_message in str(e.value), "Unexpected reason"