Example #1
0
    def host_passes(self, host_state, filter_properties):
        """CpuBindFilter

        Decide whether ``host_state`` can host the requested instance,
        based on CPU binding / hyper-threading / NUMA affinity options
        extracted from the scheduler hints and flavor extra specs.

        Returns True when the host passes the filter, False otherwise.

        The instance numa topology is like:
        {u'instance_uuid': u'786d1430-dfe2-4423-8522-4a5394715b32',
        u'cells': [{u'cpuset': [0],
                   u'id': 0,
                   u'memory': 256},
                   {u'cpuset': [1,2,3],
                   u'id': 1,
                   u'memory': 768 }],
        u'id': 122}
        """

        inst_prop = filter_properties['request_spec'].get(
            'instance_properties')
        inst_numa_top = inst_prop.get('numa_topology') if inst_prop else None
        # get the pagesize from instance_type
        instance_type = filter_properties['request_spec'].get(
            'instance_type', None)
        pagesize = None
        if instance_type:
            pagesize = instance_type.get('extra_specs',
                                         {}).get("hw:mem_page_size", None)

        # Normalize the instance numa topology into the project's internal
        # dict form before any fitting is attempted.
        if inst_numa_top:
            inst_numa_top = utils.convert_inst_numa_topology(inst_numa_top)

        vcpus = filter_properties.get('instance_type').get('vcpus')
        mem = filter_properties.get('instance_type').get('memory_mb')

        # Affinity options decoded from scheduler hints:
        # enable_bind (CPU pinning), enable_ht (hyper-threading),
        # any_mode (no strict placement), numa_opts (0/1/2 NUMA mode).
        enable_bind, enable_ht, any_mode, numa_opts = \
            utils.get_inst_affinity_mask(filter_properties)

        request_spec = filter_properties['request_spec']
        # Deep-copy so the trial pinning below never mutates host_state.
        host_numa_top = copy.deepcopy(
            jsonutils.loads(host_state.numa_topology or '{}'))
        if host_numa_top and host_numa_top.get('nova_object.data'):
            # Host reports a NUMA topology: convert and continue filtering.
            host_numa_top = utils.convert_host_numa_topology(host_numa_top)
        elif numa_opts or enable_bind or not any_mode or pagesize:
            # Host has no NUMA topology but the request needs one: reject.
            LOG.debug('Host %s don\'t support numa,don\'t pass',
                      host_state.host)
            LOG.debug('options:enable_bind(%s), any_mode(%s), numa_opts(%s), '
                      'pagesize(%s)' %
                      (enable_bind, any_mode, numa_opts, pagesize))
            return False
        else:
            # No NUMA on host and none required: trivially pass.
            LOG.debug('Host %s don\'t support numa, pass', host_state.host)
            return True

        context = filter_properties['context'].elevated()
        hw_instance_extras = objects.HuaweiInstanceExtra.get_by_host(
            context, host_state.host)

        instance_uuids = request_spec.get('instance_uuids', [])
        instance_uuid = instance_uuids[0] if instance_uuids else None

        # NUMA and non-NUMA instances may not share a host.
        if utils.is_host_numa_confict(hw_instance_extras, numa_opts,
                                      instance_uuid):
            LOG.debug(
                'Numa instance cannot booted with non-numa '
                'instance in host %s', host_state.host)
            return False

        # any-mode and non-any-mode instances may not share a host.
        if utils.is_any_node_confict(hw_instance_extras, any_mode,
                                     instance_uuid):
            LOG.debug(
                'any-mode instance cannot booted with non-any-mode '
                'instance in host %s', host_state.host)
            return False
        # No binding, any-mode, no NUMA mode, no explicit pagesize:
        # nothing left to check, the host passes.
        if (enable_bind, any_mode, numa_opts) == (False, True,
                                                  0) and not pagesize:
            LOG.debug('Cpubindfilter passed, enable_bind(%s), any_mode(%s),'
                      ' numa_opts(%s)' % (enable_bind, any_mode, numa_opts))
            return True
        if not host_state.numa_topology:
            LOG.info(
                "No numa topology info of host: %s found, cpu bind "
                "filter failed", host_state.host)
            return False
        # when a numa vm migrate from host1 to host2, and host1 is empty
        # if you want to forbid to create a no-numa vm in host1
        # call this function earlier
        hw_instance_extras = \
            utils._extend_hw_instance_extra(context, host_state.host)
        # Mark already-bound pcpus on the host topology copy so the trial
        # pinning below only uses genuinely free cores.
        utils.update_numa_topo_bind_info(host_numa_top, hw_instance_extras,
                                         any_mode, instance_uuid)
        LOG.debug(
            "CpuBindFilter trying to filter instance: %(instance_uuid)s,"
            " with host_numa_top:%(host_numa_top)s of host:%(host)s", {
                "instance_uuid": instance_uuid,
                "host_numa_top": host_numa_top,
                "host": host_state.host
            })
        enable_evs = utils.get_evs_affinity(filter_properties)

        # SR-IOV (numa_opts == 2) and EVS requests constrain placement to
        # the NUMA node hosting the corresponding device.
        sriov_numa_id = None
        evs_numa_id = None
        if numa_opts == 2:
            try:
                sriov_numa_id = utils.get_numa_id_with_vf_request(
                    host_state, filter_properties)
            except exception.NovaException as ex:
                LOG.info(ex.format_message())
                return False
        if enable_evs:
            try:
                evs_numa_id = utils.get_specific_numa(host_state,
                                                      filter_properties)
            except exception.NovaException as error:
                # if catch the exception, the host is not suitable for creating
                # evs instances.
                LOG.debug(error.format_message())
                return False

        if sriov_numa_id and enable_evs and sriov_numa_id != evs_numa_id:
            LOG.info("Both EVS Numa and IO Numa are specified, But SRIOV"
                     "(node:%s) and EVS(node:%s) devices are not in one "
                     "numa cell." % (sriov_numa_id, evs_numa_id))
            return False
        specific_numa_id = sriov_numa_id or evs_numa_id

        # Trial pinning: the helpers raise NovaException when the instance
        # cannot be fitted onto the (copied) host topology.
        try:
            if inst_numa_top and not numa_opts:
                request_spec = filter_properties.get('request_spec', {})
                instance = request_spec.get('instance_properties', {})
                fitted_inst_numas = utils.get_fitted_inst_numas(
                    instance, host_state)
                utils.pin_vcpu_with_inst_numa(enable_bind, enable_ht, any_mode,
                                              fitted_inst_numas, host_numa_top)
            else:
                utils.pin_vcpu(enable_bind,
                               enable_ht,
                               any_mode,
                               vcpus,
                               mem,
                               host_numa_top,
                               numa_opts,
                               numa_id=specific_numa_id,
                               pagesize=pagesize,
                               host_state=host_state)
        except exception.NovaException as ex:
            LOG.debug(ex.format_message())
            return False
        return True
Example #2
0
def update_cpu_bind_info_to_xml(xml, instance, driver, network_info):
    """Rewrite the libvirt domain XML with the CPU binding stored in the DB.

    Loads the persisted core binding for ``instance``, adjusts the
    <cpu><topology>, <cputune>, <vcpu>, <numatune> and <cpu><numa> elements
    accordingly, persists the (possibly adjusted) binding back to the DB and
    returns the serialized XML.

    :param xml: libvirt domain XML string to modify
    :param instance: nova instance object (uuid, task_state, numa_topology)
    :param driver: compute driver; its host_state stats provide the host
                   NUMA sibling information
    :param network_info: network info used to detect EVS vhostuser ports
    :returns: the (possibly modified) domain XML string
    :raises Exception: when an even hyper-thread vcpu topology cannot be
                       derived in 'lock' / sync mode
    """

    admin_context = nova_context.get_admin_context()
    inst_extra = objects.HuaweiInstanceExtra.get_by_instance_uuid(
        admin_context,
        instance.uuid)
    scheduler_hints = jsonutils.loads(inst_extra.scheduler_hints or '{}')

    def _is_sriov_instance(xml):
        # An interface of type 'hostdev' indicates an SR-IOV passthrough VF.
        doc = etree.fromstring(xml)
        interfaces = doc.findall('./devices/interface')
        for interface in interfaces:
            if "type" in interface.keys():
                if interface.attrib['type'] == 'hostdev':
                    return True
        return False

    LOG.debug("Instance %s in %s task_state, get binding_info from db"
              % (instance.uuid, instance.task_state))
    bind_info = jsonutils.loads(inst_extra.core_bind or '[]')
    vcpus = [cell['vcpu'] for cell in bind_info]
    pcpus = [cell['pcpus'] for cell in bind_info]
    # Map each vcpu id to its list of pinned pcpus.
    bind_info = dict(zip(vcpus, pcpus))
    enable_bind, enable_ht, any_mode, numa_opts = \
        hw_shed_utils.get_inst_affinity_mask(
            dict(scheduler_hints=scheduler_hints))
    if instance.numa_topology:
        cell = instance.numa_topology.cells[0]
        instance_numa = {"cells": [{"mem": {"total": cell.memory},
                                    "cpuset": list(cell.cpuset),
                                    "id": cell.id,
                                    "pagesize": cell.pagesize}]}
        if numa_opts:
            instance_numa['cells'][0]['is_huawei'] = True
    else:
        instance_numa = None

    # if existed vhostuser port, we should change the xml
    if libvirt_ext_utils.get_physical_network(network_info):

        # modify xml for evs vhostuser port, if the vif details exist physical
        # network, we think the port is vhostuser port.
        xml = modify_xml_for_evs_vhostuser(xml)
        LOG.debug(_("modify_xml_for_evs_vhostuser is %s"), xml)

    LOG.debug("Binding cpu, the bind_info is %(bind_info)s, the instance"
              " numa is %(instance_numa)s", {'bind_info': bind_info,
                                             'instance_numa': instance_numa})

    # No persisted binding: nothing to rewrite.
    if not bind_info:
        return xml

    doc = etree.fromstring(xml)
    cpu_element = doc.find("cpu")
    vcpu_topo = (cpu_element.find("topology") if cpu_element is not None else
                None)

    ht = scheduler_hints.get('hyperThreadAffinity', 'any')
    db_vcpu_topo = {}
    if vcpu_topo is not None:
        # BUG FIX: these attribute reads previously happened BEFORE the
        # None check above, so a domain XML without <cpu><topology> raised
        # AttributeError instead of skipping the topology rewrite.
        sockets = vcpu_topo.get("sockets")
        cores = vcpu_topo.get("cores")
        threads = vcpu_topo.get("threads")
        if ht == 'lock' and enable_ht and len(bind_info) > 2 \
                and not _is_sriov_instance(xml):
            threads = 2
            cores = 1
            if len(bind_info) % 2 == 0:
                # Floor division: identical to '/' on Python 2 ints and
                # avoids a float sockets attribute (e.g. "1.0") on Python 3.
                sockets = len(bind_info) // (threads * cores) - 1
                vcpu_topo.set("sockets", str(sockets))
                vcpu_topo.set("threads", str(threads))
                vcpu_topo.set("cores", str(cores))
            else:
                msg = ("Cannot set vcpu topology in sync mode, the bind_info"
                       " is %(bind_info)s, the instance numa is %(instance_numa"
                       ")s" % {'bind_info': bind_info,
                               'instance_numa': instance_numa})
                raise Exception(msg)
        else:
            # Fold an even socket or core count in half to expose 2 threads.
            if threads == '1' and enable_ht:
                if int(sockets) % 2 == 0:
                    sockets = str(int(sockets) // 2)
                    vcpu_topo.set("sockets", sockets)
                    threads = str(int(threads) * 2)
                    vcpu_topo.set("threads", threads)
                elif int(cores) % 2 == 0:
                    cores = str(int(cores) // 2)
                    vcpu_topo.set("cores", cores)
                    threads = str(int(threads) * 2)
                    vcpu_topo.set("threads", threads)
                else:
                    msg = ("Cannot set vcpu topology in sync mode, the bind_info"
                           " is %(bind_info)s, the instance numa is %(instance_numa"
                           ")s" % {'bind_info': bind_info,
                                   'instance_numa': instance_numa})
                    raise Exception(msg)

        db_vcpu_topo = {'sockets': int(sockets), 'cores': int(cores),
                        'threads': int(threads)}
    hw_shed_utils.update_cpu_bind_info_to_db(bind_info, instance.uuid,
                                             instance_numa,
                                             vcpu_topo=db_vcpu_topo)

    # Drop any pre-existing cputune elements; they are rebuilt below.
    cpu = doc.findall('cputune')
    for c in cpu:
        doc.remove(c)

    emulator_pin_bindcpu = None
    if ht == 'lock':
        if CONF.emulator_pin_bindcpu and _is_sriov_instance(xml):
            emulator_pin_bindcpu = CONF.emulator_pin_bindcpu
        else:
            cells = jsonutils.loads(
                driver.host_state._stats['numa_topology']).get(
                    'nova_object.data', {}).get('cells', [])

            all_siblings = []
            for cell in cells:
                _siblings = cell.get('nova_object.data', {}).get('siblings', [])
                all_siblings = all_siblings + _siblings

            if len(bind_info) > 2:
                # Reserve the sibling core of the highest-numbered vcpu's
                # first pcpu for the emulator and unbind the vcpus on it.
                last_cpu_idx = bind_info[sorted(bind_info.keys())[-1]][0]
                for core in all_siblings:
                    if last_cpu_idx in core:
                        # BUG FIX: iterate over a snapshot — deleting from
                        # bind_info while iterating its live items() view
                        # raises RuntimeError on Python 3.
                        for (k, v) in list(bind_info.items()):
                            if v[0] in core:
                                del bind_info[k]

                        emulator_pin_bindcpu = ",".join([str(c) for c in core])
                        break

                # Re-key the surviving vcpus to a dense 0..n-1 range.
                new_bind_info = {}
                sorted_keys = sorted(bind_info.keys())
                for idx, key in enumerate(sorted_keys):
                    new_bind_info[idx] = bind_info[key]

                bind_info = new_bind_info

    emulatorpin_cpuset = []
    cputune = etree.Element("cputune")
    for k, v in bind_info.items():
        cpuset = ','.join([str(c) for c in v])
        cputune.append(etree.Element("vcpupin", vcpu=str(k), cpuset=cpuset))
        emulatorpin_cpuset.extend(v)
    # Default emulator pin set: union of all pinned pcpus, sorted.
    emulatorpin_cpuset = list(set(emulatorpin_cpuset))
    emulatorpin_cpuset.sort()
    default_emulatorpin_cpuset_str = ','.join(map(lambda x: str(x),
                                                  emulatorpin_cpuset))
    LOG.debug("emulatorpin_cpuset is %s",
              emulator_pin_bindcpu or default_emulatorpin_cpuset_str)
    cputune.append(etree.Element("emulatorpin", cpuset=emulator_pin_bindcpu or default_emulatorpin_cpuset_str))
    doc.append(cputune)
    # NOTE: when use huawei numa or bind, we should clean the cpuset of
    # vcpu element, if bind_info isn't {}, that means we shouldn't specify
    # the cpuset of vcpu element, if bind_info is {}, it will be returned
    # above.
    vcpu_element = doc.findall('vcpu')
    for vcpu_e in vcpu_element:
        doc.remove(vcpu_e)
    vcpu_element = etree.Element("vcpu")
    vcpu_element.text = str(len(bind_info))
    doc.append(vcpu_element)
    if instance_numa and instance_numa['cells'][0].get('is_huawei'):
        # Huawei-NUMA instances get a strict memory node binding.
        numa = doc.findall('numatune')
        for nm in numa:
            doc.remove(nm)
        numatune = etree.Element("numatune")
        cell_id = instance_numa['cells'][0]['id']
        cell_e = etree.Element("memory", mode="strict", nodeset=str(cell_id))
        numatune.append(cell_e)
        doc.append(numatune)

    def _update_numa_cell(doc, bind_info):
        # Keep the guest NUMA cell cpu list in sync with the final vcpu ids.
        cells = doc.findall('./cpu/numa/cell')
        for cell in cells:
            cell.attrib['cpus'] = ','.join([str(vcpu) for vcpu in bind_info.keys()])

    _update_numa_cell(doc, bind_info)
    return etree.tostring(doc, pretty_print=True)
Example #3
0
def update_cpu_bind_info_to_xml(xml, instance, driver, network_info):
    """Rewrite the libvirt domain XML with the CPU binding stored in the DB.

    Loads the persisted core binding for ``instance``, adjusts the
    <cpu><topology>, <cputune>, <vcpu>, <numatune> and <cpu><numa> elements
    accordingly, persists the (possibly adjusted) binding back to the DB and
    returns the serialized XML.

    :param xml: libvirt domain XML string to modify
    :param instance: nova instance object (uuid, task_state, numa_topology)
    :param driver: compute driver; its host_state stats provide the host
                   NUMA sibling information
    :param network_info: network info used to detect EVS vhostuser ports
    :returns: the (possibly modified) domain XML string
    :raises Exception: when an even hyper-thread vcpu topology cannot be
                       derived in 'lock' / sync mode
    """

    admin_context = nova_context.get_admin_context()
    inst_extra = objects.HuaweiInstanceExtra.get_by_instance_uuid(
        admin_context, instance.uuid)
    scheduler_hints = jsonutils.loads(inst_extra.scheduler_hints or '{}')

    def _is_sriov_instance(xml):
        # An interface of type 'hostdev' indicates an SR-IOV passthrough VF.
        doc = etree.fromstring(xml)
        interfaces = doc.findall('./devices/interface')
        for interface in interfaces:
            if "type" in interface.keys():
                if interface.attrib['type'] == 'hostdev':
                    return True
        return False

    LOG.debug("Instance %s in %s task_state, get binding_info from db" %
              (instance.uuid, instance.task_state))
    bind_info = jsonutils.loads(inst_extra.core_bind or '[]')
    vcpus = [cell['vcpu'] for cell in bind_info]
    pcpus = [cell['pcpus'] for cell in bind_info]
    # Map each vcpu id to its list of pinned pcpus.
    bind_info = dict(zip(vcpus, pcpus))
    enable_bind, enable_ht, any_mode, numa_opts = \
        hw_shed_utils.get_inst_affinity_mask(
            dict(scheduler_hints=scheduler_hints))
    if instance.numa_topology:
        cell = instance.numa_topology.cells[0]
        instance_numa = {
            "cells": [{
                "mem": {
                    "total": cell.memory
                },
                "cpuset": list(cell.cpuset),
                "id": cell.id,
                "pagesize": cell.pagesize
            }]
        }
        if numa_opts:
            instance_numa['cells'][0]['is_huawei'] = True
    else:
        instance_numa = None

    # if existed vhostuser port, we should change the xml
    if libvirt_ext_utils.get_physical_network(network_info):

        # modify xml for evs vhostuser port, if the vif details exist physical
        # network, we think the port is vhostuser port.
        xml = modify_xml_for_evs_vhostuser(xml)
        LOG.debug(_("modify_xml_for_evs_vhostuser is %s"), xml)

    LOG.debug(
        "Binding cpu, the bind_info is %(bind_info)s, the instance"
        " numa is %(instance_numa)s", {
            'bind_info': bind_info,
            'instance_numa': instance_numa
        })

    # No persisted binding: nothing to rewrite.
    if not bind_info:
        return xml

    doc = etree.fromstring(xml)
    cpu_element = doc.find("cpu")
    vcpu_topo = (cpu_element.find("topology")
                 if cpu_element is not None else None)

    ht = scheduler_hints.get('hyperThreadAffinity', 'any')
    db_vcpu_topo = {}
    if vcpu_topo is not None:
        # BUG FIX: these attribute reads previously happened BEFORE the
        # None check above, so a domain XML without <cpu><topology> raised
        # AttributeError instead of skipping the topology rewrite.
        sockets = vcpu_topo.get("sockets")
        cores = vcpu_topo.get("cores")
        threads = vcpu_topo.get("threads")
        if ht == 'lock' and enable_ht and len(
                bind_info) > 2 and not _is_sriov_instance(xml):
            threads = 2
            cores = 1
            if len(bind_info) % 2 == 0:
                # Floor division: identical to '/' on Python 2 ints and
                # avoids a float sockets attribute (e.g. "1.0") on Python 3.
                sockets = len(bind_info) // (threads * cores) - 1
                vcpu_topo.set("sockets", str(sockets))
                vcpu_topo.set("threads", str(threads))
                vcpu_topo.set("cores", str(cores))
            else:
                msg = (
                    "Cannot set vcpu topology in sync mode, the bind_info"
                    " is %(bind_info)s, the instance numa is %(instance_numa"
                    ")s" % {
                        'bind_info': bind_info,
                        'instance_numa': instance_numa
                    })
                raise Exception(msg)
        else:
            # Fold an even socket or core count in half to expose 2 threads.
            if threads == '1' and enable_ht:
                if int(sockets) % 2 == 0:
                    sockets = str(int(sockets) // 2)
                    vcpu_topo.set("sockets", sockets)
                    threads = str(int(threads) * 2)
                    vcpu_topo.set("threads", threads)
                elif int(cores) % 2 == 0:
                    cores = str(int(cores) // 2)
                    vcpu_topo.set("cores", cores)
                    threads = str(int(threads) * 2)
                    vcpu_topo.set("threads", threads)
                else:
                    msg = (
                        "Cannot set vcpu topology in sync mode, the bind_info"
                        " is %(bind_info)s, the instance numa is %(instance_numa"
                        ")s" % {
                            'bind_info': bind_info,
                            'instance_numa': instance_numa
                        })
                    raise Exception(msg)

        db_vcpu_topo = {
            'sockets': int(sockets),
            'cores': int(cores),
            'threads': int(threads)
        }
    hw_shed_utils.update_cpu_bind_info_to_db(bind_info,
                                             instance.uuid,
                                             instance_numa,
                                             vcpu_topo=db_vcpu_topo)

    # Drop any pre-existing cputune elements; they are rebuilt below.
    cpu = doc.findall('cputune')
    for c in cpu:
        doc.remove(c)

    emulator_pin_bindcpu = None
    if ht == 'lock':
        if CONF.emulator_pin_bindcpu and _is_sriov_instance(xml):
            emulator_pin_bindcpu = CONF.emulator_pin_bindcpu
        else:
            cells = jsonutils.loads(
                driver.host_state._stats['numa_topology']).get(
                    'nova_object.data', {}).get('cells', [])

            all_siblings = []
            for cell in cells:
                _siblings = cell.get('nova_object.data',
                                     {}).get('siblings', [])
                all_siblings = all_siblings + _siblings

            if len(bind_info) > 2:
                # Reserve the sibling core of the highest-numbered vcpu's
                # first pcpu for the emulator and unbind the vcpus on it.
                last_cpu_idx = bind_info[sorted(bind_info.keys())[-1]][0]
                for core in all_siblings:
                    if last_cpu_idx in core:
                        # BUG FIX: iterate over a snapshot — deleting from
                        # bind_info while iterating its live items() view
                        # raises RuntimeError on Python 3.
                        for (k, v) in list(bind_info.items()):
                            if v[0] in core:
                                del bind_info[k]

                        emulator_pin_bindcpu = ",".join([str(c) for c in core])
                        break

                # Re-key the surviving vcpus to a dense 0..n-1 range.
                new_bind_info = {}
                sorted_keys = sorted(bind_info.keys())
                for idx, key in enumerate(sorted_keys):
                    new_bind_info[idx] = bind_info[key]

                bind_info = new_bind_info

    emulatorpin_cpuset = []
    cputune = etree.Element("cputune")
    for k, v in bind_info.items():
        cpuset = ','.join([str(c) for c in v])
        cputune.append(etree.Element("vcpupin", vcpu=str(k), cpuset=cpuset))
        emulatorpin_cpuset.extend(v)
    # Default emulator pin set: union of all pinned pcpus, sorted.
    emulatorpin_cpuset = list(set(emulatorpin_cpuset))
    emulatorpin_cpuset.sort()
    default_emulatorpin_cpuset_str = ','.join(
        map(lambda x: str(x), emulatorpin_cpuset))
    LOG.debug("emulatorpin_cpuset is %s", emulator_pin_bindcpu
              or default_emulatorpin_cpuset_str)
    cputune.append(
        etree.Element("emulatorpin",
                      cpuset=emulator_pin_bindcpu
                      or default_emulatorpin_cpuset_str))
    doc.append(cputune)
    # NOTE: when use huawei numa or bind, we should clean the cpuset of
    # vcpu element, if bind_info isn't {}, that means we shouldn't specify
    # the cpuset of vcpu element, if bind_info is {}, it will be returned
    # above.
    vcpu_element = doc.findall('vcpu')
    for vcpu_e in vcpu_element:
        doc.remove(vcpu_e)
    vcpu_element = etree.Element("vcpu")
    vcpu_element.text = str(len(bind_info))
    doc.append(vcpu_element)
    if instance_numa and instance_numa['cells'][0].get('is_huawei'):
        # Huawei-NUMA instances get a strict memory node binding.
        numa = doc.findall('numatune')
        for nm in numa:
            doc.remove(nm)
        numatune = etree.Element("numatune")
        cell_id = instance_numa['cells'][0]['id']
        cell_e = etree.Element("memory", mode="strict", nodeset=str(cell_id))
        numatune.append(cell_e)
        doc.append(numatune)

    def _update_numa_cell(doc, bind_info):
        # Keep the guest NUMA cell cpu list in sync with the final vcpu ids.
        cells = doc.findall('./cpu/numa/cell')
        for cell in cells:
            cell.attrib['cpus'] = ','.join(
                [str(vcpu) for vcpu in bind_info.keys()])

    _update_numa_cell(doc, bind_info)
    return etree.tostring(doc, pretty_print=True)
    def host_passes(self, host_state, filter_properties):
        """CpuBindFilter

        Return True when ``host_state`` can accept the instance described
        by ``filter_properties`` under the requested CPU binding,
        hyper-threading and NUMA affinity options; False otherwise.

        The instance numa topology is like:
        {u'instance_uuid': u'786d1430-dfe2-4423-8522-4a5394715b32',
        u'cells': [{u'cpuset': [0],
                   u'id': 0,
                   u'memory': 256},
                   {u'cpuset': [1,2,3],
                   u'id': 1,
                   u'memory': 768 }],
        u'id': 122}
        """

        inst_prop = filter_properties['request_spec'].get(
            'instance_properties')
        inst_numa_top = inst_prop.get('numa_topology') if inst_prop else None
        # get the pagesize from instance_type
        instance_type = filter_properties['request_spec'].get(
            'instance_type', None)
        pagesize = None
        if instance_type:
            pagesize = instance_type.get('extra_specs', {}).get(
                "hw:mem_page_size", None)

        # Normalize the instance numa topology into the project's internal
        # dict form before any fitting is attempted.
        if inst_numa_top:
            inst_numa_top = utils.convert_inst_numa_topology(inst_numa_top)

        vcpus = filter_properties.get('instance_type').get('vcpus')
        mem = filter_properties.get('instance_type').get('memory_mb')

        # Affinity options decoded from scheduler hints:
        # enable_bind (CPU pinning), enable_ht (hyper-threading),
        # any_mode (no strict placement), numa_opts (0/1/2 NUMA mode).
        enable_bind, enable_ht, any_mode, numa_opts = \
            utils.get_inst_affinity_mask(filter_properties)

        request_spec = filter_properties['request_spec']
        # Deep-copy so the trial pinning below never mutates host_state.
        host_numa_top = copy.deepcopy(jsonutils.loads(
            host_state.numa_topology or '{}'))
        if host_numa_top and host_numa_top.get('nova_object.data'):
            # Host reports a NUMA topology: convert and continue filtering.
            host_numa_top = utils.convert_host_numa_topology(
                host_numa_top)
        elif numa_opts or enable_bind or not any_mode or pagesize:
            # Host has no NUMA topology but the request needs one: reject.
            LOG.debug('Host %s don\'t support numa,don\'t pass',
                      host_state.host)
            LOG.debug('options:enable_bind(%s), any_mode(%s), numa_opts(%s), '
                      'pagesize(%s)'
                      % (enable_bind, any_mode, numa_opts, pagesize))
            return False
        else:
            # No NUMA on host and none required: trivially pass.
            LOG.debug('Host %s don\'t support numa, pass', host_state.host)
            return True

        context = filter_properties['context'].elevated()
        hw_instance_extras = objects.HuaweiInstanceExtra.get_by_host(
            context, host_state.host)

        instance_uuids = request_spec.get('instance_uuids', [])
        instance_uuid = instance_uuids[0] if instance_uuids else None

        # NUMA and non-NUMA instances may not share a host.
        if utils.is_host_numa_confict(hw_instance_extras, numa_opts,
                                      instance_uuid):
            LOG.debug('Numa instance cannot booted with non-numa '
                      'instance in host %s', host_state.host)
            return False

        # any-mode and non-any-mode instances may not share a host.
        if utils.is_any_node_confict(hw_instance_extras, any_mode,
                                     instance_uuid):
            LOG.debug('any-mode instance cannot booted with non-any-mode '
                      'instance in host %s', host_state.host)
            return False
        # No binding, any-mode, no NUMA mode, no explicit pagesize:
        # nothing left to check, the host passes.
        if (enable_bind, any_mode, numa_opts) == (
                False, True, 0) and not pagesize:
            LOG.debug('Cpubindfilter passed, enable_bind(%s), any_mode(%s),'
                      ' numa_opts(%s)' % (enable_bind, any_mode, numa_opts))
            return True
        if not host_state.numa_topology:
            LOG.info("No numa topology info of host: %s found, cpu bind "
                     "filter failed", host_state.host)
            return False
        # when a numa vm migrate from host1 to host2, and host1 is empty
        # if you want to forbid to create a no-numa vm in host1
        # call this function earlier
        hw_instance_extras = \
            utils._extend_hw_instance_extra(context, host_state.host)
        # Mark already-bound pcpus on the host topology copy so the trial
        # pinning below only uses genuinely free cores.
        utils.update_numa_topo_bind_info(host_numa_top,
                                         hw_instance_extras, any_mode,
                                         instance_uuid)
        LOG.debug("CpuBindFilter trying to filter instance: %(instance_uuid)s,"
                  " with host_numa_top:%(host_numa_top)s of host:%(host)s",
                  {"instance_uuid": instance_uuid, "host_numa_top":
                      host_numa_top, "host": host_state.host})
        enable_evs = utils.get_evs_affinity(filter_properties)

        # SR-IOV (numa_opts == 2) and EVS requests constrain placement to
        # the NUMA node hosting the corresponding device.
        sriov_numa_id = None
        evs_numa_id = None
        if numa_opts == 2:
            try:
                sriov_numa_id = utils.get_numa_id_with_vf_request(
                    host_state, filter_properties)
            except exception.NovaException as ex:
                LOG.info(ex.format_message())
                return False
        if enable_evs:
            try:
                evs_numa_id = utils.get_specific_numa(host_state,
                                                      filter_properties)
            except exception.NovaException as error:
                # if catch the exception, the host is not suitable for creating
                # evs instances.
                LOG.debug(error.format_message())
                return False

        if sriov_numa_id and enable_evs and sriov_numa_id != evs_numa_id:
            LOG.info("Both EVS Numa and IO Numa are specified, But SRIOV"
                     "(node:%s) and EVS(node:%s) devices are not in one "
                     "numa cell." % (sriov_numa_id, evs_numa_id))
            return False
        specific_numa_id = sriov_numa_id or evs_numa_id

        # Trial pinning: the helpers raise NovaException when the instance
        # cannot be fitted onto the (copied) host topology.
        try:
            if inst_numa_top and not numa_opts:
                request_spec = filter_properties.get('request_spec', {})
                instance = request_spec.get('instance_properties', {})
                fitted_inst_numas = utils.get_fitted_inst_numas(instance,
                                                                host_state)
                utils.pin_vcpu_with_inst_numa(enable_bind, enable_ht,
                                              any_mode, fitted_inst_numas,
                                              host_numa_top)
            else:
                utils.pin_vcpu(enable_bind, enable_ht, any_mode, vcpus,
                               mem, host_numa_top, numa_opts,
                               numa_id=specific_numa_id, pagesize=pagesize,
                               host_state=host_state)
        except exception.NovaException as ex:
            LOG.debug(ex.format_message())
            return False
        return True