def _test_core_bind(self, context, instance, resource_tracker):
    """Compute the CPU bind info for *instance* and persist it to the DB.

    Loads the instance's Huawei scheduler hints/stats and PCI requests into
    ``filter_properties``, asks ``sched_utils`` for a CPU binding on this
    host, writes that binding back via ``update_cpu_bind_info_to_db``, and
    -- for "huawei"-flagged NUMA instances -- mirrors the returned per-cell
    data onto ``self.claimed_numa_topology`` and
    ``self.instance['numa_topology']``.

    :param context: request context used for DB lookups
    :param instance: nova instance (supports both ``.uuid`` and
                     ``['uuid']`` access here)
    :param resource_tracker: provides ``.host`` for the bind computation
    """
    LOG.debug("get instance cpu bind info in _test_core_bind")
    filter_properties = {}
    inst_extra = objects.HuaweiInstanceExtra.get_by_instance_uuid(
        context, instance.uuid)
    # scheduler_hints / stats are stored as JSON strings; missing record
    # or empty columns degrade to empty dicts.
    if inst_extra:
        scheduler_hints = jsonutils.loads(inst_extra.scheduler_hints or '{}')
        stats = jsonutils.loads(inst_extra.stats or '{}')
    else:
        scheduler_hints = {}
        stats = {}
    filter_properties['scheduler_hints'] = scheduler_hints
    filter_properties['stats'] = stats
    pci_requests = objects.InstancePCIRequests.get_by_instance_uuid(
        context, instance['uuid'])
    if pci_requests:
        filter_properties['pci_requests'] = pci_requests
    bind_info, instance_numa, enable_ht = sched_utils.get_inst_cpu_bind_info(
        instance, resource_tracker.host, filter_properties=filter_properties)
    sched_utils.update_cpu_bind_info_to_db(bind_info, instance.uuid,
                                           instance_numa)
    # Only "huawei"-flavored NUMA results are converted into nova NUMA
    # objects and mirrored onto the claim.
    if instance_numa and instance_numa['cells'][0].get('is_huawei'):
        cells = []
        for cell in instance_numa['cells']:
            cells.append(
                objects.InstanceNUMACell(id=cell['id'],
                                         cpuset=set(cell['cpuset']),
                                         memory=cell['mem']['total'],
                                         pagesize=cell.get('pagesize')))
        format_inst_numa = objects.InstanceNUMATopology(cells=cells)
        self.claimed_numa_topology = format_inst_numa
        self.instance['numa_topology'] = format_inst_numa
def _test_core_bind(self, context, instance, resource_tracker):
    """Compute and persist the instance's CPU bind info.

    Builds scheduler filter properties from the Huawei instance-extra
    record and the instance's PCI requests, obtains a binding from
    ``sched_utils``, stores it in the DB, and -- when the first NUMA cell
    is marked ``is_huawei`` -- rebuilds the claim's NUMA topology objects
    from the returned cell data.
    """
    LOG.debug("get instance cpu bind info in _test_core_bind")
    extra = objects.HuaweiInstanceExtra.get_by_instance_uuid(
        context, instance.uuid)
    # Stored as JSON text; absent record/columns fall back to {}.
    hints = jsonutils.loads(extra.scheduler_hints or '{}') if extra else {}
    inst_stats = jsonutils.loads(extra.stats or '{}') if extra else {}
    props = {'scheduler_hints': hints, 'stats': inst_stats}
    pci_reqs = objects.InstancePCIRequests.get_by_instance_uuid(
        context, instance['uuid'])
    if pci_reqs:
        props['pci_requests'] = pci_reqs
    bind_info, inst_numa, enable_ht = sched_utils.get_inst_cpu_bind_info(
        instance, resource_tracker.host, filter_properties=props)
    sched_utils.update_cpu_bind_info_to_db(bind_info, instance.uuid,
                                           inst_numa)
    if inst_numa and inst_numa['cells'][0].get('is_huawei'):
        numa_cells = [
            objects.InstanceNUMACell(id=c['id'],
                                     cpuset=set(c['cpuset']),
                                     memory=c['mem']['total'],
                                     pagesize=c.get('pagesize'))
            for c in inst_numa['cells']
        ]
        topo = objects.InstanceNUMATopology(cells=numa_cells)
        self.claimed_numa_topology = topo
        self.instance['numa_topology'] = topo
def update_cpu_bind_info_to_xml(xml, instance, driver, network_info):
    """Rewrite the libvirt domain XML to pin vCPUs per the stored bind info.

    Reads the binding previously persisted in the Huawei instance-extra
    record, adjusts the ``<cpu><topology>`` element for hyper-threading
    modes, rebuilds ``<cputune>`` (vcpupin + emulatorpin), the ``<vcpu>``
    count, ``<numatune>`` for "huawei" NUMA instances, and the cpus of any
    ``<cpu><numa><cell>`` elements.

    :param xml: libvirt domain XML string
    :param instance: nova instance; ``uuid``, ``task_state`` and
                     ``numa_topology`` are read
    :param driver: compute driver; ``driver.host_state._stats`` supplies
                   the host NUMA topology for the 'lock' HT mode
    :param network_info: vif list used to detect EVS vhostuser ports
    :returns: the (possibly rewritten) XML string; returned unchanged when
              no bind info exists
    :raises Exception: when an even vCPU topology cannot be derived for a
                       hyper-threaded layout
    """
    admin_context = nova_context.get_admin_context()
    inst_extra = objects.HuaweiInstanceExtra.get_by_instance_uuid(
        admin_context, instance.uuid)
    scheduler_hints = jsonutils.loads(inst_extra.scheduler_hints or '{}')

    def _is_sriov_instance(xml):
        # SR-IOV instances carry at least one 'hostdev' type interface.
        doc = etree.fromstring(xml)
        for interface in doc.findall('./devices/interface'):
            if interface.get('type') == 'hostdev':
                return True
        return False

    LOG.debug("Instance %s in %s task_state, get binding_info from db"
              % (instance.uuid, instance.task_state))
    # core_bind is a JSON list of {'vcpu': n, 'pcpus': [...]}; flatten it
    # into a {vcpu: [pcpus]} mapping.
    bind_info = jsonutils.loads(inst_extra.core_bind or '[]')
    bind_info = dict((cell['vcpu'], cell['pcpus']) for cell in bind_info)
    enable_bind, enable_ht, any_mode, numa_opts = \
        hw_shed_utils.get_inst_affinity_mask(
            dict(scheduler_hints=scheduler_hints))
    if instance.numa_topology:
        cell = instance.numa_topology.cells[0]
        instance_numa = {"cells": [{"mem": {"total": cell.memory},
                                    "cpuset": list(cell.cpuset),
                                    "id": cell.id,
                                    "pagesize": cell.pagesize}]}
        if numa_opts:
            instance_numa['cells'][0]['is_huawei'] = True
    else:
        instance_numa = None
    # If the vif details carry a physical network we treat the port as an
    # EVS vhostuser port and rewrite the interface XML accordingly.
    if libvirt_ext_utils.get_physical_network(network_info):
        xml = modify_xml_for_evs_vhostuser(xml)
        LOG.debug(_("modify_xml_for_evs_vhostuser is %s"), xml)
    LOG.debug("Binding cpu, the bind_info is %(bind_info)s, the instance"
              " numa is %(instance_numa)s",
              {'bind_info': bind_info, 'instance_numa': instance_numa})
    if not bind_info:
        return xml
    doc = etree.fromstring(xml)
    cpu_element = doc.find("cpu")
    vcpu_topo = (cpu_element.find("topology")
                 if cpu_element is not None else None)
    ht = scheduler_hints.get('hyperThreadAffinity', 'any')
    db_vcpu_topo = {}
    if vcpu_topo is not None:
        # BUGFIX: sockets/cores/threads were previously read from
        # vcpu_topo BEFORE the None check, crashing with AttributeError
        # whenever the XML carried no <topology> element.
        sockets = vcpu_topo.get("sockets")
        cores = vcpu_topo.get("cores")
        threads = vcpu_topo.get("threads")
        if (ht == 'lock' and enable_ht and len(bind_info) > 2
                and not _is_sriov_instance(xml)):
            threads = 2
            cores = 1
            if len(bind_info) % 2 == 0:
                # // keeps py2 semantics and avoids a float on py3; the
                # evenness check above guarantees an exact division.
                # NOTE(review): the trailing "- 1" appears to reserve one
                # sibling pair for the emulator thread stripped in the
                # 'lock' branch below -- confirm against that logic.
                sockets = len(bind_info) // (threads * cores) - 1
                vcpu_topo.set("sockets", str(sockets))
                vcpu_topo.set("threads", str(threads))
                vcpu_topo.set("cores", str(cores))
            else:
                msg = ("Cannot set vcpu topology in sync mode, the bind_info"
                       " is %(bind_info)s, the instance numa is %(instance_numa"
                       ")s" % {'bind_info': bind_info,
                               'instance_numa': instance_numa})
                raise Exception(msg)
        else:
            if threads == '1' and enable_ht:
                # Fold a single-threaded topology into a 2-thread one by
                # halving sockets (preferred) or cores.
                if int(sockets) % 2 == 0:
                    sockets = str(int(sockets) // 2)
                    vcpu_topo.set("sockets", sockets)
                    threads = str(int(threads) * 2)
                    vcpu_topo.set("threads", threads)
                elif int(cores) % 2 == 0:
                    cores = str(int(cores) // 2)
                    vcpu_topo.set("cores", cores)
                    threads = str(int(threads) * 2)
                    vcpu_topo.set("threads", threads)
                else:
                    msg = ("Cannot set vcpu topology in sync mode, the"
                           " bind_info is %(bind_info)s, the instance numa"
                           " is %(instance_numa)s"
                           % {'bind_info': bind_info,
                              'instance_numa': instance_numa})
                    raise Exception(msg)
        db_vcpu_topo = {'sockets': int(sockets),
                        'cores': int(cores),
                        'threads': int(threads)}
    hw_shed_utils.update_cpu_bind_info_to_db(bind_info, instance.uuid,
                                             instance_numa,
                                             vcpu_topo=db_vcpu_topo)
    # Drop any existing <cputune> before rebuilding it from bind_info.
    for c in doc.findall('cputune'):
        doc.remove(c)
    emulator_pin_bindcpu = None
    if ht == 'lock':
        if CONF.emulator_pin_bindcpu and _is_sriov_instance(xml):
            emulator_pin_bindcpu = CONF.emulator_pin_bindcpu
        else:
            cells = jsonutils.loads(
                driver.host_state._stats['numa_topology']).get(
                'nova_object.data', {}).get('cells', [])
            all_siblings = []
            for cell in cells:
                _siblings = cell.get('nova_object.data',
                                     {}).get('siblings', [])
                all_siblings = all_siblings + _siblings
            if len(bind_info) > 2:
                # Dedicate the sibling group holding the highest-numbered
                # vCPU's pCPU to the emulator, and strip those vCPUs from
                # the binding.
                last_cpu_idx = bind_info[sorted(bind_info.keys())[-1]][0]
                for core in all_siblings:
                    if last_cpu_idx in core:
                        # BUGFIX: iterate over a snapshot -- deleting from
                        # a dict while iterating .items() raises
                        # RuntimeError on Python 3.
                        for (k, v) in list(bind_info.items()):
                            if v[0] in core:
                                del bind_info[k]
                        emulator_pin_bindcpu = ",".join(
                            [str(c) for c in core])
                        break
                # Re-number the remaining vCPUs contiguously from 0.
                new_bind_info = {}
                for idx, key in enumerate(sorted(bind_info.keys())):
                    new_bind_info[idx] = bind_info[key]
                bind_info = new_bind_info
    emulatorpin_cpuset = []
    cputune = etree.Element("cputune")
    for k, v in bind_info.items():
        cpuset = ','.join([str(c) for c in v])
        cputune.append(etree.Element("vcpupin", vcpu=str(k), cpuset=cpuset))
        emulatorpin_cpuset.extend(v)
    # Default emulator pinning is the union of all pinned pCPUs.
    emulatorpin_cpuset = sorted(set(emulatorpin_cpuset))
    default_emulatorpin_cpuset_str = ','.join(
        [str(x) for x in emulatorpin_cpuset])
    LOG.debug("emulatorpin_cpuset is %s",
              emulator_pin_bindcpu or default_emulatorpin_cpuset_str)
    cputune.append(
        etree.Element("emulatorpin",
                      cpuset=emulator_pin_bindcpu or
                      default_emulatorpin_cpuset_str))
    doc.append(cputune)
    # NOTE: when use huawei numa or bind, we should clean the cpuset of
    # vcpu element; bind_info is non-empty here (empty returned above), so
    # the vcpu element must not carry a cpuset of its own.
    for vcpu_e in doc.findall('vcpu'):
        doc.remove(vcpu_e)
    vcpu_element = etree.Element("vcpu")
    vcpu_element.text = str(len(bind_info))
    doc.append(vcpu_element)
    if instance_numa and instance_numa['cells'][0].get('is_huawei'):
        for nm in doc.findall('numatune'):
            doc.remove(nm)
        numatune = etree.Element("numatune")
        cell_id = instance_numa['cells'][0]['id']
        cell_e = etree.Element("memory", mode="strict", nodeset=str(cell_id))
        numatune.append(cell_e)
        doc.append(numatune)

    def _update_numa_cell(doc, bind_info):
        # Keep <cpu><numa><cell cpus=...> consistent with the (possibly
        # re-numbered) vCPU set.
        for cell in doc.findall('./cpu/numa/cell'):
            cell.attrib['cpus'] = ','.join(
                [str(vcpu) for vcpu in bind_info.keys()])

    _update_numa_cell(doc, bind_info)
    return etree.tostring(doc, pretty_print=True)
def update_cpu_bind_info_to_xml(xml, instance, driver, network_info):
    """Rewrite the libvirt domain XML to pin vCPUs per the stored bind info.

    Reads the binding previously persisted in the Huawei instance-extra
    record, adjusts the ``<cpu><topology>`` element for hyper-threading
    modes, rebuilds ``<cputune>`` (vcpupin + emulatorpin), the ``<vcpu>``
    count, ``<numatune>`` for "huawei" NUMA instances, and the cpus of any
    ``<cpu><numa><cell>`` elements.

    :param xml: libvirt domain XML string
    :param instance: nova instance; ``uuid``, ``task_state`` and
                     ``numa_topology`` are read
    :param driver: compute driver; ``driver.host_state._stats`` supplies
                   the host NUMA topology for the 'lock' HT mode
    :param network_info: vif list used to detect EVS vhostuser ports
    :returns: the (possibly rewritten) XML string; returned unchanged when
              no bind info exists
    :raises Exception: when an even vCPU topology cannot be derived for a
                       hyper-threaded layout
    """
    admin_context = nova_context.get_admin_context()
    inst_extra = objects.HuaweiInstanceExtra.get_by_instance_uuid(
        admin_context, instance.uuid)
    scheduler_hints = jsonutils.loads(inst_extra.scheduler_hints or '{}')

    def _is_sriov_instance(xml):
        # SR-IOV instances carry at least one 'hostdev' type interface.
        doc = etree.fromstring(xml)
        for interface in doc.findall('./devices/interface'):
            if interface.get('type') == 'hostdev':
                return True
        return False

    LOG.debug("Instance %s in %s task_state, get binding_info from db"
              % (instance.uuid, instance.task_state))
    # core_bind is a JSON list of {'vcpu': n, 'pcpus': [...]}; flatten it
    # into a {vcpu: [pcpus]} mapping.
    bind_info = jsonutils.loads(inst_extra.core_bind or '[]')
    bind_info = dict((cell['vcpu'], cell['pcpus']) for cell in bind_info)
    enable_bind, enable_ht, any_mode, numa_opts = \
        hw_shed_utils.get_inst_affinity_mask(
            dict(scheduler_hints=scheduler_hints))
    if instance.numa_topology:
        cell = instance.numa_topology.cells[0]
        instance_numa = {"cells": [{"mem": {"total": cell.memory},
                                    "cpuset": list(cell.cpuset),
                                    "id": cell.id,
                                    "pagesize": cell.pagesize}]}
        if numa_opts:
            instance_numa['cells'][0]['is_huawei'] = True
    else:
        instance_numa = None
    # If the vif details carry a physical network we treat the port as an
    # EVS vhostuser port and rewrite the interface XML accordingly.
    if libvirt_ext_utils.get_physical_network(network_info):
        xml = modify_xml_for_evs_vhostuser(xml)
        LOG.debug(_("modify_xml_for_evs_vhostuser is %s"), xml)
    LOG.debug("Binding cpu, the bind_info is %(bind_info)s, the instance"
              " numa is %(instance_numa)s",
              {'bind_info': bind_info, 'instance_numa': instance_numa})
    if not bind_info:
        return xml
    doc = etree.fromstring(xml)
    cpu_element = doc.find("cpu")
    vcpu_topo = (cpu_element.find("topology")
                 if cpu_element is not None else None)
    ht = scheduler_hints.get('hyperThreadAffinity', 'any')
    db_vcpu_topo = {}
    if vcpu_topo is not None:
        # BUGFIX: sockets/cores/threads were previously read from
        # vcpu_topo BEFORE the None check, crashing with AttributeError
        # whenever the XML carried no <topology> element.
        sockets = vcpu_topo.get("sockets")
        cores = vcpu_topo.get("cores")
        threads = vcpu_topo.get("threads")
        if (ht == 'lock' and enable_ht and len(bind_info) > 2
                and not _is_sriov_instance(xml)):
            threads = 2
            cores = 1
            if len(bind_info) % 2 == 0:
                # // keeps py2 semantics and avoids a float on py3; the
                # evenness check above guarantees an exact division.
                # NOTE(review): the trailing "- 1" appears to reserve one
                # sibling pair for the emulator thread stripped in the
                # 'lock' branch below -- confirm against that logic.
                sockets = len(bind_info) // (threads * cores) - 1
                vcpu_topo.set("sockets", str(sockets))
                vcpu_topo.set("threads", str(threads))
                vcpu_topo.set("cores", str(cores))
            else:
                msg = ("Cannot set vcpu topology in sync mode, the bind_info"
                       " is %(bind_info)s, the instance numa is %(instance_numa"
                       ")s" % {'bind_info': bind_info,
                               'instance_numa': instance_numa})
                raise Exception(msg)
        else:
            if threads == '1' and enable_ht:
                # Fold a single-threaded topology into a 2-thread one by
                # halving sockets (preferred) or cores.
                if int(sockets) % 2 == 0:
                    sockets = str(int(sockets) // 2)
                    vcpu_topo.set("sockets", sockets)
                    threads = str(int(threads) * 2)
                    vcpu_topo.set("threads", threads)
                elif int(cores) % 2 == 0:
                    cores = str(int(cores) // 2)
                    vcpu_topo.set("cores", cores)
                    threads = str(int(threads) * 2)
                    vcpu_topo.set("threads", threads)
                else:
                    msg = ("Cannot set vcpu topology in sync mode, the"
                           " bind_info is %(bind_info)s, the instance numa"
                           " is %(instance_numa)s"
                           % {'bind_info': bind_info,
                              'instance_numa': instance_numa})
                    raise Exception(msg)
        db_vcpu_topo = {'sockets': int(sockets),
                        'cores': int(cores),
                        'threads': int(threads)}
    hw_shed_utils.update_cpu_bind_info_to_db(bind_info, instance.uuid,
                                             instance_numa,
                                             vcpu_topo=db_vcpu_topo)
    # Drop any existing <cputune> before rebuilding it from bind_info.
    for c in doc.findall('cputune'):
        doc.remove(c)
    emulator_pin_bindcpu = None
    if ht == 'lock':
        if CONF.emulator_pin_bindcpu and _is_sriov_instance(xml):
            emulator_pin_bindcpu = CONF.emulator_pin_bindcpu
        else:
            cells = jsonutils.loads(
                driver.host_state._stats['numa_topology']).get(
                'nova_object.data', {}).get('cells', [])
            all_siblings = []
            for cell in cells:
                _siblings = cell.get('nova_object.data',
                                     {}).get('siblings', [])
                all_siblings = all_siblings + _siblings
            if len(bind_info) > 2:
                # Dedicate the sibling group holding the highest-numbered
                # vCPU's pCPU to the emulator, and strip those vCPUs from
                # the binding.
                last_cpu_idx = bind_info[sorted(bind_info.keys())[-1]][0]
                for core in all_siblings:
                    if last_cpu_idx in core:
                        # BUGFIX: iterate over a snapshot -- deleting from
                        # a dict while iterating .items() raises
                        # RuntimeError on Python 3.
                        for (k, v) in list(bind_info.items()):
                            if v[0] in core:
                                del bind_info[k]
                        emulator_pin_bindcpu = ",".join(
                            [str(c) for c in core])
                        break
                # Re-number the remaining vCPUs contiguously from 0.
                new_bind_info = {}
                for idx, key in enumerate(sorted(bind_info.keys())):
                    new_bind_info[idx] = bind_info[key]
                bind_info = new_bind_info
    emulatorpin_cpuset = []
    cputune = etree.Element("cputune")
    for k, v in bind_info.items():
        cpuset = ','.join([str(c) for c in v])
        cputune.append(etree.Element("vcpupin", vcpu=str(k), cpuset=cpuset))
        emulatorpin_cpuset.extend(v)
    # Default emulator pinning is the union of all pinned pCPUs.
    emulatorpin_cpuset = sorted(set(emulatorpin_cpuset))
    default_emulatorpin_cpuset_str = ','.join(
        [str(x) for x in emulatorpin_cpuset])
    LOG.debug("emulatorpin_cpuset is %s",
              emulator_pin_bindcpu or default_emulatorpin_cpuset_str)
    cputune.append(
        etree.Element("emulatorpin",
                      cpuset=emulator_pin_bindcpu or
                      default_emulatorpin_cpuset_str))
    doc.append(cputune)
    # NOTE: when use huawei numa or bind, we should clean the cpuset of
    # vcpu element; bind_info is non-empty here (empty returned above), so
    # the vcpu element must not carry a cpuset of its own.
    for vcpu_e in doc.findall('vcpu'):
        doc.remove(vcpu_e)
    vcpu_element = etree.Element("vcpu")
    vcpu_element.text = str(len(bind_info))
    doc.append(vcpu_element)
    if instance_numa and instance_numa['cells'][0].get('is_huawei'):
        for nm in doc.findall('numatune'):
            doc.remove(nm)
        numatune = etree.Element("numatune")
        cell_id = instance_numa['cells'][0]['id']
        cell_e = etree.Element("memory", mode="strict", nodeset=str(cell_id))
        numatune.append(cell_e)
        doc.append(numatune)

    def _update_numa_cell(doc, bind_info):
        # Keep <cpu><numa><cell cpus=...> consistent with the (possibly
        # re-numbered) vCPU set.
        for cell in doc.findall('./cpu/numa/cell'):
            cell.attrib['cpus'] = ','.join(
                [str(vcpu) for vcpu in bind_info.keys()])

    _update_numa_cell(doc, bind_info)
    return etree.tostring(doc, pretty_print=True)