def _to_dict(self):
    """Serialize this cell to the legacy dict layout."""
    # NOTE(sahid): Used as legacy, could be renamed in
    # _legacy_to_dict_ to the future to avoid confusing.
    legacy = {}
    legacy['cpus'] = hardware.format_cpu_spec(self.cpuset,
                                              allow_ranges=False)
    legacy['mem'] = {'total': self.memory}
    legacy['id'] = self.id
    legacy['pagesize'] = self.pagesize
    return legacy
def _to_dict(self):
    """Serialize id, cpu set, memory totals and cpu usage to a dict."""
    cpu_spec = hardware.format_cpu_spec(self.cpuset, allow_ranges=False)
    mem_info = {'total': self.memory, 'used': self.memory_usage}
    return {
        'id': self.id,
        'cpus': cpu_spec,
        'mem': mem_info,
        'cpu_usage': self.cpu_usage,
    }
def _format_basic_props(self, root):
    """Append the basic domain properties as child nodes of *root*.

    Adds <uuid>, <name>, <memory> and <vcpu> text nodes; the <vcpu>
    element carries a "cpuset" attribute only when this config has an
    explicit cpuset.

    :param root: the XML element to append the property nodes to.
    """
    root.append(self._text_node("uuid", self.uuid))
    root.append(self._text_node("name", self.name))
    root.append(self._text_node("memory", self.memory))
    # Build the vcpu node once; the original branches differed only in
    # whether the "cpuset" attribute was set on it.
    vcpu = self._text_node("vcpu", self.vcpus)
    if self.cpuset is not None:
        vcpu.set("cpuset", hardware.format_cpu_spec(self.cpuset))
    root.append(vcpu)
def _to_dict(self):
    """Return this cell's id, cpus, memory figures and cpu usage as a dict."""
    result = {'id': self.id}
    result['cpus'] = hardware.format_cpu_spec(
        self.cpuset, allow_ranges=False)
    result['mem'] = {'total': self.memory, 'used': self.memory_usage}
    result['cpu_usage'] = self.cpu_usage
    return result
def _to_dict(self):
    """Serialize cpus, memory, id and pagesize to the legacy dict form."""
    # NOTE(sahid): Used as legacy, could be renamed in
    # _legacy_to_dict_ to the future to avoid confusing.
    cpu_list = hardware.format_cpu_spec(self.cpuset, allow_ranges=False)
    return {'cpus': cpu_list,
            'mem': {'total': self.memory},
            'id': self.id,
            'pagesize': self.pagesize}
def to_dict_legacy(self, host_topology):
    """Serialize *host_topology* into the legacy {'cells': [...]} format.

    Memory and cpu limits are derived from this object's configured
    overcommit ratios.
    """
    def _cell_dict(cell):
        # One legacy entry per NUMA cell.
        return {
            'cpus': hardware.format_cpu_spec(
                cell.cpuset, allow_ranges=False),
            'mem': {'total': cell.memory,
                    'limit': cell.memory * self.ram_allocation_ratio},
            'cpu_limit': len(cell.cpuset) * self.cpu_allocation_ratio,
            'id': cell.id,
        }

    return {'cells': [_cell_dict(cell) for cell in host_topology.cells]}
def __repr__(self):
    """Debug representation including cpu and memory usage details."""
    cpus = hardware.format_cpu_spec(self.cpuset, allow_ranges=True)
    pinned = hardware.format_cpu_spec(self.pinned_cpus, allow_ranges=True)
    # cpu_usage may be unset on this object; render None in that case.
    usage = self.cpu_usage if ('cpu_usage' in self) else None
    return ('%(obj_name)s (id: %(id)s) '
            'cpus: %(cpus)s '
            'mem: total: %(total)s used: %(used)s '
            'cpu_usage: %(cpu_usage)s '
            'siblings: %(siblings)s '
            'pinned_cpus: %(pinned_cpus)s '
            'mempages: %(mempages)s ' % {
                'obj_name': self.obj_name(),
                'id': self.id,
                'cpus': cpus,
                'total': self.memory,
                'used': self.memory_usage,
                'cpu_usage': usage,
                'siblings': self.siblings,
                'pinned_cpus': pinned,
                'mempages': self.mempages,
            })
def __repr__(self):
    """Debug representation of an instance NUMA cell."""
    fields = {
        'obj_name': self.obj_name(),
        # id may not be set on a freshly built object.
        'id': self.id if ('id' in self) else None,
        'cpuset': hardware.format_cpu_spec(self.cpuset, allow_ranges=True),
        'shared_vcpu': self.shared_vcpu,
        'shared_pcpu_for_vcpu': self.shared_pcpu_for_vcpu,
        'memory': self.memory,
        'physnode': self.physnode,
        'pagesize': self.pagesize,
        # cpu_topology may likewise be unset.
        'cpu_topology': (self.cpu_topology
                         if ('cpu_topology' in self) else None),
        'cpu_pinning': self.cpu_pinning,
        'siblings': self.siblings,
        'cpu_policy': self.cpu_policy,
        'cpu_thread_policy': self.cpu_thread_policy,
        # l3_cpuset may be None; format an empty spec in that case.
        'l3_cpuset': hardware.format_cpu_spec(
            self.l3_cpuset or [], allow_ranges=True),
        'l3_both_size': self.l3_both_size,
        'l3_code_size': self.l3_code_size,
        'l3_data_size': self.l3_data_size,
    }
    return ('{obj_name} (id: {id}) '
            'cpuset: {cpuset} '
            'shared_vcpu: {shared_vcpu} '
            'shared_pcpu_for_vcpu: {shared_pcpu_for_vcpu} '
            'memory: {memory} '
            'physnode: {physnode} '
            'pagesize: {pagesize} '
            'cpu_topology: {cpu_topology} '
            'cpu_pinning: {cpu_pinning} '
            'siblings: {siblings} '
            'cpu_policy: {cpu_policy} '
            'cpu_thread_policy: {cpu_thread_policy} '
            'l3_cpuset: {l3_cpuset} '
            'l3_both_size: {l3_both_size} '
            'l3_code_size: {l3_code_size} '
            'l3_data_size: {l3_data_size}'.format(**fields))
def test_format_cpu_spec(self):
    """format_cpu_spec: set and list inputs, with and without ranges."""
    # Each case is checked for both set and list input forms.
    ranged_cases = [
        ([], ""),
        ([1, 3], "1,3"),
        ([1, 2, 4, 6], "1-2,4,6"),
        ([10, 11, 13, 14, 15, 16, 19, 20, 40, 42, 48],
         "10-11,13-16,19-20,40,42,48"),
    ]
    for cpus, expected in ranged_cases:
        self.assertEqual(expected, hw.format_cpu_spec(set(cpus)))
        self.assertEqual(expected, hw.format_cpu_spec(cpus))

    flat_cases = [
        ([1, 2, 4, 6], "1,2,4,6"),
        ([10, 11, 13, 14, 15, 16, 19, 20, 40, 42, 48],
         "10,11,13,14,15,16,19,20,40,42,48"),
    ]
    for cpus, expected in flat_cases:
        self.assertEqual(
            expected, hw.format_cpu_spec(set(cpus), allow_ranges=False))
        self.assertEqual(
            expected, hw.format_cpu_spec(cpus, allow_ranges=False))
def _update_numa_xml(xml_doc, driver_interface, instance):
    """Rewrite the NUMA-related elements of a domain XML document for
    live migration, using the NUMA topology and allowed cpus claimed on
    the destination host (from instance.migration_context).

    Replaces the <vcpu>, <cputune>, <numatune>, <memoryBacking> and
    <cpu> elements in place and returns the (mutated) xml_doc.
    """
    image_meta = objects.ImageMeta.from_instance(instance)
    # WRS: Use numa topology and allowed cpus from destination host.
    allowed_cpus = instance.migration_context.new_allowed_cpus
    # TODO(sahid/cfriesen): If the destination has more numa nodes than the
    # source then this could cause problems due to the loop over
    # topology.cells where "topology" is for the source host.
    numa_config = driver_interface.get_guest_numa_config(
        instance.migration_context.new_numa_topology,
        instance.flavor, allowed_cpus, image_meta)
    # NOTE(review): numa_config is indexed here but accessed by attribute
    # below — presumably a namedtuple whose second field is the numa
    # topology; confirm against get_guest_numa_config's return type.
    if numa_config[1] is None:
        # Quit early if the instance does not provide numa topology.
        return xml_doc
    membacking = driver_interface.get_guest_memory_backing_config(
        instance.numa_topology, numa_config.numatune, instance.flavor)
    cpu = driver_interface.get_guest_cpu_config(
        instance.flavor, image_meta, numa_config.numaconfig,
        instance.numa_topology)
    # We need to treat cpuset slightly differently - it's just an
    # attribute on the vcpu element, which we expect is always there
    vcpu = xml_doc.find('./vcpu')
    new_vcpu = etree.Element("vcpu")
    new_vcpu.text = six.text_type(instance.flavor.vcpus)
    if numa_config.cpuset:
        new_vcpu.set("cpuset",
                     hardware.format_cpu_spec(numa_config.cpuset))
    if vcpu is not None:
        xml_doc.remove(vcpu)
    xml_doc.append(new_vcpu)
    # WRS: It's possible that a guest cpu changes while migrating to a host
    # that has a different physical host cpu topology (for e.g. HT to
    # non-HT). Guest cpu topology can't change during live migration. For
    # this reason, we always keep the guest cpu topology that is currently
    # used in the running instance. The topology stays the same but the
    # cpu_tune still gets recalculated based on the InstanceNUMATopology
    # that was claimed on the destination.
    old_cpu_xml = xml_doc.find('./cpu')
    old_cpu = vconfig.LibvirtConfigCPU()
    old_cpu.parse_dom(old_cpu_xml)
    # Carry the running guest's sockets/cores/threads into the new config.
    cpu.sockets = old_cpu.sockets
    cpu.cores = old_cpu.cores
    cpu.threads = old_cpu.threads

    def replace_from_config(tag, config):
        """Replace a top level node with xml generated by config """
        elem = xml_doc.find('./' + tag)
        if elem is not None:
            xml_doc.remove(elem)
        if config:
            new_elem = config.format_dom()
            xml_doc.append(new_elem)

    # Swap each NUMA-related top-level element for one regenerated from
    # the destination-side config objects.
    replace = [('cputune', numa_config.cputune),
               ('numatune', numa_config.numatune),
               ('memoryBacking', membacking),
               ('cpu', cpu)]
    for tag, config in replace:
        replace_from_config(tag, config)
    return xml_doc