def testCpuTopologyX86_64_amd_6272(self):
    # 2 x AMD 6272 (with Modules)
    numa.update()
    t = numa.cpu_topology()
    self.assertEqual(t.threads, 32)
    self.assertEqual(t.cores, 16)
    self.assertEqual(t.sockets, 2)
def test_getCpuTopology(self):
    numa.update()
    t = numa.cpu_topology()
    self.assertEqual(t.threads, 8)
    self.assertEqual(t.cores, 4)
    self.assertEqual(t.sockets, 1)
    self.assertEqual(t.online_cpus, [0, 1, 2, 3, 4, 5, 6, 7])
def testCpuTopologyX86_64_intel_e31220(self):
    # 1 x Intel E31220 (normal multi-core)
    numa.update()
    t = numa.cpu_topology()
    self.assertEqual(t.threads, 4)
    self.assertEqual(t.cores, 4)
    self.assertEqual(t.sockets, 1)
def testCpuTopologyS390X(self):
    # S390: 1 socket, 4 cores, 1 thread per core
    numa.update()
    t = numa.cpu_topology()
    self.assertEqual(t.threads, 4)
    self.assertEqual(t.cores, 4)
    self.assertEqual(t.sockets, 1)
def testCpuTopologyX86_64_intel_e5649(self):
    # 2 x Intel E5649 (with Hyperthreading)
    numa.update()
    t = numa.cpu_topology()
    self.assertEqual(t.threads, 24)
    self.assertEqual(t.cores, 12)
    self.assertEqual(t.sockets, 2)
def testCpuTopologyPPC64(self):
    # PPC64: 4 sockets, 5 cores per socket, 1 thread per core
    numa.update()
    t = numa.cpu_topology()
    self.assertEqual(t.threads, 20)
    self.assertEqual(t.cores, 20)
    self.assertEqual(t.sockets, 4)
def test_core_cpus(self):
    # 2 sockets, 6 cores per socket, 2 threads per core
    numa.update()
    cpus = numa.core_cpus()
    assert len(cpus) == 12
    assert cpus[(0, 0, 0)] == {0, 12}
    assert cpus[(0, 0, 1)] == {8, 20}
    assert cpus[(1, 0, 0)] == {1, 13}
    assert cpus[(1, 0, 1)] == {9, 21}
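# A hedged sketch (not necessarily vdsm's implementation) of how a mapping
# like the one test_core_cpus() asserts could be built: numa.cpu_info()
# exposes cpu_id, socket_id, die_id and core_id per CPU (see the
# 'cpuTopology' block in get() below), so grouping by (socket, die, core)
# yields each core's set of hardware threads.
def core_cpus_sketch():
    from collections import defaultdict
    cores = defaultdict(set)
    for cpu in numa.cpu_info():
        cores[(cpu.socket_id, cpu.die_id, cpu.core_id)].add(cpu.cpu_id)
    return dict(cores)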
def testNumaNodeDistance(self):
    numa.update()
    t = numa.distances()
    expectedDistanceInfo = {
        '0': [10, 20, 40, 40],
        '1': [20, 10, 40, 40],
        '16': [40, 40, 10, 20],
        '17': [40, 40, 20, 10],
    }
    self.assertEqual(t, expectedDistanceInfo)
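# Hedged sketch, for illustration only: distance vectors of the shape asserted
# above match the kernel's /sys/devices/system/node/node*/distance files, each
# of which holds one space-separated row of the NUMA distance matrix. Whether
# vdsm reads sysfs directly or obtains this via libvirt is an assumption not
# confirmed by this section.
def read_numa_distances_sketch():
    import glob
    distances = {}
    for node_dir in glob.glob('/sys/devices/system/node/node*'):
        node_id = node_dir.rsplit('node', 1)[1]
        with open(node_dir + '/distance') as f:
            distances[node_id] = [int(v) for v in f.read().split()]
    return distances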
def testNumaTopology(self):
    # 2 x AMD 6272 (with Modules)
    numa.update()
    t = numa.topology()
    expectedNumaInfo = {
        '0': {
            'cpus': [0, 1, 2, 3, 4, 5, 6, 7],
            'totalMemory': '49141',
            'hugepages': {
                4: {'totalPages': '2500'},
                2048: {'totalPages': '100'},
            },
        },
        '1': {
            'cpus': [8, 9, 10, 11, 12, 13, 14, 15],
            'totalMemory': '49141',
            'hugepages': {
                4: {'totalPages': '2'},
                2048: {'totalPages': '1'},
            },
        },
        '2': {
            'cpus': [16, 17, 18, 19, 20, 21, 22, 23],
            'totalMemory': '49141',
            'hugepages': {
                4: {'totalPages': '0'},
                2048: {'totalPages': '0'},
            },
        },
        '3': {
            'cpus': [24, 25, 26, 27, 28, 29, 30, 31],
            'totalMemory': '49141',
            'hugepages': {
                4: {'totalPages': '2500'},
                2048: {'totalPages': '100'},
            },
        },
    }
    self.assertEqual(t, expectedNumaInfo)
def _assign_shared(cif, target_vm=None):
    """
    Assign all CPUs from the shared pool to all VMs with no policy, or to a
    specific VM with no policy.

    :param target_vm: A VM instance, CPUs of which are to be configured with
      the shared pool CPU set. If None, all VMs with no specific policy will
      be reconfigured with the current shared pool CPU set.
    :type target_vm: vdsm.virt.VM or None
    """
    numa.update()
    core_cpus = numa.core_cpus()
    cpu_topology = numa.cpu_topology()
    cpu_list_length = max(cpu_topology.online_cpus) + 1

    with _shared_pool_lock:
        shared_cpus = _shared_pool(cif, cpu_topology.online_cpus, core_cpus)
        shared_str = ','.join(map(str, shared_cpus))
        cpuset = libvirt_cpuset_spec(shared_cpus, cpu_list_length)
        if target_vm is None:
            vms_to_update = cif.getVMs().values()
        else:
            vms_to_update = [target_vm]
        for vm in vms_to_update:
            if vm.cpu_policy() not in (CPU_POLICY_NONE, CPU_POLICY_MANUAL):
                continue
            try:
                for vcpu in range(vm.get_number_of_cpus()):
                    if (vm.cpu_policy() == CPU_POLICY_MANUAL
                            and vcpu in vm.manually_pinned_cpus()):
                        continue
                    vm.log.debug('configuring vCPU=%d with cpuset="%s"',
                                 vcpu, shared_str)
                    try:
                        vm.pin_vcpu(vcpu, cpuset)
                    except virdomain.NotConnectedError:
                        vm.log.warning(
                            "Cannot reconfigure CPUs, domain not connected.")
                    except libvirt.libvirtError as e:
                        if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
                            vm.log.warning('Cannot reconfigure CPUs,'
                                           ' domain does not exist anymore.')
                        else:
                            raise
            except Exception:
                vm.log.exception(
                    'Failed to update CPU set of the VM to match shared pool')
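# Hedged sketch of what libvirt_cpuset_spec(), used above, is assumed to
# produce: libvirt's pinVcpu() takes a boolean tuple with one entry per host
# CPU, True marking the CPUs the vCPU may run on. cpu_list_length is
# max(online_cpus) + 1 so the tuple covers the highest online CPU index.
def libvirt_cpuset_spec_sketch(cpus, cpu_list_length):
    cpuset = [False] * cpu_list_length
    for cpu in cpus:
        cpuset[cpu] = True
    return tuple(cpuset)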
def test_shared_pool():
    # 2 sockets, 6 cores per socket, 2 threads per core
    numa.update()
    core_cpus = numa.core_cpus()
    online_cpus = list(range(24))
    cif = FakeClientIF({
        0: FakeVM(
            cpumanagement.CPU_POLICY_NONE, {
                # included in shared pool
                0: frozenset([2]),
                1: frozenset([14]),
            }),
        1: FakeVM(
            cpumanagement.CPU_POLICY_MANUAL, {
                # included in shared pool
                0: frozenset([0]),
                1: frozenset([1]),
            }),
        2: FakeVM(cpumanagement.CPU_POLICY_DEDICATED, {
            0: frozenset([12]),
            1: frozenset([13]),
        }),
        3: FakeVM(
            cpumanagement.CPU_POLICY_ISOLATE_THREADS, {
                0: frozenset([4]),  # blocks also 16
                1: frozenset([6]),  # blocks also 18
            }),
        4: FakeVM(
            cpumanagement.CPU_POLICY_SIBLINGS, {
                0: frozenset([8]),
                1: frozenset([10]),
                2: frozenset([20]),  # blocks also 22
            }),
    })
    pool = cpumanagement._shared_pool(cif, online_cpus, core_cpus)
    assert pool == {0, 1, 2, 3, 5, 7, 9, 11, 14, 15, 17, 19, 21, 23}
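# Hedged sketch of the arithmetic test_shared_pool() exercises: the pool is
# all online CPUs minus every CPU pinned by a non-shared policy, and for
# ISOLATE_THREADS and SIBLINGS also minus the sibling threads of those CPUs
# (hence 16, 18 and 22 being blocked above). The pinned_cpus() accessor is an
# assumption for illustration; the real _shared_pool() takes the clientIF.
def shared_pool_sketch(vms, online_cpus, core_cpus):
    blocked = set()
    for vm in vms:
        policy = vm.cpu_policy()
        if policy in (cpumanagement.CPU_POLICY_NONE,
                      cpumanagement.CPU_POLICY_MANUAL):
            continue  # these VMs float on the shared pool
        for pinned in vm.pinned_cpus().values():
            blocked |= pinned
            if policy in (cpumanagement.CPU_POLICY_ISOLATE_THREADS,
                          cpumanagement.CPU_POLICY_SIBLINGS):
                for cpu in pinned:
                    blocked |= cpumanagement._siblings(core_cpus, cpu)
    return set(online_cpus) - blocked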
def get():
    numa.update()
    caps = {}
    cpu_topology = numa.cpu_topology()

    caps['kvmEnabled'] = str(os.path.exists('/dev/kvm')).lower()

    if config.getboolean('vars', 'report_host_threads_as_cores'):
        caps['cpuCores'] = str(cpu_topology.threads)
    else:
        caps['cpuCores'] = str(cpu_topology.cores)

    caps['cpuThreads'] = str(cpu_topology.threads)
    caps['cpuSockets'] = str(cpu_topology.sockets)
    caps['onlineCpus'] = ','.join(
        [str(cpu_id) for cpu_id in cpu_topology.online_cpus])
    caps['cpuTopology'] = [{
        'cpu_id': cpu.cpu_id,
        'numa_cell_id': cpu.numa_cell_id,
        'socket_id': cpu.socket_id,
        'die_id': cpu.die_id,
        'core_id': cpu.core_id,
    } for cpu in numa.cpu_info()]
    caps['cpuSpeed'] = cpuinfo.frequency()
    caps['cpuModel'] = cpuinfo.model()
    caps['cpuFlags'] = ','.join(_getFlagsAndFeatures())
    caps['vdsmToCpusAffinity'] = list(taskset.get(os.getpid()))

    caps.update(dsaversion.version_info())

    proxy = supervdsm.getProxy()
    net_caps = proxy.network_caps()
    caps.update(net_caps)
    caps['ovnConfigured'] = proxy.is_ovn_configured()

    try:
        caps['hooks'] = hooks.installed()
    except Exception:
        logging.debug('not reporting hooks', exc_info=True)

    caps['operatingSystem'] = osinfo.version()
    caps['uuid'] = host.uuid()
    caps['packages2'] = osinfo.package_versions()
    caps['realtimeKernel'] = osinfo.runtime_kernel_flags().realtime
    caps['kernelArgs'] = osinfo.kernel_args()
    caps['nestedVirtualization'] = osinfo.nested_virtualization().enabled
    caps['emulatedMachines'] = machinetype.emulated_machines(
        cpuarch.effective())
    caps['ISCSIInitiatorName'] = _getIscsiIniName()
    caps['HBAInventory'] = hba.HBAInventory()
    caps['vmTypes'] = ['kvm']

    caps['memSize'] = str(utils.readMemInfo()['MemTotal'] // 1024)
    caps['reservedMem'] = str(
        config.getint('vars', 'host_mem_reserve') +
        config.getint('vars', 'extra_mem_reserve'))
    caps['guestOverhead'] = config.get('vars', 'guest_ram_overhead')
    caps['rngSources'] = rngsources.list_available()
    caps['numaNodes'] = dict(numa.topology())
    caps['numaNodeDistance'] = dict(numa.distances())
    caps['autoNumaBalancing'] = numa.autonuma_status()
    caps['selinux'] = osinfo.selinux_status()
    caps['liveSnapshot'] = 'true'
    caps['liveMerge'] = 'true'
    caps['kdumpStatus'] = osinfo.kdump_status()
    caps["deferred_preallocation"] = True
    caps['hostdevPassthrough'] = str(hostdev.is_supported()).lower()
    # TODO This needs to be removed after adding engine side support
    # and adding gdeploy support to enable libgfapi on RHHI by default
    caps['additionalFeatures'] = ['libgfapi_supported']
    if osinfo.glusterEnabled:
        from vdsm.gluster.api import glusterAdditionalFeatures
        caps['additionalFeatures'].extend(glusterAdditionalFeatures())
    caps['hostedEngineDeployed'] = _isHostedEngineDeployed()
    caps['hugepages'] = hugepages.supported()
    caps['kernelFeatures'] = osinfo.kernel_features()
    caps['vncEncrypted'] = _isVncEncrypted()
    caps['backupEnabled'] = True
    caps['coldBackupEnabled'] = True
    caps['clearBitmapsEnabled'] = True
    caps['fipsEnabled'] = _getFipsEnabled()
    try:
        caps['boot_uuid'] = osinfo.boot_uuid()
    except Exception:
        logging.exception("Can not find boot uuid")
    caps['tscFrequency'] = _getTscFrequency()
    caps['tscScaling'] = _getTscScaling()

    try:
        caps["connector_info"] = managedvolume.connector_info()
    except se.ManagedVolumeNotSupported as e:
        logging.info("managedvolume not supported: %s", e)
    except se.ManagedVolumeHelperFailed as e:
        logging.exception("Error getting managedvolume connector info: %s", e)

    # Which domain versions are supported by this host.
    caps["domain_versions"] = sc.DOMAIN_VERSIONS

    caps["supported_block_size"] = backends.supported_block_size()
    caps["cd_change_pdiv"] = True
    caps["refresh_disk_supported"] = True

    return caps
def test_topology(self):
    numa.update()
    result = numa.topology()
    # only check cpus, memory does not come from file
    expected = [0, 1, 2, 3, 4, 5, 6, 7]
    self.assertEqual(expected, result['0']['cpus'])
def test_siblings_no_smt():
    # 1 socket, 4 cores per socket, 1 thread per core
    numa.update()
    cpus = numa.core_cpus()
    assert cpumanagement._siblings(cpus, 0) == frozenset()
def test_siblings():
    # 2 sockets, 6 cores per socket, 2 threads per core
    numa.update()
    cpus = numa.core_cpus()
    assert cpumanagement._siblings(cpus, 0) == frozenset([12])
    assert cpumanagement._siblings(cpus, 8) == frozenset([20])
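# Hedged sketch of the sibling lookup these tests exercise: find the core
# whose CPU set contains cpu_id and return the remaining CPUs of that core.
# On a host without SMT each core holds a single CPU, so the result is the
# empty frozenset asserted in test_siblings_no_smt().
def siblings_sketch(core_cpus, cpu_id):
    for thread_set in core_cpus.values():
        if cpu_id in thread_set:
            return frozenset(thread_set) - frozenset([cpu_id])
    return frozenset()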