def get_numa_nodes(self):
    """Returns the host's list of NUMA nodes.

    Nodes for which memory or CPU information cannot be retrieved are
    logged and skipped rather than reported partially.

    :returns: list of dictionaries containing information about each
        host NUMA node. Each host has at least one NUMA node.
    """
    numa_nodes = self._conn.Msvm_NumaNode()
    nodes_info = []
    system_memory = self._conn.Msvm_Memory(['NumberOfBlocks'])
    processors = self._conn.Msvm_Processor(['DeviceID'])

    for node in numa_nodes:
        # Due to a bug in vmms, getting Msvm_Processor for the numa
        # node associators resulted in a vmms crash.
        # As an alternative to using associators we have to manually get
        # the related Msvm_Processor classes.
        # Msvm_HostedDependency is the association class between
        # Msvm_NumaNode and Msvm_Processor. We need to use this class to
        # relate the two because using associators on Msvm_Processor
        # will also result in a crash.
        numa_assoc = self._conn.Msvm_HostedDependency(
            Antecedent=node.path_())
        numa_node_assoc = [item.Dependent for item in numa_assoc]

        memory_info = self._get_numa_memory_info(numa_node_assoc,
                                                 system_memory)
        if not memory_info:
            LOG.warning(
                _LW("Could not find memory information for NUMA "
                    "node. Skipping node measurements."))
            continue

        cpu_info = self._get_numa_cpu_info(numa_node_assoc, processors)
        if not cpu_info:
            LOG.warning(
                _LW("Could not find CPU information for NUMA "
                    "node. Skipping node measurements."))
            continue

        node_info = {
            # NodeID has the format: Microsoft:PhysicalNode\<NODE_ID>
            'id': node.NodeID.split('\\')[-1],
            # memory block size is 1MB.
            'memory': memory_info.NumberOfBlocks,
            'memory_usage': node.CurrentlyConsumableMemoryBlocks,
            # DeviceID has the format: Microsoft:UUID\0\<DEV_ID>
            # Using a set comprehension instead of set([...]) — same
            # result, one less intermediate list.
            'cpuset': {c.DeviceID.split('\\')[-1] for c in cpu_info},
            # cpu_usage can be set, each CPU has a "LoadPercentage"
            'cpu_usage': 0,
        }
        nodes_info.append(node_info)

    return nodes_info
def get_numa_nodes(self):
    """Returns the host's list of NUMA nodes.

    :returns: list of dictionaries containing information about each
        host NUMA node. Each host has at least one NUMA node.
    """
    nodes_info = []
    mem_blocks = self._conn.Msvm_Memory(['NumberOfBlocks'])
    host_cpus = self._conn.Msvm_Processor(['DeviceID'])

    for numa_node in self._conn.Msvm_NumaNode():
        # NOTE: a vmms bug crashes the service when Msvm_Processor is
        # fetched through the NUMA node associators, so the related
        # Msvm_Processor instances are resolved manually through the
        # Msvm_HostedDependency association class (the association class
        # between Msvm_NumaNode and Msvm_Processor); using associators
        # on Msvm_Processor would crash vmms as well.
        associations = self._conn.Msvm_HostedDependency(
            Antecedent=numa_node.path_())
        dependents = [assoc.Dependent for assoc in associations]

        memory_info = self._get_numa_memory_info(dependents, mem_blocks)
        if not memory_info:
            LOG.warning(_LW("Could not find memory information for NUMA "
                            "node. Skipping node measurements."))
            continue

        cpu_info = self._get_numa_cpu_info(dependents, host_cpus)
        if not cpu_info:
            LOG.warning(_LW("Could not find CPU information for NUMA "
                            "node. Skipping node measurements."))
            continue

        nodes_info.append({
            # NodeID has the format: Microsoft:PhysicalNode\<NODE_ID>
            'id': numa_node.NodeID.split('\\')[-1],
            # memory block size is 1MB.
            'memory': memory_info.NumberOfBlocks,
            'memory_usage': numa_node.CurrentlyConsumableMemoryBlocks,
            # DeviceID has the format: Microsoft:UUID\0\<DEV_ID>
            'cpuset': set([c.DeviceID.split('\\')[-1] for c in cpu_info]),
            # cpu_usage can be set, each CPU has a "LoadPercentage"
            'cpu_usage': 0,
        })

    return nodes_info
def set_disk_host_resource(self, vm_name, controller_path, address,
                           mounted_disk_path):
    """Points an attached disk resource at the given host path.

    Finds the disk attached to ``controller_path`` at ``address`` and,
    if its current host resource differs, updates it to
    ``mounted_disk_path``. Logs a warning when no matching disk exists.

    :param vm_name: name of the VM whose disk is updated.
    :param controller_path: path of the controller the disk hangs off.
    :param address: the disk's address on the controller (compared as a
        string).
    :param mounted_disk_path: new host resource path for the disk.
    """
    # TODO(lpetrut): remove this method after the patch fixing
    # swapped disks after host reboot merges in Nova.
    disk_found = False
    vmsettings = self._lookup_vm_check(vm_name)
    (disk_resources, volume_resources) = self._get_vm_disks(vmsettings)
    for disk_resource in disk_resources + volume_resources:
        if (disk_resource.Parent == controller_path and
                self._get_disk_resource_address(disk_resource) ==
                str(address)):
            if (disk_resource.HostResource and
                    disk_resource.HostResource[0] != mounted_disk_path):
                # Pass the mapping as a logger argument instead of
                # %-interpolating eagerly, so formatting only happens
                # when debug logging is actually enabled.
                LOG.debug('Updating disk host resource "%(old)s" to '
                          '"%(new)s"',
                          {'old': disk_resource.HostResource[0],
                           'new': mounted_disk_path})
                disk_resource.HostResource = [mounted_disk_path]
                self._jobutils.modify_virt_resource(disk_resource)
            disk_found = True
            break
    if not disk_found:
        LOG.warning(_LW('Disk not found on controller '
                        '"%(controller_path)s" with '
                        'address "%(address)s"'),
                    {'controller_path': controller_path,
                     'address': address})
def set_disk_host_resource(self, vm_name, controller_path, address,
                           mounted_disk_path):
    """Updates the host resource of an attached disk, if it changed.

    Locates the disk attached at ``address`` on ``controller_path``
    and rewrites its host resource to ``mounted_disk_path`` when the
    current value differs. A warning is logged if no disk matches.

    :param vm_name: name of the VM whose disk is updated.
    :param controller_path: path of the owning controller.
    :param address: the disk's address on the controller (string
        comparison).
    :param mounted_disk_path: new host resource path for the disk.
    """
    # TODO(lpetrut): remove this method after the patch fixing
    # swapped disks after host reboot merges in Nova.
    disk_found = False
    vmsettings = self._lookup_vm_check(vm_name)
    (disk_resources, volume_resources) = self._get_vm_disks(vmsettings)
    for disk_resource in disk_resources + volume_resources:
        if (disk_resource.Parent == controller_path and
                self._get_disk_resource_address(disk_resource) ==
                str(address)):
            if (disk_resource.HostResource and
                    disk_resource.HostResource[0] != mounted_disk_path):
                # Lazy logger interpolation: hand the mapping to
                # LOG.debug rather than %-formatting it unconditionally.
                LOG.debug(
                    'Updating disk host resource "%(old)s" to '
                    '"%(new)s"',
                    {'old': disk_resource.HostResource[0],
                     'new': mounted_disk_path})
                disk_resource.HostResource = [mounted_disk_path]
                self._jobutils.modify_virt_resource(disk_resource)
            disk_found = True
            break
    if not disk_found:
        LOG.warning(
            _LW('Disk not found on controller '
                '"%(controller_path)s" with '
                'address "%(address)s"'),
            {'controller_path': controller_path,
             'address': address})
def set_disk_host_resource(self, vm_name, controller_path, address,
                           mounted_disk_path):
    """Points an attached disk resource at the given host path.

    Finds the disk attached at ``address`` on ``controller_path`` and
    updates its host resource to ``mounted_disk_path`` when the current
    value differs. Logs a warning when no matching disk is found.

    :param vm_name: name of the VM whose disk is updated.
    :param controller_path: path of the owning controller.
    :param address: the disk's address on the controller (compared as a
        string).
    :param mounted_disk_path: new host resource path for the disk.
    """
    disk_found = False
    vm = self._lookup_vm_check(vm_name)
    (disk_resources, volume_resources) = self._get_vm_disks(vm)
    for disk_resource in disk_resources + volume_resources:
        if (disk_resource.Parent == controller_path and
                self._get_disk_resource_address(disk_resource) ==
                str(address)):
            if (disk_resource.HostResource and
                    disk_resource.HostResource[0] != mounted_disk_path):
                # Pass the mapping as a logger argument instead of
                # %-interpolating eagerly, so formatting only happens
                # when debug logging is actually enabled.
                LOG.debug('Updating disk host resource "%(old)s" to '
                          '"%(new)s"',
                          {'old': disk_resource.HostResource[0],
                           'new': mounted_disk_path})
                disk_resource.HostResource = [mounted_disk_path]
                self._jobutils.modify_virt_resource(disk_resource, vm)
            disk_found = True
            break
    if not disk_found:
        LOG.warning(_LW('Disk not found on controller '
                        '"%(controller_path)s" with '
                        'address "%(address)s"'),
                    {'controller_path': controller_path,
                     'address': address})
def is_host_guarded(self):
    """Checks the host is guarded so it can run Shielded VMs"""
    result = self._conn_hgs.MSFT_HgsClientConfiguration.Get()
    ret_val, hgs_config = result
    # A zero return code means the query succeeded; trust its answer.
    if not ret_val:
        return hgs_config.IsHostGuarded
    LOG.warning(_LW('Retrieving the local Host Guardian Service '
                    'Client configuration failed with code: %s'),
                ret_val)
    return False
def get_fc_hba_ports(self):
    """Returns the ports of the Fibre Channel HBA adapters found.

    Adapters whose name or ports cannot be retrieved are logged and
    skipped, so one misbehaving adapter does not prevent reporting the
    rest.

    :returns: list of the ports gathered from every reachable adapter.
    """
    hba_ports = []
    adapter_count = self.get_fc_hba_count()
    for adapter_index in range(adapter_count):
        # We'll ignore unsupported FC HBA ports. Retrieving the
        # adapter name may fail per-adapter, so guard it the same way
        # as the ports lookup below instead of aborting the loop.
        try:
            adapter_name = self._get_adapter_name(adapter_index)
        except Exception as exc:
            msg = _LW("Could not retrieve FC HBA adapter name for "
                      "adapter number: %(adapter_index)s. "
                      "Exception: %(exc)s")
            LOG.warning(msg, dict(adapter_index=adapter_index, exc=exc))
            continue

        try:
            hba_ports += self._get_fc_hba_adapter_ports(adapter_name)
        except Exception as exc:
            msg = _LW("Could not retrieve FC HBA ports for "
                      "adapter: %(adapter_name)s. "
                      "Exception: %(exc)s")
            LOG.warning(msg, dict(adapter_name=adapter_name, exc=exc))
    return hba_ports
def get_fc_hba_ports(self):
    """Returns the ports of every reachable FC HBA adapter.

    Adapters whose name or ports cannot be queried are logged and
    skipped instead of failing the whole enumeration.
    """
    ports = []
    for idx in range(self.get_fc_hba_count()):
        # We'll ignore unsupported FC HBA ports.
        try:
            name = self._get_adapter_name(idx)
        except Exception as exc:
            LOG.warning(_LW("Could not retrieve FC HBA adapter name for "
                            "adapter number: %(adapter_index)s. "
                            "Exception: %(exc)s"),
                        dict(adapter_index=idx, exc=exc))
            continue

        try:
            ports.extend(self._get_fc_hba_adapter_ports(name))
        except Exception as exc:
            LOG.warning(_LW("Could not retrieve FC HBA ports for "
                            "adapter: %(adapter_name)s. "
                            "Exception: %(exc)s"),
                        dict(adapter_name=name, exc=exc))
    return ports
def _enable_metrics(self, element, metrics_names=None): if not metrics_names: definition_paths = [None] else: definition_paths = [] for metrics_name in metrics_names: metrics_def = self._metrics_defs.get(metrics_name) if not metrics_def: LOG.warning(_LW("Metric not found: %s"), metrics_name) continue definition_paths.append(metrics_def.path_()) element_path = element.path_() for definition_path in definition_paths: self._metrics_svc.ControlMetrics( Subject=element_path, Definition=definition_path, MetricCollectionEnabled=self._METRICS_ENABLED)