def get_inventory(self, nodename):
    """Return a dict, keyed by resource class, of inventory information
    for the supplied node.
    """
    stats = vm_util.get_stats_from_cluster(self._session,
                                           self._cluster_ref)
    datastores = ds_util.get_available_datastores(self._session,
                                                  self._cluster_ref,
                                                  self._datastore_regex)
    total_disk_capacity = sum([ds.capacity for ds in datastores])
    max_free_space = max([ds.freespace for ds in datastores])
    reserved_disk_gb = compute_utils.convert_mb_to_ceil_gb(
        CONF.reserved_host_disk_mb)
    result = {
        obj_fields.ResourceClass.VCPU: {
            'total': stats['cpu']['vcpus'],
            'reserved': CONF.reserved_host_cpus,
            'min_unit': 1,
            'max_unit': stats['cpu']['max_vcpus_per_host'],
            'step_size': 1,
        },
        obj_fields.ResourceClass.MEMORY_MB: {
            'total': stats['mem']['total'],
            'reserved': CONF.reserved_host_memory_mb,
            'min_unit': 1,
            'max_unit': stats['mem']['max_mem_mb_per_host'],
            'step_size': 1,
        },
        obj_fields.ResourceClass.DISK_GB: {
            'total': total_disk_capacity // units.Gi,
            'reserved': reserved_disk_gb,
            'min_unit': 1,
            'max_unit': max_free_space // units.Gi,
            'step_size': 1,
        },
    }
    return result
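# The DISK_GB numbers above mix two rounding directions: 'total' and
# 'max_unit' floor byte counts to whole GiB via ``// units.Gi``, while the
# reserved value rounds MB *up* through compute_utils.convert_mb_to_ceil_gb,
# so any non-zero reservation costs at least one full GB. Below is a
# minimal, self-contained sketch of that arithmetic; the GiB constant and
# the ceil expression are plain-Python stand-ins for oslo_utils.units.Gi
# and the nova helper, and the datastore sizes are made up for illustration.

import math

GiB = 1024 ** 3  # stand-in for oslo_utils.units.Gi

# Hypothetical datastores, sized in bytes as vCenter reports them.
fake_datastores = [
    {'capacity': 2 * GiB + GiB // 2, 'freespace': GiB + GiB // 2},
    {'capacity': 4 * GiB, 'freespace': 3 * GiB},
]

total_disk_capacity = sum(ds['capacity'] for ds in fake_datastores)
max_free_space = max(ds['freespace'] for ds in fake_datastores)

# Mirrors convert_mb_to_ceil_gb(1): reserving 1 MB reserves a whole GB.
reserved_disk_gb = int(math.ceil(1 / 1024.0))

assert total_disk_capacity // GiB == 6  # floor: the partial GiB is dropped
assert max_free_space // GiB == 3  # bounded by one datastore's free space
assert reserved_disk_gb == 1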
def update_provider_tree(self, provider_tree, nodename, allocations=None):
    """Update a ProviderTree object with current resource provider,
    inventory information and CPU traits.

    :param nova.compute.provider_tree.ProviderTree provider_tree:
        A nova.compute.provider_tree.ProviderTree object representing all
        the providers in the tree associated with the compute node, and
        any sharing providers (those with the ``MISC_SHARES_VIA_AGGREGATE``
        trait) associated via aggregate with any of those providers (but
        not *their* tree- or aggregate-associated providers), as currently
        known by placement.
    :param nodename:
        String name of the compute node (i.e. ComputeNode.hypervisor_hostname)
        for which the caller is requesting updated provider information.
    :param allocations:
        Dict of allocation data of the form:

          { $CONSUMER_UUID: {
                # The shape of each "allocations" dict below is identical
                # to the return from GET /allocations/{consumer_uuid}
                "allocations": {
                    $RP_UUID: {
                        "generation": $RP_GEN,
                        "resources": {
                            $RESOURCE_CLASS: $AMOUNT,
                            ...
                        },
                    },
                    ...
                },
                "project_id": $PROJ_ID,
                "user_id": $USER_ID,
                "consumer_generation": $CONSUMER_GEN,
            },
            ...
          }

        If None, and the method determines that any inventory needs to be
        moved (from one provider to another and/or to a different resource
        class), the ReshapeNeeded exception must be raised. Otherwise, this
        dict must be edited in place to indicate the desired final state of
        allocations.
    :raises ReshapeNeeded: If allocations is None and any inventory needs
        to be moved from one provider to another and/or to a different
        resource class. At this time the VMware driver does not reshape.
    :raises: ReshapeFailed if the requested tree reshape fails for
        whatever reason.
    """
    # NOTE(cdent): This is a side-effecty method, we are changing the
    # provider tree in place (on purpose).
    inv = provider_tree.data(nodename).inventory
    ratios = self._get_allocation_ratios(inv)

    stats = vm_util.get_stats_from_cluster(self._session,
                                           self._cluster_ref)
    datastores = ds_util.get_available_datastores(self._session,
                                                  self._cluster_ref,
                                                  self._datastore_regex)
    total_disk_capacity = sum([ds.capacity for ds in datastores])
    max_free_space = max([ds.freespace for ds in datastores])
    reserved_disk_gb = compute_utils.convert_mb_to_ceil_gb(
        CONF.reserved_host_disk_mb)
    result = {
        orc.VCPU: {
            'total': stats['cpu']['vcpus'],
            'reserved': CONF.reserved_host_cpus,
            'min_unit': 1,
            'max_unit': stats['cpu']['max_vcpus_per_host'],
            'step_size': 1,
            'allocation_ratio': ratios[orc.VCPU],
        },
        orc.MEMORY_MB: {
            'total': stats['mem']['total'],
            'reserved': CONF.reserved_host_memory_mb,
            'min_unit': 1,
            'max_unit': stats['mem']['max_mem_mb_per_host'],
            'step_size': 1,
            'allocation_ratio': ratios[orc.MEMORY_MB],
        },
    }

    # If a sharing DISK_GB provider exists in the provider tree, then our
    # storage is shared, and we should not report the DISK_GB inventory in
    # the compute node provider.
    # TODO(cdent): We don't do this yet, in part because of the issues
    # in bug #1784020, but also because we can represent all datastores
    # as shared providers and should do once update_provider_tree is
    # working well.
    if provider_tree.has_sharing_provider(orc.DISK_GB):
        LOG.debug('Ignoring sharing provider - see bug #1784020')

    result[orc.DISK_GB] = {
        'total': total_disk_capacity // units.Gi,
        'reserved': reserved_disk_gb,
        'min_unit': 1,
        'max_unit': max_free_space // units.Gi,
        'step_size': 1,
        'allocation_ratio': ratios[orc.DISK_GB],
    }

    provider_tree.update_inventory(nodename, result)
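# update_provider_tree returns nothing: the caller sees its work only
# through the tree it mutates, and this driver never touches the
# ``allocations`` argument because it does not reshape. Below is a minimal
# sketch of that contract against a stubbed tree; StubProviderTree and the
# 'domain-c8' node name are hypothetical illustrations, not nova's real
# ProviderTree, and they model only the three calls the method makes.


class StubProviderData(object):
    def __init__(self, inventory):
        self.inventory = inventory


class StubProviderTree(object):
    """Stubs data(), has_sharing_provider() and update_inventory()."""

    def __init__(self):
        self._inventory = {}

    def data(self, nodename):
        # Read path: the driver pulls any existing inventory to derive
        # allocation ratios before rebuilding the records.
        return StubProviderData(self._inventory.get(nodename, {}))

    def has_sharing_provider(self, resource_class):
        # No sharing DISK_GB provider in this sketch, so the driver
        # reports DISK_GB on the compute node provider itself.
        return False

    def update_inventory(self, nodename, inventory):
        # Write path: the driver replaces the node's inventory in place.
        self._inventory[nodename] = inventory


tree = StubProviderTree()
# driver.update_provider_tree(tree, 'domain-c8')  # hypothetical driver/node
# Afterwards the caller reads the side effect back out of the tree, e.g.:
# tree.data('domain-c8').inventory[orc.VCPU]['allocation_ratio']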