Example #1
File: utils.py Project: openstack/nova
def resources_from_flavor(instance, flavor):
    """Convert a flavor into a set of resources for placement, taking into
    account boot-from-volume instances.

    This takes an instance and a flavor and returns a dict of
    resource_class:amount based on the attributes of the flavor, accounting for
    any overrides that are made in extra_specs.
    """
    is_bfv = compute_utils.is_volume_backed_instance(instance._context,
                                                     instance)
    swap_in_gb = compute_utils.convert_mb_to_ceil_gb(flavor.swap)
    disk = ((0 if is_bfv else flavor.root_gb) +
            swap_in_gb + flavor.ephemeral_gb)

    resources = {
        orc.VCPU: flavor.vcpus,
        orc.MEMORY_MB: flavor.memory_mb,
        orc.DISK_GB: disk,
    }
    if "extra_specs" in flavor:
        # TODO(efried): This method is currently only used from places that
        # assume the compute node is the only resource provider.  So for now,
        # we just merge together all the resources specified in the flavor and
        # pass them along.  This will need to be adjusted when nested and/or
        # shared RPs are in play.
        rreq = ResourceRequest.from_extra_specs(flavor.extra_specs)
        resources = rreq.merged_resources(flavor_resources=resources)

    return resources
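
Every example on this page funnels a megabyte value through compute_utils.convert_mb_to_ceil_gb before reporting disk sizes in GB. A minimal sketch of that helper, assuming it does nothing more than round up to whole gigabytes:

import math

def convert_mb_to_ceil_gb(mb_value):
    # Round up so a partial GB (e.g. a 512 MB swap) still reserves a
    # full 1 GB of DISK_GB inventory; 0 stays 0.
    gb_int = 0
    if mb_value:
        gb_int = int(math.ceil(mb_value / 1024.0))
    return gb_int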
Example #2
    def assert_hypervisor_usage(
        self,
        compute_node_uuid,
        flavor,
        volume_backed,
    ):
        """Asserts the given hypervisor's resource usage matches the
        given flavor (assumes a single instance on the hypervisor).

        :param compute_node_uuid: UUID of the ComputeNode to check.
        :param flavor: "flavor" entry dict from GET /flavors/{flavor_id}
        :param volume_backed: True if the flavor is used with a volume-backed
            server, False otherwise.
        """
        # GET /os-hypervisors/{uuid} requires at least 2.53
        with utils.temporary_mutation(self.admin_api, microversion='2.53'):
            hypervisor = self.admin_api.api_get(
                '/os-hypervisors/%s' % compute_node_uuid).body['hypervisor']

        if volume_backed:
            expected_disk_usage = 0
        else:
            expected_disk_usage = flavor['disk']

        # Account for reserved_host_disk_mb.
        expected_disk_usage += compute_utils.convert_mb_to_ceil_gb(
            CONF.reserved_host_disk_mb)
        self.assertEqual(expected_disk_usage, hypervisor['local_gb_used'])
        # Account for reserved_host_memory_mb.
        expected_ram_usage = CONF.reserved_host_memory_mb + flavor['ram']
        self.assertEqual(expected_ram_usage, hypervisor['memory_mb_used'])
        # Account for reserved_host_cpus.
        expected_vcpu_usage = CONF.reserved_host_cpus + flavor['vcpus']
        self.assertEqual(expected_vcpu_usage, hypervisor['vcpus_used'])
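
A hedged usage sketch (the flavor id and compute node UUID are assumed): a functional test boots exactly one server with a given flavor, then calls this assertion against the node it landed on.

flavor = self.api.api_get('/flavors/1').body['flavor']
# ... boot one server with this flavor on the compute node ...
self.assert_hypervisor_usage(compute_node_uuid, flavor,
                             volume_backed=False)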
Example #3
File: utils.py Project: panguan737/nova
def resources_from_flavor(instance, flavor):
    """Convert a flavor into a set of resources for placement, taking into
    account boot-from-volume instances.

    This takes an instance and a flavor and returns a dict of
    resource_class:amount based on the attributes of the flavor, accounting for
    any overrides that are made in extra_specs.
    """
    is_bfv = compute_utils.is_volume_backed_instance(instance._context,
                                                     instance)
    swap_in_gb = compute_utils.convert_mb_to_ceil_gb(flavor.swap)
    disk = ((0 if is_bfv else flavor.root_gb) + swap_in_gb +
            flavor.ephemeral_gb)

    resources = {
        fields.ResourceClass.VCPU: flavor.vcpus,
        fields.ResourceClass.MEMORY_MB: flavor.memory_mb,
        fields.ResourceClass.DISK_GB: disk,
    }
    if "extra_specs" in flavor:
        # TODO(efried): This method is currently only used from places that
        # assume the compute node is the only resource provider.  So for now,
        # we just merge together all the resources specified in the flavor and
        # pass them along.  This will need to be adjusted when nested and/or
        # shared RPs are in play.
        rreq = ResourceRequest.from_extra_specs(flavor.extra_specs)
        resources = rreq.merged_resources(flavor_resources=resources)

    return resources
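
Worked numbers for the disk expression above (flavor values assumed for illustration):

# root_gb=10, swap=512 (MB), ephemeral_gb=5:
#   swap_in_gb = ceil(512 / 1024.0) = 1
#   disk       = 10 + 1 + 5 = 16
# volume-backed (is_bfv=True): disk = 0 + 1 + 5 = 6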
Example #4
    def assert_hypervisor_usage(self, compute_node_uuid, flavor,
                                volume_backed):
        """Asserts the given hypervisor's resource usage matches the
        given flavor (assumes a single instance on the hypervisor).

        :param compute_node_uuid: UUID of the ComputeNode to check.
        :param flavor: "flavor" entry dict from GET /flavors/{flavor_id}
        :param volume_backed: True if the flavor is used with a volume-backed
            server, False otherwise.
        """
        # GET /os-hypervisors/{uuid} requires at least 2.53
        with utils.temporary_mutation(self.admin_api, microversion='2.53'):
            hypervisor = self.admin_api.api_get(
                '/os-hypervisors/%s' % compute_node_uuid).body['hypervisor']
        if volume_backed:
            expected_disk_usage = 0
        else:
            expected_disk_usage = flavor['disk']
        # Account for reserved_host_disk_mb.
        expected_disk_usage += compute_utils.convert_mb_to_ceil_gb(
            CONF.reserved_host_disk_mb)
        self.assertEqual(expected_disk_usage, hypervisor['local_gb_used'])
        # Account for reserved_host_memory_mb.
        expected_ram_usage = CONF.reserved_host_memory_mb + flavor['ram']
        self.assertEqual(expected_ram_usage, hypervisor['memory_mb_used'])
        # Account for reserved_host_cpus.
        expected_vcpu_usage = CONF.reserved_host_cpus + flavor['vcpus']
        self.assertEqual(expected_vcpu_usage, hypervisor['vcpus_used'])
Example #6
def _get_reserved_host_values_from_config():
    return {
        'VCPU': CONF.reserved_host_cpus,
        'MEMORY_MB': CONF.reserved_host_memory_mb,
        'DISK_GB': compute_utils.convert_mb_to_ceil_gb(
            CONF.reserved_host_disk_mb)
    }
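
With assumed configuration values, the helper above would return the following dict (note the ceiling on the disk conversion):

# reserved_host_cpus=2, reserved_host_memory_mb=512,
# reserved_host_disk_mb=1536 (all assumed):
{
    'VCPU': 2,
    'MEMORY_MB': 512,
    'DISK_GB': 2,   # ceil(1536 / 1024.0) == 2
}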
Example #7
File: hostops.py Project: y00187570/nova
    def update_provider_tree(self, provider_tree, nodename,
                             allocation_ratios, allocations=None):
        resources = self.get_available_resource()

        inventory = {
            orc.VCPU: {
                'total': resources['vcpus'],
                'min_unit': 1,
                'max_unit': resources['vcpus'],
                'step_size': 1,
                'allocation_ratio': allocation_ratios[orc.VCPU],
                'reserved': CONF.reserved_host_cpus,
            },
            orc.MEMORY_MB: {
                'total': resources['memory_mb'],
                'min_unit': 1,
                'max_unit': resources['memory_mb'],
                'step_size': 1,
                'allocation_ratio': allocation_ratios[orc.MEMORY_MB],
                'reserved': CONF.reserved_host_memory_mb,
            },
            # TODO(lpetrut): once #1784020 is fixed, we can skip reporting
            # shared storage capacity
            orc.DISK_GB: {
                'total': resources['local_gb'],
                'min_unit': 1,
                'max_unit': resources['local_gb'],
                'step_size': 1,
                'allocation_ratio': allocation_ratios[orc.DISK_GB],
                'reserved': compute_utils.convert_mb_to_ceil_gb(
                    CONF.reserved_host_disk_mb),
            },
        }

        provider_tree.update_inventory(nodename, inventory)
Example #8
def _compute_node_to_inventory_dict(compute_node):
    """Given a supplied `objects.ComputeNode` object, return a dict, keyed
    by resource class, of various inventory information.

    :param compute_node: `objects.ComputeNode` object to translate
    """
    result = {}

    # NOTE(jaypipes): Ironic virt driver will return 0 values for vcpus,
    # memory_mb and disk_gb if the Ironic node is not available/operable
    if compute_node.vcpus > 0:
        result[VCPU] = {
            'total': compute_node.vcpus,
            'reserved': CONF.reserved_host_cpus,
            'min_unit': 1,
            'max_unit': compute_node.vcpus,
            'step_size': 1,
            'allocation_ratio': compute_node.cpu_allocation_ratio,
        }
    if compute_node.memory_mb > 0:
        result[MEMORY_MB] = {
            'total': compute_node.memory_mb,
            'reserved': CONF.reserved_host_memory_mb,
            'min_unit': 1,
            'max_unit': compute_node.memory_mb,
            'step_size': 1,
            'allocation_ratio': compute_node.ram_allocation_ratio,
        }
    if compute_node.local_gb > 0:
        # TODO(johngarbutt) We should either move to reserved_host_disk_gb
        # or start tracking DISK_MB.
        reserved_disk_gb = compute_utils.convert_mb_to_ceil_gb(
            CONF.reserved_host_disk_mb)
        result[DISK_GB] = {
            'total': compute_node.local_gb,
            'reserved': reserved_disk_gb,
            'min_unit': 1,
            'max_unit': compute_node.local_gb,
            'step_size': 1,
            'allocation_ratio': compute_node.disk_allocation_ratio,
        }
    return result
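
One consequence of the zero-value guards above, assuming an Ironic node that is not available/operable:

# Assumed ComputeNode with vcpus=0, memory_mb=0, local_gb=0:
# _compute_node_to_inventory_dict(compute_node) returns {}, so no
# VCPU, MEMORY_MB or DISK_GB inventory is reported for that node.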
Example #9
def resources_from_flavor(instance, flavor):
    """Convert a flavor into a set of resources for placement, taking into
    account boot-from-volume instances.

    This takes an instance and a flavor and returns a dict of
    resource_class:amount based on the attributes of the flavor, accounting for
    any overrides that are made in extra_specs.
    """
    is_bfv = compute_utils.is_volume_backed_instance(instance._context,
                                                     instance)
    swap_in_gb = compute_utils.convert_mb_to_ceil_gb(flavor.swap)
    disk = ((0 if is_bfv else flavor.root_gb) + swap_in_gb +
            flavor.ephemeral_gb)

    resources = {
        fields.ResourceClass.VCPU: flavor.vcpus,
        fields.ResourceClass.MEMORY_MB: flavor.memory_mb,
        fields.ResourceClass.DISK_GB: disk,
    }
    if "extra_specs" in flavor:
        _process_extra_specs(flavor.extra_specs, resources)
    return resources
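
This variant delegates the extra-spec overrides to _process_extra_specs, which is not shown on this page. A hypothetical sketch of such a helper, assuming it only handles simple resources:$RESOURCE_CLASS overrides (the key format documented in Example #14) and mutates resources in place:

def _process_extra_specs(extra_specs, resources):
    # Hypothetical: override or add resource amounts from flavor
    # extra specs of the form "resources:$RESOURCE_CLASS": "$AMOUNT".
    for key, value in extra_specs.items():
        if key.startswith('resources:'):
            resource_class = key.split(':', 1)[1]
            resources[resource_class] = int(value)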
Example #10
File: utils.py Project: sapcc/nova
def resources_from_flavor(instance, flavor):
    """Convert a flavor into a set of resources for placement, taking into
    account boot-from-volume instances.

    This takes an instance and a flavor and returns a dict of
    resource_class:amount based on the attributes of the flavor, accounting for
    any overrides that are made in extra_specs.
    """
    is_bfv = compute_utils.is_volume_backed_instance(instance._context,
                                                     instance)
    swap_in_gb = compute_utils.convert_mb_to_ceil_gb(flavor.swap)
    disk = ((0 if is_bfv else flavor.root_gb) +
            swap_in_gb + flavor.ephemeral_gb)

    resources = {
        fields.ResourceClass.VCPU: flavor.vcpus,
        fields.ResourceClass.MEMORY_MB: flavor.memory_mb,
        fields.ResourceClass.DISK_GB: disk,
    }
    if "extra_specs" in flavor:
        _process_extra_specs(flavor.extra_specs, resources)
    return resources
Example #11
    def get_inventory(self, nodename):
        """Return a dict, keyed by resource class, of inventory information for
        the supplied node.
        """
        stats = vm_util.get_stats_from_cluster(self._session,
                                               self._cluster_ref)
        datastores = ds_util.get_available_datastores(self._session,
                                                      self._cluster_ref,
                                                      self._datastore_regex)
        total_disk_capacity = sum([ds.capacity for ds in datastores])
        max_free_space = max([ds.freespace for ds in datastores])
        reserved_disk_gb = compute_utils.convert_mb_to_ceil_gb(
            CONF.reserved_host_disk_mb)
        result = {
            obj_fields.ResourceClass.VCPU: {
                'total': stats['cpu']['vcpus'],
                'reserved': CONF.reserved_host_cpus,
                'min_unit': 1,
                'max_unit': stats['cpu']['max_vcpus_per_host'],
                'step_size': 1,
            },
            obj_fields.ResourceClass.MEMORY_MB: {
                'total': stats['mem']['total'],
                'reserved': CONF.reserved_host_memory_mb,
                'min_unit': 1,
                'max_unit': stats['mem']['max_mem_mb_per_host'],
                'step_size': 1,
            },
            obj_fields.ResourceClass.DISK_GB: {
                'total': total_disk_capacity // units.Gi,
                'reserved': reserved_disk_gb,
                'min_unit': 1,
                'max_unit': max_free_space // units.Gi,
                'step_size': 1,
            },
        }
        return result
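
Note the two different unit conversions above: datastore capacity arrives in bytes and is floored by integer division with units.Gi (1024 ** 3), while the reserved value arrives in megabytes and is rounded up. Illustrative values, assumed:

import math
from oslo_utils import units

total_disk_capacity = 500 * units.Gi + 1   # bytes, assumed
print(total_disk_capacity // units.Gi)     # 500 -- capacity is floored
print(math.ceil(1536 / 1024.0))            # 2   -- reserved MB is rounded up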
Example #12
File: driver.py Project: klmitch/nova
    def get_inventory(self, nodename):
        """Return a dict, keyed by resource class, of inventory information for
        the supplied node.
        """
        stats = vm_util.get_stats_from_cluster(self._session,
                                               self._cluster_ref)
        datastores = ds_util.get_available_datastores(self._session,
                                                      self._cluster_ref,
                                                      self._datastore_regex)
        total_disk_capacity = sum([ds.capacity for ds in datastores])
        max_free_space = max([ds.freespace for ds in datastores])
        reserved_disk_gb = compute_utils.convert_mb_to_ceil_gb(
            CONF.reserved_host_disk_mb)
        result = {
            fields.ResourceClass.VCPU: {
                'total': stats['cpu']['vcpus'],
                'reserved': CONF.reserved_host_cpus,
                'min_unit': 1,
                'max_unit': stats['cpu']['max_vcpus_per_host'],
                'step_size': 1,
            },
            fields.ResourceClass.MEMORY_MB: {
                'total': stats['mem']['total'],
                'reserved': CONF.reserved_host_memory_mb,
                'min_unit': 1,
                'max_unit': stats['mem']['max_mem_mb_per_host'],
                'step_size': 1,
            },
            fields.ResourceClass.DISK_GB: {
                'total': total_disk_capacity // units.Gi,
                'reserved': reserved_disk_gb,
                'min_unit': 1,
                'max_unit': max_free_space // units.Gi,
                'step_size': 1,
            },
        }
        return result
Example #13
    def update_provider_tree(self, provider_tree, nodename, allocations=None):
        """Update a ProviderTree object with current resource provider,
        inventory information and CPU traits.

        :param nova.compute.provider_tree.ProviderTree provider_tree:
            A nova.compute.provider_tree.ProviderTree object representing all
            the providers in the tree associated with the compute node, and any
            sharing providers (those with the ``MISC_SHARES_VIA_AGGREGATE``
            trait) associated via aggregate with any of those providers (but
            not *their* tree- or aggregate-associated providers), as currently
            known by placement.
        :param nodename:
            String name of the compute node (i.e.
            ComputeNode.hypervisor_hostname) for which the caller is requesting
            updated provider information.
        :param allocations:
            Dict of allocation data of the form:
              { $CONSUMER_UUID: {
                    # The shape of each "allocations" dict below is identical
                    # to the return from GET /allocations/{consumer_uuid}
                    "allocations": {
                        $RP_UUID: {
                            "generation": $RP_GEN,
                            "resources": {
                                $RESOURCE_CLASS: $AMOUNT,
                                ...
                            },
                        },
                        ...
                    },
                    "project_id": $PROJ_ID,
                    "user_id": $USER_ID,
                    "consumer_generation": $CONSUMER_GEN,
                },
                ...
              }
            If None, and the method determines that any inventory needs to be
            moved (from one provider to another and/or to a different resource
            class), the ReshapeNeeded exception must be raised. Otherwise, this
            dict must be edited in place to indicate the desired final state of
            allocations.
        :raises ReshapeNeeded: If allocations is None and any inventory needs
            to be moved from one provider to another and/or to a different
            resource class. At this time the VMware driver does not reshape.
        :raises: ReshapeFailed if the requested tree reshape fails for
            whatever reason.
        """
        # NOTE(cdent): This is a side-effecty method, we are changing the
        # provider tree in place (on purpose).
        inv = provider_tree.data(nodename).inventory
        ratios = self._get_allocation_ratios(inv)
        stats = vm_util.get_stats_from_cluster(self._session,
                                               self._cluster_ref)
        datastores = ds_util.get_available_datastores(self._session,
                                                      self._cluster_ref,
                                                      self._datastore_regex)
        total_disk_capacity = sum([ds.capacity for ds in datastores])
        max_free_space = max([ds.freespace for ds in datastores])
        reserved_disk_gb = compute_utils.convert_mb_to_ceil_gb(
            CONF.reserved_host_disk_mb)
        result = {
            orc.VCPU: {
                'total': stats['cpu']['vcpus'],
                'reserved': CONF.reserved_host_cpus,
                'min_unit': 1,
                'max_unit': stats['cpu']['max_vcpus_per_host'],
                'step_size': 1,
                'allocation_ratio': ratios[orc.VCPU],
            },
            orc.MEMORY_MB: {
                'total': stats['mem']['total'],
                'reserved': CONF.reserved_host_memory_mb,
                'min_unit': 1,
                'max_unit': stats['mem']['max_mem_mb_per_host'],
                'step_size': 1,
                'allocation_ratio': ratios[orc.MEMORY_MB],
            },
        }

        # If a sharing DISK_GB provider exists in the provider tree, then our
        # storage is shared, and we should not report the DISK_GB inventory in
        # the compute node provider.
        # TODO(cdent): We don't do this yet, in part because of the issues
        # in bug #1784020, but also because we can represent all datastores
        # as shared providers and should do once update_provider_tree is
        # working well.
        if provider_tree.has_sharing_provider(orc.DISK_GB):
            LOG.debug('Ignoring sharing provider - see bug #1784020')
        result[orc.DISK_GB] = {
            'total': total_disk_capacity // units.Gi,
            'reserved': reserved_disk_gb,
            'min_unit': 1,
            'max_unit': max_free_space // units.Gi,
            'step_size': 1,
            'allocation_ratio': ratios[orc.DISK_GB],
        }

        provider_tree.update_inventory(nodename, result)
Example #14
    def __init__(self, request_spec):
        """Create a new instance of ResourceRequest from a RequestSpec.

        Examines the flavor, flavor extra specs, and (optional) image metadata
        of the provided ``request_spec``.

        For extra specs, items of the following form are examined:

        - ``resources:$RESOURCE_CLASS``: $AMOUNT
        - ``resources$N:$RESOURCE_CLASS``: $AMOUNT
        - ``trait:$TRAIT_NAME``: "required"
        - ``trait$N:$TRAIT_NAME``: "required"

        .. note::

            This does *not* yet handle ``member_of[$N]``.

        For image metadata, traits are extracted from the ``traits_required``
        property, if present.

        For the flavor, ``VCPU``, ``MEMORY_MB`` and ``DISK_GB`` are calculated
        from Flavor properties, though these are only used if they aren't
        overridden by flavor extra specs.

        :param request_spec: An instance of ``objects.RequestSpec``.
        """
        # { ident: RequestGroup }
        self._rg_by_id = {}
        self._group_policy = None
        # Default to the configured limit but _limit can be
        # set to None to indicate "no limit".
        self._limit = CONF.scheduler.max_placement_results

        # TODO(efried): Handle member_of[$N], which will need to be reconciled
        # with destination.aggregates handling in resources_from_request_spec

        image = (request_spec.image if 'image' in request_spec else
                 objects.ImageMeta(properties=objects.ImageMetaProps()))

        # Parse the flavor extra specs
        self._process_extra_specs(request_spec.flavor)

        self.numbered_groups_from_flavor = self.get_num_of_numbered_groups()

        # Now parse the (optional) image metadata
        self._process_image_meta(image)

        # Finally, parse the flavor itself, though we'll only use these fields
        # if they don't conflict with something already provided by the flavor
        # extra specs. These are all added to the unnumbered request group.
        merged_resources = self.merged_resources()

        if orc.VCPU not in merged_resources:
            self._add_resource(None, orc.VCPU, request_spec.vcpus)

        if orc.MEMORY_MB not in merged_resources:
            self._add_resource(None, orc.MEMORY_MB, request_spec.memory_mb)

        if orc.DISK_GB not in merged_resources:
            disk = request_spec.ephemeral_gb
            disk += compute_utils.convert_mb_to_ceil_gb(request_spec.swap)
            if 'is_bfv' not in request_spec or not request_spec.is_bfv:
                disk += request_spec.root_gb

            if disk:
                self._add_resource(None, orc.DISK_GB, disk)

        self._translate_memory_encryption(request_spec.flavor, image)

        self.strip_zeros()
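
A worked trace of the DISK_GB branch above, with assumed request_spec values:

# root_gb=20, ephemeral_gb=0, swap=0:
#   is_bfv=True  -> disk = 0 + 0 = 0       -> no DISK_GB resource added
#   is_bfv=False -> disk = 0 + 0 + 20 = 20 -> DISK_GB: 20 added to the
#                                             unnumbered request group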
Example #15
File: driver.py Project: mahak/nova
    def update_provider_tree(self, provider_tree, nodename, allocations=None):
        """Update a ProviderTree object with current resource provider,
        inventory information and CPU traits.

        :param nova.compute.provider_tree.ProviderTree provider_tree:
            A nova.compute.provider_tree.ProviderTree object representing all
            the providers in the tree associated with the compute node, and any
            sharing providers (those with the ``MISC_SHARES_VIA_AGGREGATE``
            trait) associated via aggregate with any of those providers (but
            not *their* tree- or aggregate-associated providers), as currently
            known by placement.
        :param nodename:
            String name of the compute node (i.e.
            ComputeNode.hypervisor_hostname) for which the caller is requesting
            updated provider information.
        :param allocations:
            Dict of allocation data of the form:
              { $CONSUMER_UUID: {
                    # The shape of each "allocations" dict below is identical
                    # to the return from GET /allocations/{consumer_uuid}
                    "allocations": {
                        $RP_UUID: {
                            "generation": $RP_GEN,
                            "resources": {
                                $RESOURCE_CLASS: $AMOUNT,
                                ...
                            },
                        },
                        ...
                    },
                    "project_id": $PROJ_ID,
                    "user_id": $USER_ID,
                    "consumer_generation": $CONSUMER_GEN,
                },
                ...
              }
            If None, and the method determines that any inventory needs to be
            moved (from one provider to another and/or to a different resource
            class), the ReshapeNeeded exception must be raised. Otherwise, this
            dict must be edited in place to indicate the desired final state of
            allocations.
        :raises ReshapeNeeded: If allocations is None and any inventory needs
            to be moved from one provider to another and/or to a different
            resource class. At this time the VMware driver does not reshape.
        :raises: ReshapeFailed if the requested tree reshape fails for
            whatever reason.
        """
        # NOTE(cdent): This is a side-effecty method, we are changing the
        # provider tree in place (on purpose).
        inv = provider_tree.data(nodename).inventory
        ratios = self._get_allocation_ratios(inv)
        stats = vm_util.get_stats_from_cluster(self._session,
                                               self._cluster_ref)
        datastores = ds_util.get_available_datastores(self._session,
                                                      self._cluster_ref,
                                                      self._datastore_regex)
        total_disk_capacity = sum([ds.capacity for ds in datastores])
        max_free_space = max([ds.freespace for ds in datastores])
        reserved_disk_gb = compute_utils.convert_mb_to_ceil_gb(
            CONF.reserved_host_disk_mb)
        result = {
            orc.VCPU: {
                'total': stats['cpu']['vcpus'],
                'reserved': CONF.reserved_host_cpus,
                'min_unit': 1,
                'max_unit': stats['cpu']['max_vcpus_per_host'],
                'step_size': 1,
                'allocation_ratio': ratios[orc.VCPU],
            },
            orc.MEMORY_MB: {
                'total': stats['mem']['total'],
                'reserved': CONF.reserved_host_memory_mb,
                'min_unit': 1,
                'max_unit': stats['mem']['max_mem_mb_per_host'],
                'step_size': 1,
                'allocation_ratio': ratios[orc.MEMORY_MB],
            },
        }

        # If a sharing DISK_GB provider exists in the provider tree, then our
        # storage is shared, and we should not report the DISK_GB inventory in
        # the compute node provider.
        # TODO(cdent): We don't do this yet, in part because of the issues
        # in bug #1784020, but also because we can represent all datastores
        # as shared providers and should do once update_provider_tree is
        # working well.
        if provider_tree.has_sharing_provider(orc.DISK_GB):
            LOG.debug('Ignoring sharing provider - see bug #1784020')
        result[orc.DISK_GB] = {
            'total': total_disk_capacity // units.Gi,
            'reserved': reserved_disk_gb,
            'min_unit': 1,
            'max_unit': max_free_space // units.Gi,
            'step_size': 1,
            'allocation_ratio': ratios[orc.DISK_GB],
        }

        provider_tree.update_inventory(nodename, result)