def build_request_spec(ctxt, image, instances, instance_type=None):
    """Build a request_spec for the scheduler.

    The request_spec assumes that all instances to be scheduled are the same
    type.

    :param ctxt: request context (unused here, kept for the RPC interface)
    :param image: image metadata dict, or a falsy value for no image
    :param instances: list of instances; the first one is used as the
                      template for the spec
    :param instance_type: flavor to schedule with; derived from the first
                          instance when not supplied
    :returns: a primitive (JSON-serializable) request_spec dict
    """
    first = instances[0]

    # Derive the flavor from the first instance when not given explicitly.
    if instance_type is None:
        if isinstance(first, objects.Instance):
            instance_type = first.get_flavor()
        else:
            instance_type = flavors.extract_flavor(first)

    # Downgrade versioned objects to primitive-friendly forms before
    # serializing the spec.
    if isinstance(first, objects.Instance):
        first = instance_obj.compat_instance(first)
    if isinstance(instance_type, objects.Flavor):
        instance_type = obj_base.obj_to_primitive(instance_type)

    return jsonutils.to_primitive({
        'image': image or {},
        'instance_properties': first,
        'instance_type': instance_type,
        'num_instances': len(instances),
    })
def _update_usage_from_instance(self, context, resources, instance):
    """Update usage for a single instance.

    :param context: security context
    :param resources: dict of compute node resources, mutated in place
    :param instance: instance (object or dict) whose usage is applied

    An instance not yet in ``self.tracked_instances`` is added with a
    +1 usage sign; an instance whose vm_state is DELETED is dropped from
    tracking with a -1 sign. Stats and (optionally) PCI tracking are
    refreshed on every call.
    """
    uuid = instance['uuid']
    # First time we see this instance on this host?
    is_new_instance = uuid not in self.tracked_instances
    is_deleted_instance = instance['vm_state'] == vm_states.DELETED

    if is_new_instance:
        # Track a compat (primitive) copy of the instance.
        self.tracked_instances[uuid] = instance_obj.compat_instance(
            instance)
        sign = 1

    if is_deleted_instance:
        self.tracked_instances.pop(uuid)
        sign = -1

    self.stats.update_stats_for_instance(instance)

    if self.pci_tracker:
        self.pci_tracker.update_pci_for_instance(context, instance)

    # if it's a new or deleted instance:
    # NOTE: ``sign`` is only bound on those two paths, which are exactly
    # the paths where this branch is taken.
    if is_new_instance or is_deleted_instance:
        # new instance, update compute node resource usage:
        self._update_usage(context, resources, instance, sign=sign)

    resources['current_workload'] = self.stats.calculate_workload()
    if self.pci_tracker:
        resources['pci_device_pools'] = self.pci_tracker.stats
    else:
        resources['pci_device_pools'] = []
def _get_usage_dict(self, object_or_dict, **updates):
    """Make a usage dict _update methods expect.

    Accepts a dict or an Instance or Flavor object, and a set of updates.
    Converts the object to a dict and applies the updates.

    :param object_or_dict: instance or flavor as an object or just a dict
    :param updates: key-value pairs to update the passed object.
                    Currently only considers 'numa_topology', all other
                    keys are ignored.

    :returns: a dict with all the information from object_or_dict updated
              with updates
    """
    # Normalize whatever we were handed into a plain dict.
    if isinstance(object_or_dict, objects.Instance):
        usage = instance_obj.compat_instance(object_or_dict)
    elif isinstance(object_or_dict, objects.Flavor):
        usage = obj_base.obj_to_primitive(object_or_dict)
    else:
        usage = dict(object_or_dict)

    # Only 'numa_topology' is honored from the updates; everything else
    # is deliberately ignored.
    if 'numa_topology' in updates:
        usage['numa_topology'] = updates['numa_topology']

    return usage
def build_request_spec(ctxt, image, instances, instance_type=None):
    """Build a request_spec for the scheduler.

    The request_spec assumes that all instances to be scheduled are the
    same type.

    :param ctxt: request context (carried for the RPC interface)
    :param image: image metadata dict, or a falsy value for no image
    :param instances: instances to schedule; the first acts as template
    :param instance_type: flavor to use; extracted from the first
                          instance when None
    :returns: a JSON-serializable request_spec dict
    """
    instance = instances[0]
    if instance_type is None:
        instance_type = (instance.get_flavor()
                         if isinstance(instance, objects.Instance)
                         else flavors.extract_flavor(instance))

    # Convert versioned objects down to primitives for serialization.
    if isinstance(instance, objects.Instance):
        instance = instance_obj.compat_instance(instance)
    if isinstance(instance_type, objects.Flavor):
        instance_type = obj_base.obj_to_primitive(instance_type)

    request_spec = dict(
        image=image or {},
        instance_properties=instance,
        instance_type=instance_type,
        num_instances=len(instances),
    )
    return jsonutils.to_primitive(request_spec)
def _update_usage_from_instance(self, context, instance):
    """Update usage for a single instance.

    Adds the instance to the tracked set on first sight (+1 usage) and
    removes it once its vm_state reaches DELETED (-1 usage), refreshing
    stats and PCI tracking on every call and recomputing workload/PCI
    pools on ``self.compute_node``.
    """
    uuid = instance['uuid']
    newly_tracked = uuid not in self.tracked_instances
    gone = instance['vm_state'] == vm_states.DELETED

    if newly_tracked:
        # Keep a compat (primitive) copy while the instance is on host.
        self.tracked_instances[uuid] = instance_obj.compat_instance(
            instance)
        sign = 1
    if gone:
        self.tracked_instances.pop(uuid)
        sign = -1

    self.stats.update_stats_for_instance(instance)
    if self.pci_tracker:
        self.pci_tracker.update_pci_for_instance(context, instance)

    # Apply the usage delta only when the tracked set actually changed;
    # ``sign`` is bound exactly on those paths.
    if newly_tracked or gone:
        self._update_usage(instance, sign=sign)

    node = self.compute_node
    node['current_workload'] = self.stats.calculate_workload()
    node['pci_device_pools'] = (self.pci_tracker.stats
                                if self.pci_tracker else [])
def resize_claim(self, context, instance, instance_type, image_meta=None,
                 limits=None):
    """Indicate that resources are needed for a resize operation to this
    compute host.

    :param context: security context
    :param instance: instance object to reserve resources for
    :param instance_type: new instance_type being resized to
    :param image_meta: image metadata dict for the resize (defaults to {})
    :param limits: Dict of oversubscription limits for memory, disk,
                   and CPUs
    :returns: A Claim ticket representing the reserved resources.  This
              should be turned into finalize a resource claim or free
              resources after the compute operation is finished.
    """
    image_meta = image_meta or {}

    if self.disabled:
        # compute_driver doesn't support resource tracking, just
        # generate the migration record and continue the resize:
        migration = self._create_migration(context, instance,
                                           instance_type)
        return claims.NopClaim(migration=migration)

    # get memory overhead required to build this instance:
    overhead = self.driver.estimate_instance_overhead(instance_type)
    LOG.debug(
        "Memory overhead for %(flavor)d MB instance; %(overhead)d "
        "MB", {
            'flavor': instance_type['memory_mb'],
            'overhead': overhead['memory_mb']
        })

    # Work with a compat (primitive) copy of the instance from here on.
    instance_ref = instance_obj.compat_instance(instance)
    claim = claims.ResizeClaim(context, instance_ref, instance_type,
                               image_meta, self, self.compute_node,
                               overhead=overhead, limits=limits)

    # Attach the migration record to the claim so the two stay linked.
    migration = self._create_migration(context, instance_ref,
                                       instance_type)
    claim.migration = migration

    # Mark the resources in-use for the resize landing on this
    # compute host:
    self._update_usage_from_migration(context, instance_ref, image_meta,
                                      self.compute_node, migration)
    # Persist the updated compute node record with elevated privileges.
    elevated = context.elevated()
    self._update(elevated, self.compute_node)

    return claim
def resize_claim(self, context, instance, instance_type, image_meta=None,
                 limits=None):
    """Indicate that resources are needed for a resize operation to this
    compute host.

    :param context: security context
    :param instance: instance object to reserve resources for
    :param instance_type: new instance_type being resized to
    :param image_meta: image metadata dict for the resize (defaults to {})
    :param limits: Dict of oversubscription limits for memory, disk, and
                   CPUs
    :returns: A Claim ticket representing the reserved resources.  This
              should be turned into finalize a resource claim or free
              resources after the compute operation is finished.
    """
    image_meta = image_meta or {}

    if self.disabled:
        # Resource tracking is unsupported by the driver; record the
        # migration and let the resize proceed unclaimed.
        return claims.NopClaim(
            migration=self._create_migration(context, instance,
                                             instance_type))

    # Memory overhead the hypervisor needs on top of the flavor's RAM.
    overhead = self.driver.estimate_instance_overhead(instance_type)
    LOG.debug("Memory overhead for %(flavor)d MB instance; %(overhead)d "
              "MB",
              {'flavor': instance_type['memory_mb'],
               'overhead': overhead['memory_mb']})

    compat = instance_obj.compat_instance(instance)
    resize = claims.ResizeClaim(context, compat, instance_type,
                                image_meta, self, self.compute_node,
                                overhead=overhead, limits=limits)
    resize.migration = self._create_migration(context, compat,
                                              instance_type)

    # Account for the incoming resize on this host's resources, then
    # persist the compute node record with elevated privileges.
    self._update_usage_from_migration(context, compat, image_meta,
                                      self.compute_node, resize.migration)
    self._update(context.elevated(), self.compute_node)

    return resize