def test_live_migration_no_instance_numa_topology(self):
    instance_type = self._fake_instance_type()
    instance = self._fake_instance()
    instance.numa_topology = None
    claims.MoveClaim(
        self.context, instance, _NODENAME, instance_type, {}, self.tracker,
        self.resources, self.empty_requests,
        objects.Migration(migration_type='live-migration'), None)
def get_claim(mock_extra_get, mock_numa_get, mock_pci_get):
    return claims.MoveClaim(
        self.context, self.instance, instance_type, {}, self.tracker,
        self.resources, overhead=overhead, limits=limits)
def test_live_migration_no_instance_numa_topology(self):
    flavor = self._fake_flavor()
    instance = self._fake_instance()
    instance.numa_topology = None
    claims.MoveClaim(
        self.context, instance, _NODENAME, flavor, {}, self.tracker,
        self.compute_node, self.empty_requests,
        objects.Migration(migration_type='live-migration'), None)
def _move_claim(self, context, instance, new_instance_type, move_type=None,
                image_meta=None, limits=None, migration=None):
    """Indicate that resources are needed for a move to this host.

    Move can be either a migrate/resize, live-migrate or an
    evacuate/rebuild operation.

    :param context: security context
    :param instance: instance object to reserve resources for
    :param new_instance_type: new instance_type being resized to
    :param move_type: move type - can be one of 'migration', 'resize',
                      'live-migration', 'evacuate'
    :param image_meta: instance image metadata
    :param limits: Dict of oversubscription limits for memory, disk,
                   and CPUs
    :param migration: A migration object if one was already created
                      elsewhere for this operation
    :returns: A Claim ticket representing the reserved resources. This
              should be used to finalize the resource claim or free the
              resources after the compute operation is finished.
    """
    image_meta = image_meta or {}
    if migration:
        self._claim_existing_migration(migration)
    else:
        migration = self._create_migration(context, instance,
                                           new_instance_type, move_type)

    if self.disabled:
        # compute_driver doesn't support resource tracking, just
        # generate the migration record and continue the resize:
        return claims.NopClaim(migration=migration)

    # get memory overhead required to build this instance:
    overhead = self.driver.estimate_instance_overhead(new_instance_type)
    LOG.debug("Memory overhead for %(flavor)d MB instance; %(overhead)d "
              "MB", {'flavor': new_instance_type.memory_mb,
                     'overhead': overhead['memory_mb']})

    pci_requests = objects.InstancePCIRequests.\
        get_by_instance_uuid_and_newness(context, instance.uuid, True)
    claim = claims.MoveClaim(context, instance, new_instance_type,
                             image_meta, self, self.compute_node,
                             pci_requests, overhead=overhead,
                             limits=limits)
    claim.migration = migration
    instance.migration_context = claim.create_migration_context()
    instance.save()

    # Mark the resources in-use for the resize landing on this
    # compute host:
    self._update_usage_from_migration(context, instance, image_meta,
                                      migration)
    elevated = context.elevated()
    self._update(elevated)

    return claim
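# Usage sketch for the claim ticket returned above (illustrative; the
# `tracker` and `do_move` names are assumptions, not part of this module).
# Nova claim objects are context managers whose __exit__() calls abort()
# when an exception escapes, which frees the reserved resources:
claim = tracker._move_claim(context, instance, new_instance_type,
                            move_type='resize')
with claim:
    do_move(instance)  # hypothetical move operation; abort() runs on error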
def get_claim(mock_extra_get, mock_numa_get):
    return claims.MoveClaim(
        self.context, self.instance, _NODENAME, instance_type, image_meta,
        self.tracker, self.resources, requests, limits=limits)
def resize_claim(self, context, instance, instance_type, image_meta=None,
                 limits=None):
    """Indicate that resources are needed for a resize operation to this
    compute host.

    :param context: security context
    :param instance: instance object to reserve resources for
    :param instance_type: new instance_type being resized to
    :param image_meta: instance image metadata
    :param limits: Dict of oversubscription limits for memory, disk,
                   and CPUs
    :returns: A Claim ticket representing the reserved resources. This
              should be used to finalize the resource claim or free the
              resources after the compute operation is finished.
    """
    image_meta = image_meta or {}

    if self.disabled:
        # compute_driver doesn't support resource tracking, just
        # generate the migration record and continue the resize:
        migration = self._create_migration(context, instance,
                                           instance_type)
        return claims.NopClaim(migration=migration)

    # get memory overhead required to build this instance:
    overhead = self.driver.estimate_instance_overhead(instance_type)
    LOG.debug("Memory overhead for %(flavor)d MB instance; %(overhead)d "
              "MB", {'flavor': instance_type.memory_mb,
                     'overhead': overhead['memory_mb']})

    claim = claims.MoveClaim(context, instance, instance_type,
                             image_meta, self, self.compute_node,
                             overhead=overhead, limits=limits)

    migration = self._create_migration(context, instance, instance_type)
    claim.migration = migration

    # Mark the resources in-use for the resize landing on this
    # compute host:
    self._update_usage_from_migration(context, instance, image_meta,
                                      migration)
    elevated = context.elevated()
    self._update(elevated)

    return claim
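# The overhead dict consumed by resize_claim() above comes from the virt
# driver's estimate_instance_overhead(). A minimal sketch of a driver
# override, assuming a flat per-instance reservation (the 100 MB figure is
# invented for illustration; real drivers derive it from instance
# properties such as memory_mb):
def estimate_instance_overhead(self, instance_info):
    # Reserve a flat 100 MB of host memory per instance (illustrative).
    return {'memory_mb': 100}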
def _claim(self, limits=None, overhead=None, **kwargs):
    numa_constraint = kwargs.pop('numa_topology', None)
    instance_type = self._fake_instance_type(**kwargs)
    if overhead is None:
        overhead = {'memory_mb': 0}
    with mock.patch('nova.virt.hardware.numa_get_constraints',
                    return_value=numa_constraint):
        return claims.MoveClaim(self.context, self.instance, instance_type,
                                {}, self.tracker, self.resources,
                                overhead=overhead, limits=limits)
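# How a test might exercise the _claim() helper above (a sketch; the test
# name and the 4096/8192 values are illustrative, not from this suite):
def test_claim_within_memory_limit(self):
    limits = {'memory_mb': 8192}
    claim = self._claim(limits=limits, memory_mb=4096)
    self.assertIsNotNone(claim)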
def get_claim(mock_extra_get, mock_numa_get):
    return claims.MoveClaim(
        self.context, self.instance, _NODENAME, instance_type, image_meta,
        self.tracker, self.compute_node, requests,
        objects.Migration(migration_type='migration'), limits=limits)
def _move_claim(self, context, instance, new_instance_type, nodename,
                move_type=None, image_meta=None, limits=None,
                migration=None):
    """Indicate that resources are needed for a move to this host.

    Move can be either a migrate/resize, live-migrate or an
    evacuate/rebuild operation.

    :param context: security context
    :param instance: instance object to reserve resources for
    :param new_instance_type: new instance_type being resized to
    :param nodename: The Ironic nodename selected by the scheduler
    :param move_type: move type - can be one of 'migration', 'resize',
                      'live-migration', 'evacuate'
    :param image_meta: instance image metadata
    :param limits: Dict of oversubscription limits for memory, disk,
                   and CPUs
    :param migration: A migration object if one was already created
                      elsewhere for this operation
    :returns: A Claim ticket representing the reserved resources. This
              should be used to finalize the resource claim or free the
              resources after the compute operation is finished.
    """
    image_meta = image_meta or {}
    if migration:
        self._claim_existing_migration(migration, nodename)
    else:
        migration = self._create_migration(context, instance,
                                           new_instance_type, nodename,
                                           move_type)

    if self.disabled(nodename):
        # compute_driver doesn't support resource tracking, just
        # generate the migration record and continue the resize:
        return claims.NopClaim(migration=migration)

    # get memory overhead required to build this instance:
    overhead = self.driver.estimate_instance_overhead(new_instance_type)
    LOG.debug("Memory overhead for %(flavor)d MB instance; %(overhead)d "
              "MB", {'flavor': new_instance_type.memory_mb,
                     'overhead': overhead['memory_mb']})
    LOG.debug("Disk overhead for %(flavor)d GB instance; %(overhead)d "
              "GB", {'flavor': instance.flavor.root_gb,
                     'overhead': overhead.get('disk_gb', 0)})
    LOG.debug("CPU overhead for %(flavor)d vCPUs instance; %(overhead)d "
              "vCPU(s)", {'flavor': instance.flavor.vcpus,
                          'overhead': overhead.get('vcpus', 0)})

    cn = self.compute_nodes[nodename]

    # TODO(moshele): we are recreating the pci requests even if
    # there was no change on resize. This will cause allocating
    # the old/new pci device in the resize phase. In the future
    # we would like to optimise this.
    new_pci_requests = pci_request.get_pci_requests_from_flavor(
        new_instance_type)
    new_pci_requests.instance_uuid = instance.uuid

    # PCI requests come from two sources: instance flavor and
    # SR-IOV ports. SR-IOV ports pci_request don't have an alias_name.
    # On resize merge the SR-IOV ports pci_requests with the new
    # instance flavor pci_requests.
    if instance.pci_requests:
        for request in instance.pci_requests.requests:
            if request.alias_name is None:
                new_pci_requests.requests.append(request)

    claim = claims.MoveClaim(context, instance, nodename,
                             new_instance_type, image_meta, self, cn,
                             new_pci_requests, overhead=overhead,
                             limits=limits)
    claim.migration = migration

    claimed_pci_devices_objs = []
    if self.pci_tracker:
        # NOTE(jaypipes): ComputeNode.pci_device_pools is set below
        # in _update_usage_from_instance().
        claimed_pci_devices_objs = self.pci_tracker.claim_instance(
            context, new_pci_requests, claim.claimed_numa_topology)
    claimed_pci_devices = objects.PciDeviceList(
        objects=claimed_pci_devices_objs)

    # TODO(jaypipes): Move claimed_numa_topology out of the Claim's
    # constructor flow so the Claim constructor only tests whether
    # resources can be claimed, not consume the resources directly.
    mig_context = objects.MigrationContext(
        context=context, instance_uuid=instance.uuid,
        migration_id=migration.id,
        old_numa_topology=instance.numa_topology,
        new_numa_topology=claim.claimed_numa_topology,
        old_pci_devices=instance.pci_devices,
        new_pci_devices=claimed_pci_devices,
        old_pci_requests=instance.pci_requests,
        new_pci_requests=new_pci_requests)

    instance.migration_context = mig_context
    instance.save()

    # Mark the resources in-use for the resize landing on this
    # compute host:
    self._update_usage_from_migration(context, instance, migration,
                                      nodename)
    elevated = context.elevated()
    self._update(elevated, cn)

    return claim
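# Revert-path sketch (illustrative, not part of the module above): when the
# resize later fails or is rolled back, the resources reserved by
# _move_claim() are released via the tracker's drop_move_claim(); exact
# argument spelling varies across nova versions:
tracker.drop_move_claim(context, instance, nodename,
                        instance_type=new_instance_type)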