def _cold_migrate(self, context, instance, flavor, filter_properties,
                  reservations, clean_shutdown):
    """Select a destination for a cold migrate/resize via the scheduler
    and kick off prep_resize on it, rolling back quota reservations and
    notifying on failure.
    """
    image_ref = instance.image_ref
    image = compute_utils.get_image_metadata(
        context, self.image_api, image_ref, instance)

    request_spec = scheduler_utils.build_request_spec(
        context, image, [instance], instance_type=flavor)

    quotas = objects.Quotas.from_reservations(context,
                                              reservations,
                                              instance=instance)
    try:
        scheduler_utils.setup_instance_group(context, request_spec,
                                             filter_properties)
        scheduler_utils.populate_retry(filter_properties,
                                       instance['uuid'])
        hosts = self.scheduler_client.select_destinations(
            context, request_spec, filter_properties)
        host_state = hosts[0]
    except exception.NoValidHost as ex:
        vm_state = instance.vm_state
        if not vm_state:
            vm_state = vm_states.ACTIVE
        updates = {'vm_state': vm_state, 'task_state': None}
        self._set_vm_state_and_notify(context, instance.uuid,
                                      'migrate_server',
                                      updates, ex, request_spec)
        quotas.rollback()

        # if the flavor IDs match, it's migrate; otherwise resize
        if flavor['id'] == instance['instance_type_id']:
            msg = _("No valid host found for cold migrate")
        else:
            msg = _("No valid host found for resize")
        raise exception.NoValidHost(reason=msg)
    except exception.UnsupportedPolicyException as ex:
        with excutils.save_and_reraise_exception():
            vm_state = instance.vm_state
            if not vm_state:
                vm_state = vm_states.ACTIVE
            updates = {'vm_state': vm_state, 'task_state': None}
            self._set_vm_state_and_notify(context, instance.uuid,
                                          'migrate_server',
                                          updates, ex, request_spec)
            quotas.rollback()

    try:
        scheduler_utils.populate_filter_properties(filter_properties,
                                                   host_state)
        # context is not serializable
        filter_properties.pop('context', None)

        (host, node) = (host_state['host'], host_state['nodename'])
        self.compute_rpcapi.prep_resize(
            context, image, instance, flavor, host,
            reservations, request_spec=request_spec,
            filter_properties=filter_properties, node=node,
            clean_shutdown=clean_shutdown)
    except Exception as ex:
        with excutils.save_and_reraise_exception():
            updates = {'vm_state': instance.vm_state,
                       'task_state': None}
            self._set_vm_state_and_notify(context, instance.uuid,
                                          'migrate_server',
                                          updates, ex, request_spec)
            quotas.rollback()
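
# A minimal illustrative sketch (not part of the original module): the
# cleanup branches in _cold_migrate rely on oslo.utils'
# save_and_reraise_exception, which runs its body (state update,
# notification, quota rollback) and then re-raises the original exception
# instead of swallowing it. The demo below is hypothetical and is never
# called by this module.
def _example_save_and_reraise():
    try:
        raise RuntimeError('boom')
    except RuntimeError:
        with excutils.save_and_reraise_exception():
            # Cleanup runs first; the RuntimeError still propagates after
            # this block exits, just like the quota rollbacks above.
            pass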
def unshelve_instance(self, context, instance):
    """Bring a shelved instance back: power it on if it is still on a
    host, or reschedule and unshelve it from the shelved image if it
    was offloaded.
    """
    sys_meta = instance.system_metadata

    def safe_image_show(ctx, image_id):
        if image_id:
            return self.image_api.get(ctx, image_id, show_deleted=False)
        else:
            raise exception.ImageNotFound(image_id='')

    if instance.vm_state == vm_states.SHELVED:
        instance.task_state = task_states.POWERING_ON
        instance.save(expected_task_state=task_states.UNSHELVING)
        self.compute_rpcapi.start_instance(context, instance)
        snapshot_id = sys_meta.get('shelved_image_id')
        if snapshot_id:
            self._delete_image(context, snapshot_id)
    elif instance.vm_state == vm_states.SHELVED_OFFLOADED:
        image = None
        image_id = sys_meta.get('shelved_image_id')
        # No need to check for image if image_id is None as
        # "shelved_image_id" key is not set for volume backed
        # instance during the shelve process
        if image_id:
            with compute_utils.EventReporter(
                    context, 'get_image_info', instance.uuid):
                try:
                    image = safe_image_show(context, image_id)
                except exception.ImageNotFound:
                    instance.vm_state = vm_states.ERROR
                    instance.save()

                    reason = _('Unshelve attempted but the image %s '
                               'cannot be found.') % image_id
                    LOG.error(reason, instance=instance)
                    raise exception.UnshelveException(
                        instance_id=instance.uuid, reason=reason)

        try:
            with compute_utils.EventReporter(context, 'schedule_instances',
                                             instance.uuid):
                filter_properties = {}
                scheduler_utils.populate_retry(filter_properties,
                                               instance.uuid)
                hosts = self._schedule_instances(context, image,
                                                 filter_properties,
                                                 instance)
                host_state = hosts[0]
                scheduler_utils.populate_filter_properties(
                    filter_properties, host_state)
                (host, node) = (host_state['host'],
                                host_state['nodename'])
                self.compute_rpcapi.unshelve_instance(
                    context, instance, host, image=image,
                    filter_properties=filter_properties, node=node)
        except (exception.NoValidHost,
                exception.UnsupportedPolicyException):
            instance.task_state = None
            instance.save()
            LOG.warning(_LW("No valid host found for unshelve instance"),
                        instance=instance)
            return
        except Exception:
            with excutils.save_and_reraise_exception():
                instance.task_state = None
                instance.save()
                LOG.error(_LE("Unshelve attempted but an error "
                              "has occurred"), instance=instance)
    else:
        LOG.error(_LE('Unshelve attempted but vm_state not SHELVED or '
                      'SHELVED_OFFLOADED'), instance=instance)
        instance.vm_state = vm_states.ERROR
        instance.save()
        return

    for key in ['shelved_at', 'shelved_image_id', 'shelved_host']:
        if key in sys_meta:
            del sys_meta[key]
    instance.system_metadata = sys_meta
    instance.save()
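
# A minimal illustrative sketch (not part of the original module):
# unshelve_instance finishes by stripping the shelve bookkeeping keys from
# system_metadata. The helper below is a hypothetical standalone rendering
# of that cleanup over a plain dict; it is never called by this module.
def _example_strip_shelve_keys(sys_meta):
    for key in ('shelved_at', 'shelved_image_id', 'shelved_host'):
        # dict.pop with a default has the same effect as the guarded
        # `del` in the loop above.
        sys_meta.pop(key, None)
    return sys_meta

# _example_strip_shelve_keys({'shelved_at': 't', 'other': 'x'})
# -> {'other': 'x'}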
def build_instances(self, context, instances, image, filter_properties,
                    admin_password, injected_files, requested_networks,
                    security_groups, block_device_mapping=None,
                    legacy_bdm=True):
    """Schedule destination hosts for a set of new instances and ask
    each chosen compute host to build and run its instance.
    """
    # TODO(ndipanov): Remove block_device_mapping and legacy_bdm in version
    #                 2.0 of the RPC API.
    request_spec = scheduler_utils.build_request_spec(context, image,
                                                      instances)
    # TODO(danms): Remove this in version 2.0 of the RPC API
    if (requested_networks and
            not isinstance(requested_networks,
                           objects.NetworkRequestList)):
        requested_networks = objects.NetworkRequestList(
            objects=[objects.NetworkRequest.from_tuple(t)
                     for t in requested_networks])
    # TODO(melwitt): Remove this in version 2.0 of the RPC API
    flavor = filter_properties.get('instance_type')
    if flavor and not isinstance(flavor, objects.Flavor):
        # Code downstream may expect extra_specs to be populated since it
        # is receiving an object, so lookup the flavor to ensure this.
        flavor = objects.Flavor.get_by_id(context, flavor['id'])
        filter_properties = dict(filter_properties, instance_type=flavor)

    try:
        scheduler_utils.setup_instance_group(context, request_spec,
                                             filter_properties)
        # check retry policy. Rather ugly use of instances[0]...
        # but if we've exceeded max retries... then we really only
        # have a single instance.
        scheduler_utils.populate_retry(filter_properties,
                                       instances[0].uuid)
        hosts = self.scheduler_client.select_destinations(
            context, request_spec, filter_properties)
    except Exception as exc:
        updates = {'vm_state': vm_states.ERROR, 'task_state': None}
        for instance in instances:
            self._set_vm_state_and_notify(
                context, instance.uuid, 'build_instances', updates,
                exc, request_spec)
        return

    for (instance, host) in itertools.izip(instances, hosts):
        try:
            instance.refresh()
        except (exception.InstanceNotFound,
                exception.InstanceInfoCacheNotFound):
            LOG.debug('Instance deleted during build', instance=instance)
            continue
        local_filter_props = copy.deepcopy(filter_properties)
        scheduler_utils.populate_filter_properties(local_filter_props,
                                                   host)
        # The block_device_mapping passed from the api doesn't contain
        # instance specific information
        bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
            context, instance.uuid)

        self.compute_rpcapi.build_and_run_instance(
            context, instance=instance, host=host['host'], image=image,
            request_spec=request_spec,
            filter_properties=local_filter_props,
            admin_password=admin_password,
            injected_files=injected_files,
            requested_networks=requested_networks,
            security_groups=security_groups,
            block_device_mapping=bdms, node=host['nodename'],
            limits=host['limits'])
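
# A minimal illustrative sketch (not part of the original module):
# select_destinations is expected to return one host dict per requested
# instance, and build_instances pairs them positionally with
# itertools.izip (Python 2). The data below is hypothetical and the
# function is never called by this module.
def _example_pair_instances_with_hosts():
    fake_instances = ['inst-a', 'inst-b']  # stand-ins for Instance objects
    fake_hosts = [{'host': 'h1', 'nodename': 'n1', 'limits': {}},
                  {'host': 'h2', 'nodename': 'n2', 'limits': {}}]
    for inst, host in itertools.izip(fake_instances, fake_hosts):
        # Each instance is built on its positionally matched host, using
        # host['host'], host['nodename'] and host['limits'] as above.
        pass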