def execute(self, context, request_spec, filter_properties):
    """Ask the scheduler driver to place the volume from request_spec.

    A NoValidHost failure is reported and the volume is errored out
    without re-raising; any other exception is reported, the volume is
    errored out, and the exception is re-raised to the caller.
    """
    try:
        self.driver_api.schedule_create_volume(context, request_spec,
                                               filter_properties)
    except exception.NoValidHost as no_host_err:
        # Scheduling found no host: report the failure and mark the
        # volume as errored, but swallow the exception (re-raising
        # buys the caller nothing).
        try:
            self._handle_failure(context, request_spec, no_host_err)
        finally:
            common.error_out_volume(context, self.db_api,
                                    request_spec['volume_id'],
                                    reason=no_host_err)
    except Exception as err:
        # Unexpected failure: report it, error out the volume, and let
        # the original exception propagate.
        with excutils.save_and_reraise_exception():
            try:
                self._handle_failure(context, request_spec, err)
            finally:
                common.error_out_volume(context, self.db_api,
                                        request_spec['volume_id'],
                                        reason=err)
def execute(self, context, volume_ref, manage_existing_ref):
    """Verify the driver is ready and size up the volume to be managed.

    Errors out the volume and raises DriverNotInitialized when the
    backend driver has not finished initializing; otherwise returns the
    size plus the volume properties/spec needed by later flow tasks.
    """
    vol_id = volume_ref.id
    if not self.driver.initialized:
        driver_name = self.driver.__class__.__name__
        LOG.error(_LE("Unable to manage existing volume. "
                      "Volume driver %s not initialized.") % driver_name)
        flow_common.error_out_volume(
            context, self.db, vol_id,
            reason=_("Volume driver %s "
                     "not initialized.") % driver_name)
        raise exception.DriverNotInitialized()

    managed_size = self.driver.manage_existing_get_size(
        volume_ref, manage_existing_ref)
    volume_spec = {
        'status': volume_ref.status,
        'volume_name': volume_ref.name,
        'volume_id': volume_ref.id,
    }
    return {
        'size': managed_size,
        'volume_type_id': volume_ref.volume_type_id,
        'volume_properties': volume_ref,
        'volume_spec': volume_spec,
    }
def revert(self, context, result, flow_failures, volume_id, **kwargs):
    """Attempt to reschedule the create during revert.

    Returns True when rescheduling was performed and False otherwise.
    NOTE(dulek): the manager needs to know whether rescheduling
    happened; this boolean is made available through the flow engine
    store via the get_revert_result method.
    """
    # Rescheduling disabled: just error out the volume and report back.
    if not self.do_reschedule:
        common.error_out_volume(context, self.db, volume_id)
        LOG.error(_LE("Volume %s: create failed"), volume_id)
        return False

    # A failure of a non-reschedulable type also means: error out,
    # no retry.
    if any(f.check(*self.no_reschedule_types)
           for f in flow_failures.values()):
        common.error_out_volume(context, self.db, volume_id)
        LOG.error(_LE("Volume %s: create failed"), volume_id)
        return False

    # Use a different context when rescheduling.
    if self.reschedule_context:
        cause = list(flow_failures.values())[0]
        context = self.reschedule_context
        try:
            self._pre_reschedule(context, volume_id)
            self._reschedule(context, cause, volume_id=volume_id,
                             **kwargs)
            self._post_reschedule(volume_id)
            return True
        except exception.CinderException:
            LOG.exception(_LE("Volume %s: rescheduling failed"),
                          volume_id)

    return False
def revert(self, context, result, flow_failures, volume_ref, **kwargs):
    """Attempt to reschedule creation of *volume_ref* during revert.

    Returns True when rescheduling was performed and False otherwise.
    NOTE(dulek): the manager needs to know whether rescheduling
    happened; this boolean is made available through the flow engine
    store via the get_revert_result method.
    """
    # Rescheduling disabled: just error out the volume and report back.
    if not self.do_reschedule:
        common.error_out_volume(context, self.db, volume_ref.id)
        LOG.error(_LE("Volume %s: create failed"), volume_ref.id)
        return False

    # A failure of a non-reschedulable type also means: error out,
    # no retry.
    if any(f.check(*self.no_reschedule_types)
           for f in flow_failures.values()):
        common.error_out_volume(context, self.db, volume_ref.id)
        LOG.error(_LE("Volume %s: create failed"), volume_ref.id)
        return False

    # Use a different context when rescheduling.
    if self.reschedule_context:
        cause = list(flow_failures.values())[0]
        context = self.reschedule_context
        try:
            self._pre_reschedule(context, volume_ref)
            self._reschedule(context, cause, volume=volume_ref, **kwargs)
            self._post_reschedule(volume_ref)
            return True
        except exception.CinderException:
            LOG.exception(_LE("Volume %s: rescheduling failed"),
                          volume_ref.id)

    return False
def revert(self, context, volume_id, result, **kwargs):
    """Flip the volume to error status when ref extraction failed."""
    # Nothing to do when reverting a failure result, or when error
    # propagation is disabled for this task.
    if isinstance(result, ft.Failure) or not self.set_error:
        return
    failure_reason = _('Volume create failed while extracting volume ref.')
    common.error_out_volume(context, self.db, volume_id,
                            reason=failure_reason)
    LOG.error(_LE("Volume %s: create failed"), volume_id)
def revert(self, context, volume_id, result, **kwargs):
    """Mark the volume as errored after a failed ref extraction."""
    # Reverting a failure result is a no-op.
    if isinstance(result, ft.Failure):
        return
    # Error propagation can be switched off for this task.
    if not self.set_error:
        return
    common.error_out_volume(
        context, self.db, volume_id,
        reason=_("Volume create failed while extracting volume ref."))
    LOG.error(_LE("Volume %s: create failed"), volume_id)
def execute(self, context, request_spec, filter_properties):
    """Schedule the volume; on failure notify, error out, maybe re-raise.

    NoValidHost is expected and terminal: a user-visible message is
    created and the exception is swallowed.  Any other exception is
    re-raised after the failure has been handled and the volume errored
    out.
    """
    try:
        self.driver_api.schedule_create_volume(context, request_spec,
                                               filter_properties)
    except Exception as err:
        no_valid_host = isinstance(err, exception.NoValidHost)
        with excutils.save_and_reraise_exception(
                reraise=not no_valid_host):
            if no_valid_host:
                # Surface the allocation failure to the user via the
                # messages API.
                self.message_api.create(
                    context,
                    defined_messages.UNABLE_TO_ALLOCATE,
                    context.project_id,
                    resource_type=resource_types.VOLUME,
                    resource_uuid=request_spec['volume_id'])
            try:
                self._handle_failure(context, request_spec, err)
            finally:
                common.error_out_volume(context, self.db_api,
                                        request_spec['volume_id'],
                                        reason=err)
def revert(self, context, result, flow_failures, **kwargs):
    """Set the volume to error status after a failed manage flow."""
    vol_id = kwargs['volume_id']
    common.error_out_volume(context, self.db, vol_id)
    LOG.error(_LE("Volume %s: manage failed."), vol_id)
    # NOTE(review): indexing flow_failures with -1 assumes a sequence of
    # failures; taskflow commonly passes a dict here -- confirm against
    # the engine version in use.
    last_failure = flow_failures[-1]
    exc_info = (last_failure.exc_info
                if all(last_failure.exc_info) else False)
    LOG.error(_LE('Unexpected build error:'), exc_info=exc_info)
def execute(self, context, request_spec, filter_properties):
    """Delegate volume scheduling; error the volume out on any failure.

    NoValidHost is handled and swallowed (nothing useful for the caller
    to do); every other exception is re-raised after cleanup.
    """
    try:
        self.driver_api.schedule_create_volume(context, request_spec,
                                               filter_properties)
    except Exception as err:
        should_reraise = not isinstance(err, exception.NoValidHost)
        with excutils.save_and_reraise_exception(reraise=should_reraise):
            try:
                self._handle_failure(context, request_spec, err)
            finally:
                common.error_out_volume(context, self.db_api,
                                        request_spec["volume_id"],
                                        reason=err)
def revert(self, context, result, flow_failures, **kwargs):
    """Restore the source volume's status and error out the new volume."""
    # Reverting a failure result is a no-op.
    if isinstance(result, ft.Failure):
        return
    vol_id = kwargs['volume_id']
    common.restore_source_status(context, self.db, kwargs)
    common.error_out_volume(context, self.db, vol_id)
    LOG.error(_LE("Volume %s: create failed"), vol_id)
    # NOTE(review): indexing flow_failures with -1 assumes a sequence of
    # failures; confirm the engine passes a list here rather than a dict.
    last_failure = flow_failures[-1]
    exc_info = (last_failure.exc_info
                if all(last_failure.exc_info) else False)
    LOG.error(_LE('Unexpected build error:'), exc_info=exc_info)
def schedule_create_volume(context, request_spec, filter_properties):
    """Schedule creation of a volume, erroring the volume out on failure.

    NoValidHost is notified/logged and swallowed; any other exception is
    notified/logged and re-raised after the volume has been errored out.
    """
    # BUG FIX: volume_id was referenced throughout this function without
    # ever being bound in its scope (and it is not a parameter); derive
    # it from the request spec, which carries it.
    volume_id = request_spec['volume_id']

    def _log_failure(cause):
        # Log why scheduling failed.
        LOG.error(
            _("Failed to schedule_create_volume: %(cause)s") %
            {'cause': cause})

    def _notify_failure(cause):
        """When scheduling fails send out an event that it failed."""
        topic = "scheduler.create_volume"
        payload = {
            'request_spec': request_spec,
            'volume_properties': request_spec.get('volume_properties', {}),
            'volume_id': volume_id,
            'state': 'error',
            'method': 'create_volume',
            'reason': cause,
        }
        try:
            publisher_id = notifier.publisher_id("scheduler")
            notifier.notify(context, publisher_id, topic, notifier.ERROR,
                            payload)
        except exception.CinderException:
            # Notification failures must not mask the scheduling failure.
            LOG.exception(
                _("Failed notifying on %(topic)s "
                  "payload %(payload)s") % {
                      'topic': topic,
                      'payload': payload
                  })

    try:
        driver.schedule_create_volume(context, request_spec,
                                      filter_properties)
    except exception.NoValidHost as e:
        # No host was found: notify, log, error out the volume, but do
        # not re-raise (there is nothing the caller can do about it).
        _notify_failure(e)
        _log_failure(e)
        common.error_out_volume(context, db, volume_id, reason=e)
    except Exception as e:
        # Some other error happened: notify, log, error out the volume,
        # and re-raise so the caller sees the failure.
        with excutils.save_and_reraise_exception():
            _notify_failure(e)
            _log_failure(e)
            common.error_out_volume(context, db, volume_id, reason=e)
def schedule_create_volume(context, request_spec, filter_properties):
    """Schedule creation of a volume, erroring the volume out on failure.

    NoValidHost is notified/logged and swallowed; any other exception is
    notified/logged and re-raised after the volume has been errored out.
    """
    # BUG FIX: volume_id was never bound in this scope (nor passed as a
    # parameter); pull it out of the request spec.
    volume_id = request_spec['volume_id']

    def _log_failure(cause):
        # Log why scheduling failed.
        LOG.error(_("Failed to schedule_create_volume: %(cause)s") %
                  {'cause': cause})

    def _notify_failure(cause):
        """When scheduling fails send out an event that it failed."""
        topic = "scheduler.create_volume"
        payload = {'request_spec': request_spec,
                   'volume_properties': request_spec.get(
                       'volume_properties', {}),
                   'volume_id': volume_id,
                   'state': 'error',
                   'method': 'create_volume',
                   'reason': cause}
        try:
            publisher_id = notifier.publisher_id("scheduler")
            notifier.notify(context, publisher_id, topic, notifier.ERROR,
                            payload)
        except exception.CinderException:
            # A broken notifier must not hide the scheduling failure.
            LOG.exception(_("Failed notifying on %(topic)s "
                            "payload %(payload)s") % {'topic': topic,
                                                      'payload': payload})

    try:
        driver.schedule_create_volume(context, request_spec,
                                      filter_properties)
    except exception.NoValidHost as e:
        # No host was found: notify, log, error out the volume, but do
        # not re-raise (re-raising would serve no purpose).
        _notify_failure(e)
        _log_failure(e)
        common.error_out_volume(context, db, volume_id, reason=e)
    except Exception as e:
        # Any other failure: notify, log, error out the volume, and
        # re-raise so the caller sees it.
        with excutils.save_and_reraise_exception():
            _notify_failure(e)
            _log_failure(e)
            common.error_out_volume(context, db, volume_id, reason=e)
def execute(self, context, request_spec, filter_properties):
    """Run the scheduler driver; error out the volume when it fails.

    A NoValidHost result is reported and swallowed; any other exception
    is reported and then re-raised.
    """
    try:
        self.driver_api.schedule_create_volume(context, request_spec,
                                               filter_properties)
    except exception.NoValidHost as nvh:
        # No host could be found -- record the failure and error out the
        # volume, but do not re-raise (there is no caller-side remedy).
        try:
            self._handle_failure(context, request_spec, nvh)
        finally:
            common.error_out_volume(context, self.db_api,
                                    request_spec["volume_id"],
                                    reason=nvh)
    except Exception as unexpected:
        # Anything else is unexpected -- record it, error out the
        # volume, then re-raise.
        with excutils.save_and_reraise_exception():
            try:
                self._handle_failure(context, request_spec, unexpected)
            finally:
                common.error_out_volume(context, self.db_api,
                                        request_spec["volume_id"],
                                        reason=unexpected)
def execute(self, context, volume_ref, manage_existing_ref):
    """Return the size and spec of an existing volume to be managed.

    Raises DriverNotInitialized (after erroring out the volume) when the
    backend driver is not ready.
    """
    # Refuse to proceed until the backend driver is ready.
    if not self.driver.initialized:
        driver_name = self.driver.__class__.__name__
        LOG.error(_LE("Unable to manage existing volume. "
                      "Volume driver %s not initialized.") % driver_name)
        flow_common.error_out_volume(
            context, self.db, volume_ref.id,
            reason=_("Volume driver %s "
                     "not initialized.") % driver_name)
        raise exception.DriverNotInitialized()

    size = self.driver.manage_existing_get_size(volume_ref,
                                                manage_existing_ref)
    result = {'size': size,
              'volume_type_id': volume_ref.volume_type_id,
              'volume_properties': volume_ref}
    result['volume_spec'] = {'status': volume_ref.status,
                             'volume_name': volume_ref.name,
                             'volume_id': volume_ref.id}
    return result
def execute(self, context, volume_ref, manage_existing_ref):
    """Probe the driver for the size of the existing volume to manage.

    Errors out the volume and raises DriverNotInitialized when the
    backend driver has not finished initializing.
    """
    vol_id = volume_ref["id"]
    if not self.driver.initialized:
        driver_name = self.driver.__class__.__name__
        LOG.error(_LE("Unable to manage existing volume. "
                      "Volume driver %s not initialized.") % driver_name)
        flow_common.error_out_volume(
            context, self.db, vol_id,
            reason=_("Volume driver %s "
                     "not initialized.") % driver_name)
        raise exception.DriverNotInitialized()

    size = self.driver.manage_existing_get_size(volume_ref,
                                                manage_existing_ref)
    return {"size": size,
            "volume_type_id": volume_ref["volume_type_id"],
            "volume_properties": volume_ref,
            "volume_spec": {"status": volume_ref["status"],
                            "volume_name": volume_ref["name"],
                            "volume_id": volume_ref["id"]}}
def revert(self, context, result, flow_failures, **kwargs):
    """Attempt to reschedule volume creation during revert.

    Communicates whether rescheduling happened by setting a
    ``rescheduled`` attribute on the first failure's exception.
    """
    volume_id = kwargs['volume_id']

    # Not configured to reschedule: just error out the volume.
    if not self.do_reschedule:
        common.error_out_volume(context, self.db, volume_id)
        LOG.error(_LE("Volume %s: create failed"), volume_id)
        return

    # NOTE(dulek): Revert is occurring and the manager needs to know if
    # rescheduling happened.  We inject that information into the
    # exception that will be caught there.  This is ugly and we need
    # TaskFlow to support a better way of returning data from a
    # reverted flow.
    cause = list(flow_failures.values())[0]
    cause.exception.rescheduled = False

    # A failure of a non-reschedulable type also means: just error out.
    if any(f.check(*self.no_reschedule_types)
           for f in flow_failures.values()):
        common.error_out_volume(context, self.db, volume_id)
        LOG.error(_LE("Volume %s: create failed"), volume_id)
        return

    # Use a different context when rescheduling.
    if self.reschedule_context:
        context = self.reschedule_context
    try:
        self._pre_reschedule(context, volume_id)
        self._reschedule(context, cause, **kwargs)
        self._post_reschedule(context, volume_id)
        # Inject information that we rescheduled.
        cause.exception.rescheduled = True
    except exception.CinderException:
        LOG.exception(_LE("Volume %s: rescheduling failed"), volume_id)
def revert(self, context, result, flow_failures, volume_ref, **kwargs):
    """Error out the volume after a failed manage-existing flow."""
    vol_id = volume_ref.id
    flow_common.error_out_volume(context, self.db, vol_id,
                                 reason=_('Volume manage failed.'))
    LOG.error(_LE("Volume %s: manage failed."), vol_id)
def revert(self, context, volume_id, result, **kwargs):
    """On revert, flip the volume to error unless result is a failure."""
    if not isinstance(result, misc.Failure):
        common.error_out_volume(context, self.db, volume_id)
        LOG.error(_("Volume %s: create failed"), volume_id)