def handle_failure(self, task, req, store_errors=True, call_errbacks=True):
    """Handle an exception raised by *task*.

    Captures the exception currently being handled, stores the failure on
    the task's backend, runs the task's failure handlers and emits the
    ``task_failure`` signal.

    :param task: the task object whose execution failed.
    :param req: the task request (provides id/args/kwargs).
    :keyword store_errors: pass the result on to the backend.
    :keyword call_errbacks: forward to ``mark_as_failure`` so linked
        error callbacks are invoked.
    :returns: the captured :class:`ExceptionInfo`.
    """
    _, _, tb = sys.exc_info()
    try:
        exc = self.retval
        # make sure we only send pickleable exceptions back to parent.
        einfo = ExceptionInfo()
        einfo.exception = get_pickleable_exception(einfo.exception)
        einfo.type = get_pickleable_etype(einfo.type)
        task.backend.mark_as_failure(
            req.id, exc, einfo.traceback,
            request=req, store_result=store_errors,
            call_errbacks=call_errbacks,
        )
        task.on_failure(exc, req.id, req.args, req.kwargs, einfo)
        signals.task_failure.send(sender=task, task_id=req.id,
                                  exception=exc, args=req.args,
                                  kwargs=req.kwargs,
                                  traceback=tb, einfo=einfo)
        self._log_error(task, req, einfo)
        return einfo
    finally:
        # break the traceback <-> frame reference cycle
        del tb
def test_on_retry(self):
    """on_failure with a Retry exception should emit a task-retried event."""
    job = Request({"task": self.mytask.name, "id": uuid(),
                   "args": [1], "kwargs": {"f": "x"}}, app=self.app)
    job.eventer = MockEventDispatcher()
    try:
        raise Retry("foo", KeyError("moofoobar"))
    # was a bare ``except:``: only Retry is raised here, and a bare
    # except would silently mask unrelated setup errors.
    except Retry:
        einfo = ExceptionInfo()
        job.on_failure(einfo)
        self.assertIn("task-retried", job.eventer.sent)
        # also exercise the path with info-logging disabled.
        prev, module._does_info = module._does_info, False
        try:
            job.on_failure(einfo)
        finally:
            module._does_info = prev
        # and the internal-error path.
        einfo.internal = True
        job.on_failure(einfo)
def test_on_retry(self):
    """on_failure with a Retry exception should emit a task-retried event."""
    job = self.get_request(self.mytask.s(1, f='x'))
    job.eventer = MockEventDispatcher()
    try:
        raise Retry('foo', KeyError('moofoobar'))
    # was a bare ``except:``: only Retry is raised here, and a bare
    # except would silently mask unrelated setup errors.
    except Retry:
        einfo = ExceptionInfo()
        job.on_failure(einfo)
        self.assertIn('task-retried', job.eventer.sent)
        # also exercise the path with info-logging disabled.
        prev, module._does_info = module._does_info, False
        try:
            job.on_failure(einfo)
        finally:
            module._does_info = prev
        # and the internal-error path.
        einfo.internal = True
        job.on_failure(einfo)
def test_on_retry(self):
    """on_failure with a RetryTaskError should emit a task-retried event."""
    tw = TaskRequest(mytask.name, uuid(), [1], {'f': 'x'})
    tw.eventer = MockEventDispatcher()
    try:
        raise RetryTaskError('foo', KeyError('moofoobar'))
    # was a bare ``except:``: only RetryTaskError is raised here, and a
    # bare except would silently mask unrelated setup errors.
    except RetryTaskError:
        einfo = ExceptionInfo()
        tw.on_failure(einfo)
        self.assertIn('task-retried', tw.eventer.sent)
        # also exercise the path with info-logging disabled.
        prev, module._does_info = module._does_info, False
        try:
            tw.on_failure(einfo)
        finally:
            module._does_info = prev
        # and the internal-error path.
        einfo.internal = True
        tw.on_failure(einfo)
def handle_failure(self, task, store_errors=True):
    """Handle an exception raised by *task*.

    Captures the exception currently being handled, optionally stores the
    failure on the task's backend, runs the failure handlers and emits
    the ``task_failure`` signal.

    :param task: the task object whose execution failed.
    :keyword store_errors: store the failure on the result backend.
    :returns: the captured :class:`ExceptionInfo`.
    """
    req = task.request
    # first slot of exc_info is unused here.
    _, _, tb = sys.exc_info()
    try:
        exc = self.retval
        # make sure we only send pickleable exceptions back to parent.
        einfo = ExceptionInfo()
        einfo.exception = get_pickleable_exception(einfo.exception)
        einfo.type = get_pickleable_etype(einfo.type)
        if store_errors:
            task.backend.mark_as_failure(req.id, exc, einfo.traceback,
                                         request=req)
        task.on_failure(exc, req.id, req.args, req.kwargs, einfo)
        signals.task_failure.send(
            sender=task, task_id=req.id,
            exception=exc, args=req.args, kwargs=req.kwargs,
            traceback=tb, einfo=einfo,
        )
        return einfo
    finally:
        # break the traceback <-> frame reference cycle
        del tb
def test_on_retry(self):
    """on_failure with a Retry exception should emit a task-retried event."""
    job = Request({
        'task': self.mytask.name,
        'id': uuid(),
        'args': [1],
        'kwargs': {'f': 'x'},
    }, app=self.app)
    job.eventer = MockEventDispatcher()
    try:
        raise Retry('foo', KeyError('moofoobar'))
    # was a bare ``except:``: only Retry is raised here, and a bare
    # except would silently mask unrelated setup errors.
    except Retry:
        einfo = ExceptionInfo()
        job.on_failure(einfo)
        self.assertIn('task-retried', job.eventer.sent)
        # also exercise the path with info-logging disabled.
        prev, module._does_info = module._does_info, False
        try:
            job.on_failure(einfo)
        finally:
            module._does_info = prev
        # and the internal-error path.
        einfo.internal = True
        job.on_failure(einfo)
def test_on_retry(self):
    """on_failure with a Retry exception should send a task-retried event."""
    job = self.get_request(self.mytask.s(1, f='x'))
    job.eventer = Mock(name='.eventer')
    try:
        raise Retry('foo', KeyError('moofoobar'))
    # was a bare ``except:``: only Retry is raised here, and a bare
    # except would silently mask unrelated setup errors.
    except Retry:
        einfo = ExceptionInfo()
        job.on_failure(einfo)
        job.eventer.send.assert_called_with(
            'task-retried',
            uuid=job.id,
            exception=safe_repr(einfo.exception.exc),
            traceback=safe_str(einfo.traceback),
        )
        # also exercise the path with info-logging disabled.
        prev, module._does_info = module._does_info, False
        try:
            job.on_failure(einfo)
        finally:
            module._does_info = prev
        # and the internal-error path.
        einfo.internal = True
        job.on_failure(einfo)
def fail_from_current_stack(self, task_id, exc=None):
    """Mark *task_id* as failed using the exception currently being handled.

    :param task_id: id of the task to mark as failed.
    :keyword exc: explicit exception to record; defaults to the exception
        from ``sys.exc_info()``.
    :returns: the :class:`ExceptionInfo` built from the current stack.
    """
    type_, real_exc, tb = sys.exc_info()
    try:
        exc = real_exc if exc is None else exc
        exception_info = ExceptionInfo((type_, exc, tb))
        self.mark_as_failure(task_id, exc, exception_info.traceback)
        return exception_info
    finally:
        # Release frame locals held by the traceback so large objects
        # referenced by the failing stack can be garbage collected.
        if sys.version_info >= (3, 5, 0):
            while tb is not None:
                try:
                    tb.tb_frame.clear()
                    tb.tb_frame.f_locals
                except RuntimeError:
                    # Ignore the exception raised if the frame is still executing.
                    pass
                tb = tb.tb_next
        elif (2, 7, 0) <= sys.version_info < (3, 0, 0):
            # Python 2: clear the interpreter-level exception state instead.
            sys.exc_clear()
        # break the remaining traceback <-> frame reference cycle.
        del tb
def handle_retry(self, task, req, store_errors=True, **kwargs): """Handle retry exception.""" # the exception raised is the Retry semi-predicate, # and it's exc' attribute is the original exception raised (if any). type_, _, tb = sys.exc_info() try: reason = self.retval einfo = ExceptionInfo((type_, reason, tb)) if store_errors: task.backend.mark_as_retry( req.id, reason.exc, einfo.traceback, request=req, ) task.on_retry(reason.exc, req.id, req.args, req.kwargs, einfo) signals.task_retry.send(sender=task, request=req, reason=reason, einfo=einfo) info(LOG_RETRY, { 'id': req.id, 'name': get_task_name(req, task.name), 'exc': str(reason), }) return einfo finally: del tb
def _run(self, *args, **kwargs):
    """Execute the task (or its undo), reporting success/failure.

    Builds ``self.extra_data`` from the related information package and
    process-step ancestors, runs either ``self.undo`` or ``self.run``,
    and dispatches the matching success/failure callbacks.
    """
    lock = None
    self.extra_data = {}
    if self.ip:
        ip = InformationPackage.objects.select_related(
            'submission_agreement').get(pk=self.ip)
        self.extra_data.update(
            fill_specification_data(ip=ip, sa=ip.submission_agreement))
        # NOTE(review): the lock is acquired before the try block; if the
        # step lookup below raises, the lock is never released — confirm
        # whether that is acceptable for this lock implementation.
        lock = ip.get_lock()
        lock.acquire(blocking=True)
    if self.step:
        # merge context from all ancestor steps (closest last).
        step = ProcessStep.objects.get(pk=self.step)
        for ancestor in step.get_ancestors(include_self=True):
            self.extra_data.update(ancestor.context)
    try:
        if self.undo_type:
            res = self.undo(*args, **kwargs)
        else:
            res = self.run(*args, **kwargs)
    except exceptions.Ignore:
        # deliberate no-op signal from celery; propagate untouched.
        raise
    except Exception as e:
        einfo = ExceptionInfo()
        self.failure(e, self.task_id, args, kwargs, einfo)
        if self.eager:
            self.after_return(celery_states.FAILURE, e, self.task_id,
                              args, kwargs, einfo)
        raise
    else:
        self.success(res, self.task_id, args, kwargs)
    finally:
        if lock is not None:
            lock.release()
    return res
def test_on_failure_WorkerLostError_redelivered_True(self):
    """A redelivered WorkerLostError with reject_on_worker_lost set must
    fire the task_failure signal and store the failure on the backend."""
    # capture a real ExceptionInfo from a raised WorkerLostError.
    try:
        raise WorkerLostError()
    except WorkerLostError:
        einfo = ExceptionInfo(internal=True)
    req = self.get_request(self.add.s(2, 2))
    req.task.acks_late = False
    req.task.reject_on_worker_lost = True
    req.delivery_info['redelivered'] = True
    req.task.backend = Mock()
    with self.assert_signal_called(
            task_failure, sender=req.task, task_id=req.id,
            exception=einfo.exception, args=req.args, kwargs=req.kwargs,
            traceback=einfo.traceback, einfo=einfo):
        req.on_failure(einfo)
    # the failure must still be stored even though the message was rejected.
    req.task.backend.mark_as_failure.assert_called_once_with(
        req.id, einfo.exception, request=req._context, store_result=True)
def handle_retry(self, task, store_errors=True):
    """Handle retry exception.

    Records the retry on the backend, runs the task's ``on_retry``
    handler and emits the ``task_retry`` signal.

    :param task: the task being retried.
    :keyword store_errors: store the retry state on the result backend.
    :returns: the captured :class:`ExceptionInfo`.
    """
    # the exception raised is the Retry semi-predicate,
    # and its `exc` attribute is the original exception raised (if any).
    req = task.request
    type_, _, tb = sys.exc_info()
    try:
        reason = self.retval
        einfo = ExceptionInfo((type_, reason, tb))
        if store_errors:
            task.backend.mark_as_retry(
                req.id, reason.exc, einfo.traceback, request=req,
            )
        task.on_retry(reason.exc, req.id, req.args, req.kwargs, einfo)
        signals.task_retry.send(sender=task, request=req,
                                reason=reason, einfo=einfo)
        return einfo
    finally:
        # break the traceback <-> frame reference cycle
        del tb
def catch_exception(exception):
    """Raise *exception*, catch it, and return its backend-prepared form
    together with the formatted traceback text."""
    try:
        raise exception
    except exception.__class__ as caught:
        prepared = current_app.backend.prepare_exception(caught)
        traceback_text = ExceptionInfo(sys.exc_info()).traceback
        return prepared, traceback_text
def handle_ignore(self, task, req, **kwargs):
    """Handle an Ignore exception: log it without storing a result."""
    self._log_error(task, req, ExceptionInfo())
def handle_reject(self, task, req, **kwargs):
    """Handle a Reject exception: log it without storing a result."""
    self._log_error(task, req, ExceptionInfo())
def trace_task(uuid, args, kwargs, request=None):
    """Trace one execution of the bound task: run it, record state,
    dispatch signals/callbacks and return a trace_ok_t tuple."""
    # R - is the possibly prepared return value.
    # I - is the Info object.
    # T - runtime
    # Rstr - textual representation of return value
    # retval - is the always unmodified return value.
    # state - is the resulting task state.

    # This function is very long because we've unrolled all the calls
    # for performance reasons, and because the function is so long
    # we want the main variables (I, and R) to stand out visually from the
    # the rest of the variables, so breaking PEP8 is worth it ;)
    R = I = T = Rstr = retval = state = None
    task_request = None
    time_start = monotonic()
    try:
        try:
            kwargs.items
        except AttributeError:
            raise InvalidTaskError(
                'Task keyword arguments is not a mapping')
        push_task(task)
        task_request = Context(request or {}, args=args,
                               called_directly=False, kwargs=kwargs)
        root_id = task_request.root_id or uuid
        task_priority = task_request.delivery_info.get('priority') if \
            inherit_parent_priority else None
        push_request(task_request)
        try:
            # -*- PRE -*-
            if prerun_receivers:
                send_prerun(sender=task, task_id=uuid, task=task,
                            args=args, kwargs=kwargs)
            loader_task_init(uuid, task)
            if track_started:
                store_result(
                    uuid, {'pid': pid, 'hostname': hostname}, STARTED,
                    request=task_request,
                )

            # -*- TRACE -*-
            try:
                R = retval = fun(*args, **kwargs)
                state = SUCCESS
            except Reject as exc:
                I, R = Info(REJECTED, exc), ExceptionInfo(internal=True)
                state, retval = I.state, I.retval
                I.handle_reject(task, task_request)
                traceback_clear(exc)
            except Ignore as exc:
                I, R = Info(IGNORED, exc), ExceptionInfo(internal=True)
                state, retval = I.state, I.retval
                I.handle_ignore(task, task_request)
                traceback_clear(exc)
            except Retry as exc:
                I, R, state, retval = on_error(
                    task_request, exc, uuid, RETRY, call_errbacks=False)
                traceback_clear(exc)
            except Exception as exc:
                I, R, state, retval = on_error(task_request, exc, uuid)
                traceback_clear(exc)
            except BaseException:
                raise
            else:
                try:
                    # callback tasks must be applied before the result is
                    # stored, so that result.children is populated.

                    # groups are called inline and will store trail
                    # separately, so need to call them separately
                    # so that the trail's not added multiple times :(
                    # (Issue #1936)
                    callbacks = task.request.callbacks
                    if callbacks:
                        if len(task.request.callbacks) > 1:
                            sigs, groups = [], []
                            for sig in callbacks:
                                sig = signature(sig, app=app)
                                if isinstance(sig, group):
                                    groups.append(sig)
                                else:
                                    sigs.append(sig)
                            for group_ in groups:
                                group_.apply_async(
                                    (retval,),
                                    parent_id=uuid, root_id=root_id,
                                    priority=task_priority
                                )
                            if sigs:
                                group(sigs, app=app).apply_async(
                                    (retval,),
                                    parent_id=uuid, root_id=root_id,
                                    priority=task_priority
                                )
                        else:
                            signature(callbacks[0], app=app).apply_async(
                                (retval,), parent_id=uuid, root_id=root_id,
                                priority=task_priority
                            )

                    # execute first task in chain
                    chain = task_request.chain
                    if chain:
                        _chsig = signature(chain.pop(), app=app)
                        _chsig.apply_async(
                            (retval,), chain=chain,
                            parent_id=uuid, root_id=root_id,
                            priority=task_priority
                        )
                    mark_as_done(
                        uuid, retval, task_request, publish_result,
                    )
                except EncodeError as exc:
                    I, R, state, retval = on_error(task_request, exc, uuid)
                else:
                    Rstr = saferepr(R, resultrepr_maxsize)
                    T = monotonic() - time_start
                    if task_on_success:
                        task_on_success(retval, uuid, args, kwargs)
                    if success_receivers:
                        send_success(sender=task, result=retval)
                    if _does_info:
                        info(LOG_SUCCESS, {
                            'id': uuid,
                            'name': get_task_name(task_request, name),
                            'return_value': Rstr,
                            'runtime': T,
                        })

            # -* POST *-
            if state not in IGNORE_STATES:
                if task_after_return:
                    task_after_return(
                        state, retval, uuid, args, kwargs, None,
                    )
        finally:
            try:
                if postrun_receivers:
                    send_postrun(sender=task, task_id=uuid, task=task,
                                 args=args, kwargs=kwargs,
                                 retval=retval, state=state)
            finally:
                pop_task()
                pop_request()
                if not eager:
                    try:
                        backend_cleanup()
                        loader_cleanup()
                    except (KeyboardInterrupt, SystemExit, MemoryError):
                        raise
                    except Exception as exc:
                        logger.error('Process cleanup failed: %r', exc,
                                     exc_info=True)
    except MemoryError:
        raise
    except Exception as exc:
        # internal error in the tracer itself, not in the task body.
        _signal_internal_error(task, uuid, args, kwargs, request, exc)
        if eager:
            raise
        R = report_internal_error(task, exc)
        if task_request is not None:
            I, _, _, _ = on_error(task_request, exc, uuid)
    return trace_ok_t(R, I, T, Rstr)
def cancel_export_provider_task(result=None, export_provider_task_uid=None, canceling_user=None):
    """
    Cancels an ExportProviderTask and terminates each subtasks execution.

    :param result: optional dict merged into the returned result
        (fixed: was a mutable default ``{}`` shared — and mutated —
        across calls).
    :param export_provider_task_uid: uid of the ExportProviderTask to cancel.
    :param canceling_user: user recorded as having requested the cancel.
    :returns: the result dict; ``result['result']`` is False when the
        provider task could not be found.
    """
    from ..tasks.models import ExportProviderTask, ExportTaskException, ExportTaskResult
    from ..tasks.exceptions import CancelException
    from billiard.einfo import ExceptionInfo
    from datetime import datetime, timedelta

    result = {} if result is None else result

    export_provider_task = ExportProviderTask.objects.filter(
        uid=export_provider_task_uid).first()

    if not export_provider_task:
        result['result'] = False
        return result

    export_tasks = export_provider_task.tasks.all()

    # Loop through both the tasks in the ExportProviderTask model, as well as the Task Chain in celery
    for export_task in export_tasks.filter(~Q(
            status=TaskStates.CANCELED.value)):
        export_task.status = TaskStates.CANCELED.value
        export_task.cancel_user = canceling_user
        export_task.save()

        # This part is to populate the UI with the cancel message. If a different mechanism is incorporated
        # to pass task information to the users, then it may make sense to replace this.
        try:
            raise CancelException(task_name=export_provider_task.name, user_name=canceling_user)
        except CancelException as ce:
            einfo = ExceptionInfo()
            einfo.exception = ce
            ExportTaskException.objects.create(task=export_task, exception=cPickle.dumps(einfo))

        # Remove the ExportTaskResult, which will clean up the files.
        task_result = ExportTaskResult.objects.filter(task=export_task).first()
        if task_result:
            task_result.delete()

        if export_task.pid and export_task.worker:
            kill_task.apply_async(
                kwargs={
                    "task_pid": export_task.pid,
                    "celery_uid": export_task.celery_uid
                },
                queue="{0}.cancel".format(export_task.worker),
                priority=TaskPriority.CANCEL.value,
                routing_key="{0}.cancel".format(export_task.worker))

    export_provider_task.status = TaskStates.CANCELED.value
    export_provider_task.save()

    # Because the task is revoked the follow on is never run... if using revoke this is required, if using kill,
    # this can probably be removed as the task will simply fail and the follow on task from the task_factory will
    # pick up the task.
    run_uid = export_provider_task.run.uid
    worker = export_provider_task.tasks.first().worker
    # Because we don't care about the files in a canceled task the stage dir can be the run dir,
    # which will be cleaned up in final steps.
    stage_dir = os.path.join(settings.EXPORT_STAGING_ROOT.rstrip('\/'), str(run_uid))

    finalize_export_provider_task.si(
        run_uid=run_uid,
        stage_dir=stage_dir,
        export_provider_task_uid=export_provider_task_uid,
        worker=worker).set(queue=worker, routing_key=worker).apply_async(
            interval=1,
            max_retries=10,
            expires=datetime.now() + timedelta(days=2),
            priority=TaskPriority.FINALIZE_PROVIDER.value,
            routing_key=worker,
            queue=worker)
    return result
def get_ei():
    """Return an ExceptionInfo captured from a raised WorkerLostError."""
    try:
        raise WorkerLostError('do re mi')
    except WorkerLostError:
        captured = ExceptionInfo()
    return captured
def test_handle_reject(self, ExceptionInfo):
    """handle_reject must log the error with a fresh ExceptionInfo."""
    trace_info = self.TI(states.FAILURE)
    trace_info._log_error = Mock(name='log_error')
    request = Mock(name='req')
    trace_info.handle_reject(self.add, request)
    trace_info._log_error.assert_called_with(
        self.add, request, ExceptionInfo())
def __call__(self, *args, **kwargs):
    """Run (or undo) the task described by ``kwargs['taskobj']``,
    updating its database state and creating events along the way."""
    try:
        self.taskobj = kwargs['taskobj']
    except KeyError:
        # NOTE(review): this only prints and then continues; if taskobj is
        # missing, the attribute accesses below will raise AttributeError.
        print "Task requires taskobj set to a ProcessTask"

    self.eager = kwargs.get("eager", False)

    # result of the previous task in the chain, if any.
    try:
        prev_result_dict = args[0]
    except IndexError:
        prev_result_dict = {}

    # map previous results into this task's params.
    if self.taskobj.result_params:
        for k, v in self.taskobj.result_params.iteritems():
            self.taskobj.params[k] = prev_result_dict[v]

    self.taskobj.hidden = self.taskobj.hidden or self.hidden
    self.taskobj.celery_id = self.request.id
    self.taskobj.status = celery_states.STARTED
    self.taskobj.time_started = timezone.now()
    self.taskobj.save()

    if self.eager:
        try:
            res = self.run(**self.taskobj.params)
            self.taskobj.result = res
            self.taskobj.status = celery_states.SUCCESS
            self.taskobj.time_done = timezone.now()
            # update_fields can fail on some backends; fall back to full save.
            try:
                self.taskobj.save(update_fields=['result', 'status', 'time_done'])
            except DatabaseError:
                self.taskobj.save()
            if self.taskobj.log:
                self.create_event(
                    0, self.event_outcome_success(**self.taskobj.params)
                )
            return res
        except:
            # record the failure, then re-raise.
            self.taskobj.einfo = ExceptionInfo()
            self.taskobj.status = celery_states.FAILURE
            self.taskobj.time_done = timezone.now()
            try:
                self.taskobj.save(update_fields=['einfo', 'status', 'time_done'])
            except DatabaseError:
                self.taskobj.save()
            outcome = 1
            outcome_detail_note = self.taskobj.einfo.traceback
            self.create_event(
                outcome, outcome_detail_note
            )
            raise

    if self.taskobj.undo_type:
        if hasattr(settings, 'CELERY_ALWAYS_EAGER') and settings.CELERY_ALWAYS_EAGER:
            try:
                res = self.undo(**self.taskobj.params)
                prev_result_dict[self.taskobj.id] = res
                self.on_success(prev_result_dict, None, args, kwargs)
                self.after_return(celery_states.SUCCESS, res, None, args, kwargs, None)
                return res
            except Exception as e:
                einfo = ExceptionInfo()
                self.on_failure(e, None, args, kwargs, einfo)
                self.after_return(celery_states.FAILURE, e, None, args, kwargs, einfo)
                raise
        else:
            return self.undo(**self.taskobj.params)
    else:
        if hasattr(settings, 'CELERY_ALWAYS_EAGER') and settings.CELERY_ALWAYS_EAGER:
            try:
                res = self.run(**self.taskobj.params)
                prev_result_dict[self.taskobj.id] = res
                self.on_success(prev_result_dict, None, args, kwargs)
                self.after_return(celery_states.SUCCESS, res, None, args, kwargs, None)
            except Exception as e:
                einfo = ExceptionInfo()
                self.on_failure(e, None, args, kwargs, einfo)
                self.after_return(celery_states.FAILURE, e, None, args, kwargs, einfo)
                raise
        else:
            prev_result_dict[self.taskobj.id] = self.run(**self.taskobj.params)

        self.create_event(None, "")
        return prev_result_dict
def get_ei():
    """Return an ExceptionInfo captured from a raised KeyError."""
    try:
        raise KeyError('moofoobar')
    # was a bare ``except:`` — only KeyError is raised here, and a bare
    # except would also swallow SystemExit/KeyboardInterrupt.
    except KeyError:
        return ExceptionInfo()
def __call__(self, *args, **kwargs):
    """Configure the task from ``_options`` and run it, handling chunked
    (batched, eager) execution inside a manual transaction."""
    options = kwargs.pop('_options', {})

    self.chunk = options.get('chunk', False)
    self.args = options.get('args', [])
    self.responsible = options.get('responsible')
    self.ip = options.get('ip')
    if self.ip is not None:
        self.ip_objid = get_cached_objid(str(self.ip))
    self.step = options.get('step')
    self.step_pos = options.get('step_pos')
    self.hidden = options.get('hidden', False) or self.hidden
    self.undo_type = options.get('undo', False)
    self.result_params = options.get('result_params', {}) or {}
    self.task_id = options.get('task_id') or self.request.id
    self.eager = options.get('eager') or self.request.is_eager

    if self.chunk:
        res = []
        events = []
        # run all sub-tasks in one manual transaction; commit at the end.
        if not connection.features.autocommits_when_autocommit_is_off:
            transaction.set_autocommit(False)
        try:
            for a in args:
                a_options = a.pop('_options')
                self.eager = True
                self.task_id = a_options['task_id']
                self.args = a_options['args']
                self.progress = 0
                hidden = a_options.get('hidden', False) or self.hidden
                time_started = timezone.now()
                try:
                    retval = self._run(*self.args, **a)
                except:
                    # persist partial progress for the failed sub-task,
                    # record the failure event, then abort the chunk.
                    ProcessTask.objects.filter(pk=self.task_id).update(
                        hidden=hidden,
                        time_started=time_started,
                        progress=self.progress)
                    einfo = ExceptionInfo()
                    if self.event_type:
                        self.create_event(self.task_id, celery_states.FAILURE,
                                          args, a, None, einfo)
                    raise
                else:
                    self.success(retval, self.task_id, None, kwargs)
                    ProcessTask.objects.filter(pk=self.task_id).update(
                        result=retval,
                        status=celery_states.SUCCESS,
                        hidden=hidden,
                        time_started=time_started,
                        time_done=timezone.now(),
                        progress=100)
                    res.append(retval)
                    if self.event_type:
                        self.create_event(self.task_id, celery_states.SUCCESS,
                                          self.args, a, retval, None)
        except:
            raise
        else:
            return res
        finally:
            if not connection.features.autocommits_when_autocommit_is_off:
                transaction.commit()
                transaction.set_autocommit(True)

    # non-chunked path: resolve referenced results, then run.
    for k, v in six.iteritems(self.result_params):
        kwargs[k] = get_result(v, self.eager)

    if self.track:
        ProcessTask.objects.filter(pk=self.task_id).update(
            hidden=self.hidden,
            status=celery_states.STARTED,
            time_started=timezone.now())

    return self._run(*args, **kwargs)
def trace_task(uuid, args, kwargs, request=None):
    """Trace one execution of the bound task: run it, record state,
    dispatch signals/callbacks and return ``(R, I)``."""
    # R - is the possibly prepared return value.
    # I - is the Info object.
    # retval - is the always unmodified return value.
    # state - is the resulting task state.

    # This function is very long because we have unrolled all the calls
    # for performance reasons, and because the function is so long
    # we want the main variables (I, and R) to stand out visually from the
    # the rest of the variables, so breaking PEP8 is worth it ;)
    R = I = retval = state = None
    kwargs = kwdict(kwargs)
    try:
        push_task(task)
        task_request = Context(request or {}, args=args,
                               called_directly=False, kwargs=kwargs)
        push_request(task_request)
        try:
            # -*- PRE -*-
            if prerun_receivers:
                send_prerun(sender=task, task_id=uuid, task=task,
                            args=args, kwargs=kwargs)
            loader_task_init(uuid, task)
            if track_started:
                store_result(
                    uuid, {
                        'pid': pid,
                        'hostname': hostname
                    }, STARTED,
                    request=task_request,
                )

            # -*- TRACE -*-
            try:
                R = retval = fun(*args, **kwargs)
                state = SUCCESS
            except Reject as exc:
                I, R = Info(REJECTED, exc), ExceptionInfo(internal=True)
                state, retval = I.state, I.retval
            except Ignore as exc:
                I, R = Info(IGNORED, exc), ExceptionInfo(internal=True)
                state, retval = I.state, I.retval
            except Retry as exc:
                I, R, state, retval = on_error(
                    task_request, exc, uuid, RETRY, call_errbacks=False,
                )
            except Exception as exc:
                I, R, state, retval = on_error(task_request, exc, uuid)
            except BaseException as exc:
                raise
            else:
                try:
                    # callback tasks must be applied before the result is
                    # stored, so that result.children is populated.

                    # groups are called inline and will store trail
                    # separately, so need to call them separately
                    # so that the trail's not added multiple times :(
                    # (Issue #1936)
                    callbacks = task.request.callbacks
                    if callbacks:
                        if len(task.request.callbacks) > 1:
                            sigs, groups = [], []
                            for sig in callbacks:
                                sig = signature(sig, app=app)
                                if isinstance(sig, group):
                                    groups.append(sig)
                                else:
                                    sigs.append(sig)
                            for group_ in groups:
                                group_.apply_async((retval, ))
                            if sigs:
                                group(sigs).apply_async((retval, ))
                        else:
                            signature(callbacks[0], app=app).delay(retval)
                    if publish_result:
                        store_result(
                            uuid, retval, SUCCESS, request=task_request,
                        )
                except EncodeError as exc:
                    I, R, state, retval = on_error(task_request, exc, uuid)
                else:
                    if task_on_success:
                        task_on_success(retval, uuid, args, kwargs)
                    if success_receivers:
                        send_success(sender=task, result=retval)

            # -* POST *-
            if state not in IGNORE_STATES:
                if task_request.chord:
                    on_chord_part_return(task, state, R)
                if task_after_return:
                    task_after_return(
                        state, retval, uuid, args, kwargs, None,
                    )
        finally:
            try:
                if postrun_receivers:
                    send_postrun(sender=task, task_id=uuid, task=task,
                                 args=args, kwargs=kwargs,
                                 retval=retval, state=state)
            finally:
                pop_task()
                pop_request()
                if not eager:
                    try:
                        backend_cleanup()
                        loader_cleanup()
                    except (KeyboardInterrupt, SystemExit, MemoryError):
                        raise
                    except Exception as exc:
                        _logger.error('Process cleanup failed: %r', exc,
                                      exc_info=True)
    except MemoryError:
        raise
    except Exception as exc:
        # internal error in the tracer itself, not in the task body.
        if eager:
            raise
        R = report_internal_error(task, exc)
    return R, I
def raise_something(i):
    """Raise a KeyError and return its captured ExceptionInfo.

    The *i* argument is accepted but not used by the body.
    """
    try:
        raise KeyError('FOO EXCEPTION')
    except KeyError:
        captured = ExceptionInfo()
    return captured
def trace_task(uuid, args, kwargs, request=None):
    """Trace one execution of the bound task: run it, record state,
    dispatch signals/callbacks and return ``(R, I)``."""
    # R - possibly prepared return value; I - Info object on error.
    R = I = None
    kwargs = kwdict(kwargs)
    try:
        push_task(task)
        task_request = Context(request or {}, args=args,
                               called_directly=False, kwargs=kwargs)
        push_request(task_request)
        try:
            # -*- PRE -*-
            if prerun_receivers:
                send_prerun(sender=task, task_id=uuid, task=task,
                            args=args, kwargs=kwargs)
            loader_task_init(uuid, task)
            if track_started:
                store_result(uuid, {
                    'pid': pid,
                    'hostname': hostname
                }, STARTED)

            # -*- TRACE -*-
            try:
                R = retval = fun(*args, **kwargs)
                state = SUCCESS
            except Ignore as exc:
                I, R = Info(IGNORED, exc), ExceptionInfo(internal=True)
                state, retval = I.state, I.retval
            except RetryTaskError as exc:
                I = Info(RETRY, exc)
                state, retval = I.state, I.retval
                R = I.handle_error_state(task, eager=eager)
            except Exception as exc:
                if propagate:
                    raise
                I = Info(FAILURE, exc)
                state, retval = I.state, I.retval
                R = I.handle_error_state(task, eager=eager)
                # dispatch linked error callbacks with the task id.
                [
                    subtask(errback).apply_async((uuid, ))
                    for errback in task_request.errbacks or []
                ]
            except BaseException as exc:
                raise
            else:
                # callback tasks must be applied before the result is
                # stored, so that result.children is populated.
                [
                    subtask(callback).apply_async((retval, ))
                    for callback in task_request.callbacks or []
                ]
                if publish_result:
                    store_result(uuid, retval, SUCCESS)
                if task_on_success:
                    task_on_success(retval, uuid, args, kwargs)
                if success_receivers:
                    send_success(sender=task, result=retval)

            # -* POST *-
            if state not in IGNORE_STATES:
                if task_request.chord:
                    on_chord_part_return(task)
                if task_after_return:
                    task_after_return(
                        state, retval, uuid, args, kwargs, None,
                    )
                if postrun_receivers:
                    send_postrun(sender=task, task_id=uuid, task=task,
                                 args=args, kwargs=kwargs,
                                 retval=retval, state=state)
        finally:
            pop_task()
            pop_request()
            if not eager:
                try:
                    backend_cleanup()
                    loader_cleanup()
                except (KeyboardInterrupt, SystemExit, MemoryError):
                    raise
                except Exception as exc:
                    _logger.error('Process cleanup failed: %r', exc,
                                  exc_info=True)
    except MemoryError:
        raise
    except Exception as exc:
        # internal error in the tracer itself, not in the task body.
        if eager:
            raise
        R = report_internal_error(task, exc)
    return R, I