def test_report_failure_skipped_when_not_in_debug_mode(self, patch):
    log = patch('h.celery.log')
    sender = mock.Mock(spec=['app'])
    sender.name = 'wibble'
    sender.app.request.debug = False

    # Make a fake ExceptionInfo object
    try:
        raise RuntimeError('asplode!')
    except RuntimeError:
        einfo = ExceptionInfo()

    celery.report_failure(sender, 'abc123', (), {}, einfo)

    assert not log.error.called
def test_on_failure_acks_on_failure_or_timeout_disabled(self):
    self.app.conf.acks_on_failure_or_timeout = False
    job = self.xRequest()
    job.time_start = 1
    self.mytask.acks_late = True
    self.mytask.acks_on_failure_or_timeout = False
    try:
        raise KeyError('foo')
    except KeyError:
        exc_info = ExceptionInfo()
        job.on_failure(exc_info)

    assert job.acknowledged is True
    job._on_reject.assert_called_with(req_logger, job.connection_errors,
                                      False)
    self.app.conf.acks_on_failure_or_timeout = True
def test_on_failure_task_cancelled(self):
    job = self.xRequest()
    job.eventer = Mock()
    job.time_start = 1
    job._already_cancelled = True
    try:
        raise Terminated()
    except Terminated:
        exc_info = ExceptionInfo()
        job.on_failure(exc_info)
        job.on_failure(exc_info)

    assert not job.eventer.send.called
def test_exception_info(self):
    try:
        raise LookupError('The quick brown fox jumps...')
    except Exception:
        einfo = ExceptionInfo()
        self.assertEqual(str(einfo), einfo.traceback)
        self.assertIsInstance(einfo.exception, LookupError)
        self.assertTupleEqual(
            einfo.exception.args, ('The quick brown fox jumps...',),
        )
        self.assertTrue(einfo.traceback)

        r = repr(einfo)
        self.assertTrue(r)
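# A minimal, self-contained sketch of the capture pattern the snippets in
# this section rely on: calling ExceptionInfo() with no arguments inside an
# ``except`` block snapshots sys.exc_info() into .type, .exception, and a
# pre-rendered .traceback string. Assumes billiard (a celery dependency)
# is installed; celery exposes the same class.
from billiard.einfo import ExceptionInfo

def capture_demo():
    try:
        raise LookupError('The quick brown fox jumps...')
    except LookupError:
        einfo = ExceptionInfo()  # implicitly wraps sys.exc_info()
    assert einfo.type is LookupError
    assert isinstance(einfo.exception, LookupError)
    assert 'LookupError' in einfo.traceback  # formatted traceback text
    return einfo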
def decorated_view(task, params, *args, **kwargs):
    entity_class = import_class(params[0])
    if entity_class.module is not None:
        mod = task.app.api_manager.modules[entity_class.module]
        task_local.controller = mod.get_controller()
    elif module is not None:
        # `module` is captured from the enclosing decorator's scope
        mod = task.app.api_manager.modules[module]
        task_local.controller = mod.get_controller()

    task_local.entity_class = entity_class
    task_local.objid = params[1]
    task_local.op = params[2]
    task_local.opid = params[3]
    task_local.delta = params[5]
    task_local.api_id = params[7]

    operation.perms = []
    operation.user = params[6]
    operation.id = params[7]
    operation.session = None
    operation.transaction = None
    operation.authorize = False
    operation.cache = False
    operation.encryption_key = task.app.api_manager.app_fernet_key

    res = None
    # task.update('STARTED', start_time=time(),
    #             msg='Start %s:%s' % (task.name, task.request.id))
    task.update('STARTED', msg='START - %s:%s' % (task.name, task.request.id))
    if synchronous:
        # `synchronous` and `fn` are also captured from the enclosing decorator
        try:
            res = fn(task, params, *args, **kwargs)
        finally:
            task.release_session()
    else:
        try:
            res = fn(task, params, *args, **kwargs)
        except Exception as e:
            msg = 'FAIL - %s:%s caused by %s' % (task.name, task.request.id, e)
            task.on_failure(e, task.request.id, args, kwargs, ExceptionInfo())
            logger.error(msg)
        finally:
            task.release_session()
    return res
def test_on_retry(self):
    job = self.get_request(self.mytask.s(1, f='x'))
    job.eventer = MockEventDispatcher()
    try:
        raise Retry('foo', KeyError('moofoobar'))
    except Retry:
        einfo = ExceptionInfo()
        job.on_failure(einfo)
        self.assertIn('task-retried', job.eventer.sent)
        prev, module._does_info = module._does_info, False
        try:
            job.on_failure(einfo)
        finally:
            module._does_info = prev
        einfo.internal = True
        job.on_failure(einfo)
def test_on_failure_Terminated(self):
    einfo = None
    try:
        raise Terminated('9')
    except Terminated:
        einfo = ExceptionInfo()
    assert einfo is not None
    req = self.get_request(self.add.s(2, 2))
    req.on_failure(einfo)
    req.eventer.send.assert_called_with(
        'task-revoked',
        uuid=req.id, terminated=True, signum='9', expired=False,
    )
def test_on_retry(self):
    tw = TaskRequest(mytask.name, uuid(), [1], {'f': 'x'}, app=self.app)
    tw.eventer = MockEventDispatcher()
    try:
        raise RetryTaskError('foo', KeyError('moofoobar'))
    except RetryTaskError:
        einfo = ExceptionInfo()
        tw.on_failure(einfo)
        self.assertIn('task-retried', tw.eventer.sent)
        prev, module._does_info = module._does_info, False
        try:
            tw.on_failure(einfo)
        finally:
            module._does_info = prev
        einfo.internal = True
        tw.on_failure(einfo)
def _test_on_failure(self, exception, logger):
    app = self.app
    tid = uuid()
    tw = TaskRequest(mytask.name, tid, [4], {'f': 'x'}, app=self.app)
    try:
        raise exception
    except Exception:
        exc_info = ExceptionInfo()
        app.conf.CELERY_SEND_TASK_ERROR_EMAILS = True
        try:
            tw.on_failure(exc_info)
            self.assertTrue(logger.log.called)
            context = logger.log.call_args[0][2]
            self.assertEqual(mytask.name, context['name'])
            self.assertIn(tid, context['id'])
        finally:
            app.conf.CELERY_SEND_TASK_ERROR_EMAILS = False
def test_on_failure_task_cancelled(self):
    job = self.xRequest()
    job.eventer = Mock()
    job.time_start = 1
    job.message.channel.connection = None
    try:
        raise Terminated()
    except Terminated:
        exc_info = ExceptionInfo()
        job.on_failure(exc_info)

        assert job._already_cancelled

        job.on_failure(exc_info)
    job.eventer.send.assert_called_once_with('task-cancelled', uuid=job.id)
def test_on_failure_WorkerLostError_redelivered_None(self):
    try:
        raise WorkerLostError()
    except WorkerLostError:
        einfo = ExceptionInfo(internal=True)

    req = self.get_request(self.add.s(2, 2))
    req.task.acks_late = True
    req.task.reject_on_worker_lost = True
    req.delivery_info['redelivered'] = None
    req.task.backend = Mock()

    req.on_failure(einfo)

    req.on_reject.assert_called_with(req_logger, req.connection_errors, True)
    req.task.backend.mark_as_failure.assert_not_called()
def test_decode_encoded_exception_as_json(self):
    self.app.conf.elasticsearch_save_meta_as_text, prev = \
        False, self.app.conf.elasticsearch_save_meta_as_text
    try:
        x = ElasticsearchBackend(app=self.app)

        try:
            raise Exception("failed")
        except Exception as exc:
            einfo = ExceptionInfo()
            result_meta = x._get_result_meta(
                x.encode_result(exc, states.FAILURE),
                states.FAILURE, einfo.traceback, None,
            )
            assert x.decode(x.encode(result_meta)) == result_meta
    finally:
        self.app.conf.elasticsearch_save_meta_as_text = prev
def test_retry():
    # With celery running in eager mode, the on_retry handler doesn't
    # actually get called when a retry happens. Here we just try to show
    # that when it does get called, the log message is formatted correctly.
    task = LoggedTask()
    task_id = 'my-id'
    args = (1, 2)
    kwargs = {'c': 3}
    try:
        raise ValueError()
    except ValueError as exc:
        einfo = ExceptionInfo()
        with mock.patch('celery_utils.logged_task.log') as mocklog:
            task.on_retry(exc, task_id, args, kwargs, einfo)
            logmessage = mocklog.warning.call_args[0][0]
    assert f'[{task_id}]' in logmessage
    assert einfo.traceback in logmessage
def on_success(self, retval, task_id, args, kwargs):
    try:
        logger.info(
            'task succeeded: task_id is {}, retval is {}, args is {}, '
            'kwargs is {}'.format(task_id, retval, args, kwargs))
        # If the task succeeded and a next step is configured, run it.
        if (self.do_success(retval, task_id, args, kwargs)
                and kwargs.get('next_task_kwargs')):
            for next_task_kwarg in kwargs['next_task_kwargs']:
                with session_scope() as ss:
                    from worker.run_task import run_celery_task
                    run_celery_task(session=ss, **next_task_kwarg)
    except Exception as e:
        einfo = ExceptionInfo()
        einfo.exception = get_pickleable_exception(einfo.exception)
        einfo.type = get_pickleable_etype(einfo.type)
        self.on_failure(e, task_id, args, kwargs, einfo)
def fail_from_current_stack(self, task_id, exc=None):
    type_, real_exc, tb = sys.exc_info()
    try:
        exc = real_exc if exc is None else exc
        exception_info = ExceptionInfo((type_, exc, tb))
        self.mark_as_failure(task_id, exc, exception_info.traceback)
        return exception_info
    finally:
        while tb is not None:
            try:
                tb.tb_frame.clear()
                tb.tb_frame.f_locals
            except RuntimeError:
                # Ignore the exception raised if the frame is still executing.
                pass
            tb = tb.tb_next

        del tb
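# A hedged sketch of the explicit-tuple form used by fail_from_current_stack()
# above: instead of letting ExceptionInfo() read sys.exc_info() itself, you
# pass the (type, value, traceback) triple, which allows substituting a
# different exception object while keeping the traceback captured at the
# raise site. The function name below is illustrative, not part of any API.
import sys
from billiard.einfo import ExceptionInfo

def capture_with_override(exc=None):
    try:
        1 / 0
    except ZeroDivisionError:
        type_, real_exc, tb = sys.exc_info()
        exc = real_exc if exc is None else exc  # caller may swap the exception
        return ExceptionInfo((type_, exc, tb))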
def handle_retry(self, task, store_errors=True):
    """Handle retry exception."""
    # the exception raised is the RetryTaskError semi-predicate,
    # and its `exc` attribute is the original exception raised (if any).
    req = task.request
    type_, _, tb = sys.exc_info()
    try:
        reason = self.retval
        einfo = ExceptionInfo((type_, reason, tb))
        if store_errors:
            task.backend.mark_as_retry(req.id, reason.exc, einfo.traceback)
        task.on_retry(reason.exc, req.id, req.args, req.kwargs, einfo)
        signals.task_retry.send(sender=task, request=req,
                                reason=reason, einfo=einfo)
        return einfo
    finally:
        del tb
def simulate_async_error(self, exception):
    """Take this exception and store it as an error in the result backend.

    This unifies the handling of broker-connection errors with any other
    type of error that might occur when running the task. So the same
    error-handling that might retry a task or display a useful message to
    the user can also handle this error.
    """
    task_id = gen_unique_id()
    async_result = self.AsyncResult(task_id)
    einfo = ExceptionInfo(sys.exc_info())
    async_result.backend.mark_as_failure(
        task_id, exception, traceback=einfo.traceback,
    )
    return async_result
def apply_target(target, args=(), kwargs={}, callback=None,
                 accept_callback=None, pid=None, getpid=os.getpid,
                 propagate=(), monotonic=monotonic, **_):
    if accept_callback:
        accept_callback(pid or getpid(), monotonic())
    try:
        ret = target(*args, **kwargs)
    except propagate:
        raise
    except Exception:
        raise
    except BaseException as exc:
        # Non-Exception BaseExceptions (e.g. SystemExit) are converted to
        # WorkerLostError so the pool can report them via the callback.
        try:
            reraise(WorkerLostError, WorkerLostError(repr(exc)),
                    sys.exc_info()[2])
        except WorkerLostError:
            callback(ExceptionInfo())
    else:
        callback(ret)
def _signal_internal_error(task, uuid, args, kwargs, request, exc):
    """Send a special `internal_error` signal to the app for outside body errors."""
    try:
        _, _, tb = sys.exc_info()
        einfo = ExceptionInfo()
        einfo.exception = get_pickleable_exception(einfo.exception)
        einfo.type = get_pickleable_etype(einfo.type)
        signals.task_internal_error.send(
            sender=task,
            task_id=uuid,
            args=args,
            kwargs=kwargs,
            request=request,
            exception=exc,
            traceback=tb,
            einfo=einfo,
        )
    finally:
        del tb
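# Several handlers in this section (on_success, _signal_internal_error,
# handle_failure) share one pattern: rewrite the captured einfo in place so
# it survives pickling across the worker boundary. A condensed sketch,
# assuming celery's serialization helpers are importable as below:
from billiard.einfo import ExceptionInfo
from celery.utils.serialization import (
    get_pickleable_etype, get_pickleable_exception,
)

def pickleable_einfo():
    try:
        raise RuntimeError('asplode!')
    except RuntimeError:
        einfo = ExceptionInfo()
        # replace unpicklable exception objects/classes with safe stand-ins
        einfo.exception = get_pickleable_exception(einfo.exception)
        einfo.type = get_pickleable_etype(einfo.type)
        return einfo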
def test_on_failure_acks_late_reject_on_worker_lost_enabled(self):
    try:
        raise WorkerLostError()
    except WorkerLostError:
        exc_info = ExceptionInfo()
    self.mytask.acks_late = True
    self.mytask.reject_on_worker_lost = True

    job = self.xRequest()
    job.delivery_info['redelivered'] = False
    job.on_failure(exc_info)

    assert self.mytask.backend.get_status(job.id) == states.PENDING

    job = self.xRequest()
    job.delivery_info['redelivered'] = True
    job.on_failure(exc_info)

    assert self.mytask.backend.get_status(job.id) == states.PENDING
def test_on_retry(self):
    job = self.get_request(self.mytask.s(1, f='x'))
    job.eventer = Mock(name='.eventer')
    try:
        raise Retry('foo', KeyError('moofoobar'))
    except Retry:
        einfo = ExceptionInfo()
        job.on_failure(einfo)
        job.eventer.send.assert_called_with(
            'task-retried',
            uuid=job.id,
            exception=safe_repr(einfo.exception.exc),
            traceback=safe_str(einfo.traceback),
        )
        prev, module._does_info = module._does_info, False
        try:
            job.on_failure(einfo)
        finally:
            module._does_info = prev
        einfo.internal = True
        job.on_failure(einfo)
def handle_failure(self, task, req, store_errors=True):
    """Handle exception."""
    type_, _, tb = sys.exc_info()
    try:
        exc = self.retval
        einfo = ExceptionInfo()
        einfo.exception = get_pickleable_exception(einfo.exception)
        einfo.type = get_pickleable_etype(einfo.type)
        task.backend.mark_as_failure(
            req.id, exc, einfo.traceback, req, store_errors,
        )
        task.on_failure(exc, req.id, req.args, req.kwargs, einfo)
        signals.task_failure.send(sender=task, task_id=req.id,
                                  exception=exc, args=req.args,
                                  kwargs=req.kwargs,
                                  traceback=tb, einfo=einfo)
        self._log_error(task, req, einfo)
        return einfo
    finally:
        del tb
def test_on_retry(self):
    job = Request({
        'task': self.mytask.name,
        'id': uuid(),
        'args': [1],
        'kwargs': {'f': 'x'},
    }, app=self.app)
    job.eventer = MockEventDispatcher()
    try:
        raise Retry('foo', KeyError('moofoobar'))
    except Retry:
        einfo = ExceptionInfo()
        job.on_failure(einfo)
        self.assertIn('task-retried', job.eventer.sent)
        prev, module._does_info = module._does_info, False
        try:
            job.on_failure(einfo)
        finally:
            module._does_info = prev
        einfo.internal = True
        job.on_failure(einfo)
def apply_target(target, args=(), kwargs={}, callback=None, accept_callback=None, pid=None, getpid=os.getpid, propagate=(), monotonic=monotonic, **_): """Apply function within pool context.""" if accept_callback: accept_callback(pid or getpid(), monotonic()) try: ret = target(*args, **kwargs) except propagate: raise except Exception: raise except (WorkerShutdown, WorkerTerminate): raise except BaseException as exc: try: reraise(WorkerLostError, WorkerLostError(repr(exc)), sys.exc_info()[2]) except WorkerLostError: callback(ExceptionInfo()) else: callback(ret)
def handle_retry(self, task, req, store_errors=True, **kwargs):
    """Handle retry exception."""
    # the exception raised is the Retry semi-predicate,
    # and its `exc` attribute is the original exception raised (if any).
    type_, _, tb = sys.exc_info()
    try:
        reason = self.retval
        einfo = ExceptionInfo((type_, reason, tb))
        if store_errors:
            task.backend.mark_as_retry(
                req.id, reason.exc, einfo.traceback, request=req,
            )
        task.on_retry(reason.exc, req.id, req.args, req.kwargs, einfo)
        signals.task_retry.send(sender=task, request=req,
                                reason=reason, einfo=einfo)
        info(LOG_RETRY, {
            'id': req.id,
            'name': task.name,
            'exc': safe_repr(reason.exc),
        })
        return einfo
    finally:
        del tb
def _run(self, *args, **kwargs):
    lock = None
    self.extra_data = {}
    if self.ip:
        ip = InformationPackage.objects.select_related(
            'submission_agreement').get(pk=self.ip)
        self.extra_data.update(
            fill_specification_data(ip=ip, sa=ip.submission_agreement))
        lock = ip.get_lock()
        lock.acquire(blocking=True)

    if self.step:
        step = ProcessStep.objects.get(pk=self.step)
        for ancestor in step.get_ancestors(include_self=True):
            self.extra_data.update(ancestor.context)

    try:
        if self.undo_type:
            res = self.undo(*args, **kwargs)
        else:
            res = self.run(*args, **kwargs)
    except exceptions.Ignore:
        raise
    except Exception as e:
        einfo = ExceptionInfo()
        self.failure(e, self.task_id, args, kwargs, einfo)
        if self.eager:
            self.after_return(celery_states.FAILURE, e, self.task_id,
                              args, kwargs, einfo)
        raise
    else:
        self.success(res, self.task_id, args, kwargs)
    finally:
        if lock is not None:
            lock.release()

    return res
def test_on_failure_WorkerLostError_redelivered_True(self):
    try:
        raise WorkerLostError()
    except WorkerLostError:
        einfo = ExceptionInfo(internal=True)

    req = self.get_request(self.add.s(2, 2))
    req.task.acks_late = False
    req.task.reject_on_worker_lost = True
    req.delivery_info['redelivered'] = True
    req.task.backend = Mock()

    with self.assert_signal_called(
            task_failure,
            sender=req.task,
            task_id=req.id,
            exception=einfo.exception,
            args=req.args,
            kwargs=req.kwargs,
            traceback=einfo.traceback,
            einfo=einfo):
        req.on_failure(einfo)

    req.task.backend.mark_as_failure.assert_called_once_with(
        req.id, einfo.exception, request=req._context, store_result=True)
def trace_task(uuid, args, kwargs, request=None):
    # R      - is the possibly prepared return value.
    # I      - is the Info object.
    # retval - is the always unmodified return value.
    # state  - is the resulting task state.

    # This function is very long because we have unrolled all the calls
    # for performance reasons, and because the function is so long
    # we want the main variables (I, and R) to stand out visually from
    # the rest of the variables, so breaking PEP8 is worth it ;)
    R = I = retval = state = None
    kwargs = kwdict(kwargs)
    try:
        push_task(task)
        task_request = Context(request or {}, args=args,
                               called_directly=False, kwargs=kwargs)
        push_request(task_request)
        try:
            # -*- PRE -*-
            if prerun_receivers:
                send_prerun(sender=task, task_id=uuid, task=task,
                            args=args, kwargs=kwargs)
            loader_task_init(uuid, task)
            if track_started:
                store_result(
                    uuid, {'pid': pid, 'hostname': hostname}, STARTED,
                    request=task_request,
                )

            # -*- TRACE -*-
            try:
                R = retval = fun(*args, **kwargs)
                state = SUCCESS
            except Reject as exc:
                I, R = Info(REJECTED, exc), ExceptionInfo(internal=True)
                state, retval = I.state, I.retval
            except Ignore as exc:
                I, R = Info(IGNORED, exc), ExceptionInfo(internal=True)
                state, retval = I.state, I.retval
            except Retry as exc:
                I, R, state, retval = on_error(
                    task_request, exc, uuid, RETRY, call_errbacks=False,
                )
            except Exception as exc:
                I, R, state, retval = on_error(task_request, exc, uuid)
            except BaseException as exc:
                raise
            else:
                try:
                    # callback tasks must be applied before the result is
                    # stored, so that result.children is populated.

                    # groups are called inline and will store trail
                    # separately, so need to call them separately
                    # so that the trail's not added multiple times :(
                    # (Issue #1936)
                    callbacks = task.request.callbacks
                    if callbacks:
                        if len(task.request.callbacks) > 1:
                            sigs, groups = [], []
                            for sig in callbacks:
                                sig = signature(sig, app=app)
                                if isinstance(sig, group):
                                    groups.append(sig)
                                else:
                                    sigs.append(sig)
                            for group_ in groups:
                                group_.apply_async((retval,))
                            if sigs:
                                group(sigs).apply_async((retval,))
                        else:
                            signature(callbacks[0], app=app).delay(retval)
                    if publish_result:
                        store_result(
                            uuid, retval, SUCCESS, request=task_request,
                        )
                except EncodeError as exc:
                    I, R, state, retval = on_error(task_request, exc, uuid)
                else:
                    if task_on_success:
                        task_on_success(retval, uuid, args, kwargs)
                    if success_receivers:
                        send_success(sender=task, result=retval)

            # -* POST *-
            if state not in IGNORE_STATES:
                if task_request.chord:
                    on_chord_part_return(task, state, R)
                if task_after_return:
                    task_after_return(
                        state, retval, uuid, args, kwargs, None,
                    )
        finally:
            try:
                if postrun_receivers:
                    send_postrun(sender=task, task_id=uuid, task=task,
                                 args=args, kwargs=kwargs,
                                 retval=retval, state=state)
            finally:
                pop_task()
                pop_request()
                if not eager:
                    try:
                        backend_cleanup()
                        loader_cleanup()
                    except (KeyboardInterrupt, SystemExit, MemoryError):
                        raise
                    except Exception as exc:
                        _logger.error('Process cleanup failed: %r', exc,
                                      exc_info=True)
    except MemoryError:
        raise
    except Exception as exc:
        if eager:
            raise
        R = report_internal_error(task, exc)
    return R, I
def catch_exception(exception):
    try:
        raise exception
    except exception.__class__ as exc:
        exc = current_app.backend.prepare_exception(exc)
        return exc, ExceptionInfo(sys.exc_info()).traceback