def _log_error(self, task, req, einfo):
    """Log a task failure using the task's log policy.

    Unpickles the exception stored on *einfo* (it may have crossed a
    process boundary), builds a logging context from the request, and
    emits one record at the policy-selected severity.
    """
    # Replace the possibly-pickled exception with the real instance.
    eobj = einfo.exception = get_pickled_exception(einfo.exception)
    exception, traceback, exc_info, sargs, skwargs = (
        safe_repr(eobj),
        safe_str(einfo.traceback),
        einfo.exc_info,
        safe_repr(req.args),
        safe_repr(req.kwargs),
    )
    # Policy decides severity, format string and whether to log exc_info.
    policy = get_log_policy(task, einfo, eobj)
    context = {
        'hostname': req.hostname,
        'id': req.id,
        'name': task.name,
        'exc': exception,
        'traceback': traceback,
        'args': sargs,
        'kwargs': skwargs,
        'description': policy.description,
        'internal': einfo.internal,
    }
    logger.log(policy.severity, policy.format.strip(), context,
               exc_info=exc_info if policy.traceback else None,
               extra={'data': context})
def on_task(self, task, task_reserved=task_reserved):
    """Handle received task.

    If the task has an `eta` we enter it into the ETA schedule,
    otherwise we move it the ready queue for immediate processing.
    """
    if task.revoked():
        return
    if self._does_info:
        info('Got task from broker: %s', task)
    if self.event_dispatcher.enabled:
        self.event_dispatcher.send(
            'task-received',
            uuid=task.id, name=task.name,
            args=safe_repr(task.args), kwargs=safe_repr(task.kwargs),
            retries=task.request_dict.get('retries', 0),
            eta=task.eta and task.eta.isoformat(),
            expires=task.expires and task.expires.isoformat())
    if task.eta:
        try:
            eta = timer2.to_timestamp(task.eta)
        except OverflowError as exc:
            # The eta can never fire: ack the task rather than loop on it.
            error("Couldn't convert eta %s to timestamp: %r. Task: %r",
                  task.eta, exc, task.info(safe=True), exc_info=True)
            task.acknowledge()
        else:
            # presumably raises prefetch so the held ETA task does not
            # block new deliveries — confirm against qos semantics.
            self.qos.increment_eventually()
            self.timer.apply_at(eta, self.apply_eta_task, (task, ),
                                priority=6)
    else:
        task_reserved(task)
        self._quick_put(task)
def as_task_v1(self, task_id, name, args=None, kwargs=None,
               countdown=None, eta=None, group_id=None,
               expires=None, retries=0,
               chord=None, callbacks=None, errbacks=None, reply_to=None,
               time_limit=None, soft_time_limit=None,
               create_sent_event=False, root_id=None, parent_id=None,
               shadow=None, now=None, timezone=None):
    """Build a protocol v1 task message (all fields live in the body).

    Validates args/kwargs types, converts ``countdown`` and numeric
    ``expires`` into absolute datetimes, then serializes both to
    ISO-8601 strings before packing the message.

    Note: ``root_id``, ``parent_id`` and ``shadow`` are accepted for
    signature compatibility but are not written into the v1 body.
    """
    args = args or ()
    kwargs = kwargs or {}
    utc = self.utc
    if not isinstance(args, (list, tuple)):
        raise ValueError('task args must be a list or tuple')
    if not isinstance(kwargs, Mapping):
        raise ValueError('task keyword arguments must be a mapping')
    if countdown:  # convert countdown to ETA
        now = now or self.app.now()
        timezone = timezone or self.app.timezone
        eta = now + timedelta(seconds=countdown)
        if utc:
            eta = to_utc(eta).astimezone(timezone)
    if isinstance(expires, numbers.Real):
        # Numeric expires means "seconds from now"; datetimes pass through.
        now = now or self.app.now()
        timezone = timezone or self.app.timezone
        expires = now + timedelta(seconds=expires)
        if utc:
            expires = to_utc(expires).astimezone(timezone)
    eta = eta and eta.isoformat()
    expires = expires and expires.isoformat()
    return task_message(
        headers={},
        properties={
            'correlation_id': task_id,
            'reply_to': reply_to or '',
        },
        body={
            'task': name,
            'id': task_id,
            'args': args,
            'kwargs': kwargs,
            'retries': retries,
            'eta': eta,
            'expires': expires,
            'utc': utc,
            'callbacks': callbacks,
            'errbacks': errbacks,
            'timelimit': (time_limit, soft_time_limit),
            'taskset': group_id,
            'chord': chord,
        },
        sent_event={
            'uuid': task_id,
            'name': name,
            'args': safe_repr(args),
            'kwargs': safe_repr(kwargs),
            'retries': retries,
            'eta': eta,
            'expires': expires,
        } if create_sent_event else None,
    )
def _message_report(self, body, message):
    """Render a human-readable report describing an incoming message."""
    fields = (
        dump_body(message, body),
        safe_repr(message.content_type),
        safe_repr(message.content_encoding),
        safe_repr(message.delivery_info),
    )
    return MESSAGE_REPORT.format(*fields)
def _log_error(self, exc_info):
    """Log a failed task and send the ``task-failed`` event.

    Internal errors (worker machinery, not task code) escalate the log
    severity to CRITICAL and use the internal-error format string.
    """
    format = self.error_msg
    description = "raised exception"
    severity = logging.ERROR
    self.send_event("task-failed", uuid=self.id,
                    exception=safe_repr(exc_info.exception),
                    traceback=safe_str(exc_info.traceback))
    if exc_info.internal:
        format = self.internal_error_msg
        description = "INTERNAL ERROR"
        severity = logging.CRITICAL
    context = {"hostname": self.hostname,
               "id": self.id,
               "name": self.name,
               "exc": safe_repr(exc_info.exception),
               "traceback": safe_str(exc_info.traceback),
               "args": safe_repr(self.args),
               "kwargs": safe_repr(self.kwargs),
               "description": description}
    logger.log(severity, format.strip(), context,
               exc_info=exc_info.exc_info,
               extra={"data": {"id": self.id,
                               "name": self.name,
                               "hostname": self.hostname}})
    # NOTE(review): the fallback here is the `object` *class*, which has
    # no `send_error_email` — an unknown task name would raise
    # AttributeError on the next line; confirm this is unreachable.
    task_obj = self.app.tasks.get(self.name, object)
    task_obj.send_error_email(context, exc_info.exception)
def _log_error(self, task, req, einfo):
    """Log a task failure using the task's configured log policy."""
    # Replace the possibly-pickled exception with the real instance.
    eobj = einfo.exception = get_pickled_exception(einfo.exception)
    exception, traceback, exc_info, sargs, skwargs = (
        safe_repr(eobj),
        safe_str(einfo.traceback),
        einfo.exc_info,
        safe_repr(req.args),
        safe_repr(req.kwargs),
    )
    # The policy decides severity, format, and whether to attach exc_info.
    policy = get_log_policy(task, einfo, eobj)
    context = {
        "hostname": req.hostname,
        "id": req.id,
        "name": task.name,
        "exc": exception,
        "traceback": traceback,
        "args": sargs,
        "kwargs": skwargs,
        "description": policy.description,
        "internal": einfo.internal,
    }
    logger.log(
        policy.severity,
        policy.format.strip(),
        context,
        exc_info=exc_info if policy.traceback else None,
        extra={"data": context},
    )
def _log_error(self, einfo, send_failed_event=True):
    """Log a task failure, handling Reject/Ignore/MemoryError specially.

    Arguments:
        einfo: ExceptionInfo for the failure (exception may be pickled).
        send_failed_event (bool): whether to emit ``task-failed``.
    """
    einfo.exception = get_pickled_exception(einfo.exception)
    exception, traceback, exc_info, internal, sargs, skwargs = (
        safe_repr(einfo.exception),
        safe_str(einfo.traceback),
        einfo.exc_info,
        einfo.internal,
        safe_repr(self.args),
        safe_repr(self.kwargs),
    )
    format = self.error_msg
    description = 'raised exception'
    severity = logging.ERROR
    if send_failed_event:
        self.send_event(
            'task-failed',
            exception=exception,
            traceback=traceback,
        )
    if internal:
        # Internal errors come from the worker machinery, not task code.
        if isinstance(einfo.exception, MemoryError):
            # Never swallow memory errors: propagate to the worker.
            raise MemoryError('Process got: %s' % (einfo.exception, ))
        elif isinstance(einfo.exception, Reject):
            format = self.rejected_msg
            description = 'rejected'
            severity = logging.WARN
            exc_info = einfo
            self.reject(requeue=einfo.exception.requeue)
        elif isinstance(einfo.exception, Ignore):
            format = self.ignored_msg
            description = 'ignored'
            severity = logging.INFO
            exc_info = None
            self.acknowledge()
        else:
            format = self.internal_error_msg
            description = 'INTERNAL ERROR'
            severity = logging.CRITICAL
    context = {
        'hostname': self.hostname,
        'id': self.id,
        'name': self.name,
        'exc': exception,
        'traceback': traceback,
        'args': sargs,
        'kwargs': skwargs,
        'description': description,
    }
    logger.log(severity, format.strip(), context,
               exc_info=exc_info,
               extra={'data': {'id': self.id,
                               'name': self.name,
                               'args': sargs,
                               'kwargs': skwargs,
                               'hostname': self.hostname,
                               'internal': internal}})
    self.task.send_error_email(context, einfo.exception)
def _send_event(self, request):
    """Publish a ``task-received`` event for *request*."""
    eta = request.eta
    expires = request.expires
    fields = {
        'uuid': request.id,
        'name': request.name,
        'args': safe_repr(request.args),
        'kwargs': safe_repr(request.kwargs),
        'retries': request.request_dict.get('retries', 0),
        'eta': eta and eta.isoformat(),
        'expires': expires and expires.isoformat(),
    }
    self.send_event('task-received', **fields)
def info(self, safe=False):
    """Return a snapshot of this request for inspection/reporting.

    Arguments:
        safe (bool): when True, args/kwargs are returned as reprs that
            are safe to serialize; when False the raw objects are used.

    Returns:
        dict: request metadata.
    """
    # Bug fix: the condition was inverted — ``safe=True`` previously
    # returned the *raw* (potentially unserializable) args/kwargs.
    return {'id': self.id,
            'name': self.name,
            'args': safe_repr(self.args) if safe else self.args,
            'kwargs': safe_repr(self.kwargs) if safe else self.kwargs,
            'hostname': self.hostname,
            'time_start': self.time_start,
            'acknowledged': self.acknowledged,
            'delivery_info': self.delivery_info,
            'worker_pid': self.worker_pid}
def on_retry(self, exc_info):
    """Handler called if the task should be retried."""
    original_exc = exc_info.exception.exc
    exc_repr = safe_repr(original_exc)
    self.send_event("task-retried", uuid=self.id,
                    exception=exc_repr,
                    traceback=safe_str(exc_info.traceback))
    if _does_info:
        context = {"id": self.id, "name": self.name, "exc": exc_repr}
        info(self.retry_msg.strip(), context, exc_info=exc_info)
def info(self, safe=False):
    """Return a snapshot of this request for inspection/reporting.

    Arguments:
        safe (bool): when True, args/kwargs are returned as reprs that
            are safe to serialize; when False the raw objects are used.

    Returns:
        dict: request metadata.
    """
    # Bug fix: the condition was inverted — ``safe=True`` previously
    # returned the *raw* (potentially unserializable) args/kwargs.
    return {"id": self.id,
            "name": self.name,
            "args": safe_repr(self.args) if safe else self.args,
            "kwargs": safe_repr(self.kwargs) if safe else self.kwargs,
            "hostname": self.hostname,
            "time_start": self.time_start,
            "acknowledged": self.acknowledged,
            "delivery_info": self.delivery_info,
            "worker_pid": self.worker_pid}
def prerun_handler(sender, task, task_id, args, kwargs, *options, **kwoptions):
    """task_prerun signal handler: mark the Job row as STARTED.

    Uses ``select_for_update`` inside a transaction so concurrent
    publish/prerun handlers cannot race on the same row; creates the
    row if the publish handler has not done so yet.
    """
    with transaction.atomic():
        job_Locked, created = Job.objects.select_for_update().get_or_create(
            task_id=task_id,
            defaults={
                "task": _simplify_task_name(task.name),
                "args": safe_repr(args),
                "kwargs": safe_repr(kwargs),
                "category": task.category,
            },
        )
        job_Locked.state = states.STARTED
        job_Locked.timestamp_prerun = get_utc_now()
        job_Locked.save()
def apply_async(self, target, args=None, kwargs=None, **options):
    """Equivalent of the :func:`apply` built-in function.

    Callbacks should optimally return as soon as possible since
    otherwise the thread which handles the result will get blocked.
    """
    # Bug fix: mutable default arguments ([] / {}) are shared between
    # calls and can be mutated downstream; normalize None instead.
    args = [] if args is None else args
    kwargs = {} if kwargs is None else kwargs
    if self._does_debug:
        logger.debug('TaskPool: Apply %s (args:%s kwargs:%s)',
                     target, safe_repr(args), safe_repr(kwargs))
    return self.on_apply(target, args, kwargs,
                         waitforslot=self.putlocks,
                         **options)
def _log_error(self, einfo):
    """Log a failed task, send the ``task-failed`` event and error email."""
    # Exception may have been pickled across processes: restore it.
    einfo.exception = get_pickled_exception(einfo.exception)
    exception, traceback, exc_info, internal, sargs, skwargs = (
        safe_repr(einfo.exception),
        safe_str(einfo.traceback),
        einfo.exc_info,
        einfo.internal,
        safe_repr(self.args),
        safe_repr(self.kwargs),
    )
    format = self.error_msg
    description = "raised exception"
    severity = logging.ERROR
    self.send_event("task-failed", exception=exception, traceback=traceback)
    if internal:
        # Worker-side failure (not task code): escalate to CRITICAL.
        format = self.internal_error_msg
        description = "INTERNAL ERROR"
        severity = logging.CRITICAL
    context = {
        "hostname": self.hostname,
        "id": self.id,
        "name": self.name,
        "exc": exception,
        "traceback": traceback,
        "args": sargs,
        "kwargs": skwargs,
        "description": description,
    }
    logger.log(
        severity,
        format.strip(),
        context,
        exc_info=exc_info,
        extra={
            "data": {
                "id": self.id,
                "name": self.name,
                "args": sargs,
                "kwargs": skwargs,
                "hostname": self.hostname,
                "internal": internal,
            }
        },
    )
    self.task.send_error_email(context, einfo.exception)
def before_task_publish_handler(body, *options, **kwoptions):
    """before_task_publish signal handler: create a PENDING Job row.

    Skips creation when a row already exists for the task id
    (e.g. for retries of an already-tracked task).
    """
    with transaction.atomic():
        task_name = _simplify_task_name(body["task"])
        headers = kwoptions["headers"]
        task_id = body["id"]
        if not Job.objects.filter(task_id=task_id).exists():
            Job.objects.create(
                task=task_name,
                args=safe_repr(body["args"]),
                kwargs=safe_repr(body["kwargs"]),
                task_id=task_id,
                state=states.PENDING,
                category=headers["category"],
                scheduled=headers.get("scheduled", False),
            )
def task_message_handler(message, body, ack, reject, callbacks, to_timestamp=to_timestamp): req = Req(body, on_ack=ack, on_reject=reject, app=app, hostname=hostname, eventer=eventer, task=task, connection_errors=connection_errors, message=message) # do check revoke purge befor task handler, skip the expired revoke revoked_tasks.purge(limit=None, offset=REVOKES_MAX) # paused.purge(limit=None, offset=REVOKES_MAX) if req.revoked(): return if _does_info: logger.info('hera Received task: %s', req) if events: send_event( 'task-received', uuid=req.id, name=req.name, args=safe_repr(req.args), kwargs=safe_repr(req.kwargs), retries=req.request_dict.get('retries', 0), eta=req.eta and req.eta.isoformat(), expires=req.expires and req.expires.isoformat(), ) if req.eta: try: if req.utc: eta = to_timestamp(to_system_tz(req.eta)) else: eta = to_timestamp(req.eta, timezone.local) except OverflowError as exc: error("Couldn't convert eta %s to timestamp: %r. Task: %r", req.eta, exc, req.info(safe=True), exc_info=True) req.acknowledge() else: consumer.qos.increment_eventually() call_at(eta, apply_eta_task, (req, ), priority=6) else: if rate_limits_enabled: bucket = get_bucket(task.name) if bucket: return limit_task(req, bucket, 1) task_reserved(req) if callbacks: [callback() for callback in callbacks] handle(req)
def on_success(self, ret_value, now=None):
    """Handler called if the task was successfully processed."""
    if isinstance(ret_value, ExceptionInfo):
        # Fatal signals must propagate; other captured exceptions are
        # routed to the failure handler.
        if isinstance(ret_value.exception, (SystemExit, KeyboardInterrupt)):
            raise ret_value.exception
        return self.on_failure(ret_value)
    task_ready(self)
    if self.task.acks_late:
        self.acknowledge()
    if self.eventer and self.eventer.enabled:
        now = time.time()
        runtime = self.time_start and (time.time() - self.time_start) or 0
        self.send_event("task-succeeded", uuid=self.id,
                        result=safe_repr(ret_value), runtime=runtime)
    if _does_info:
        now = now or time.time()
        runtime = self.time_start and (time.time() - self.time_start) or 0
        info(
            self.success_msg.strip(), {
                "id": self.id,
                "name": self.name,
                "return_value": self.repr_result(ret_value),
                "runtime": runtime
            })
def dump_body(m, body): """Format message body for debugging purposes.""" # v2 protocol does not deserialize body body = m.body if body is None else body if isinstance(body, buffer_t): body = bytes_t(body) return '{0} ({1}b)'.format(truncate(safe_repr(body), 1024), len(m.body))
def on_success(self, ret_value, now=None):
    """Handler called if the task was successfully processed."""
    if isinstance(ret_value, ExceptionInfo):
        # Fatal signals propagate; anything else goes to the failure path.
        if isinstance(ret_value.exception, (
                SystemExit, KeyboardInterrupt)):
            raise ret_value.exception
        return self.on_failure(ret_value)
    task_ready(self)
    if self.task.acks_late:
        self.acknowledge()
    if self.eventer and self.eventer.enabled:
        now = time.time()
        runtime = self.time_start and (time.time() - self.time_start) or 0
        self.send_event('task-succeeded', uuid=self.id,
                        result=safe_repr(ret_value), runtime=runtime)
    if _does_info:
        now = now or time.time()
        runtime = self.time_start and (time.time() - self.time_start) or 0
        info(self.success_msg.strip(), {
            'id': self.id,
            'name': self.name,
            'return_value': self.repr_result(ret_value),
            'runtime': runtime})
def dump_body(m, body):
    """Format a message body for debug output (repr capped at 1024 chars)."""
    # v2 protocol does not deserialize the body, so fall back to the raw one.
    if body is None:
        body = m.body
    if isinstance(body, buffer_t):
        body = bytes_t(body)
    preview = truncate(safe_repr(body), 1024)
    return '{0} ({1}b)'.format(preview, len(m.body))
def on_success(self, ret_value, now=None, nowfun=monotonic):
    """Handler called if the task was successfully processed."""
    if isinstance(ret_value, ExceptionInfo):
        # Fatal signals propagate; other exceptions go to on_failure.
        if isinstance(ret_value.exception, (SystemExit, KeyboardInterrupt)):
            raise ret_value.exception
        return self.on_failure(ret_value)
    task_ready(self)
    if self.task.acks_late:
        self.acknowledge()
    if self.eventer and self.eventer.enabled:
        now = nowfun()
        runtime = self.time_start and (now - self.time_start) or 0
        self.send_event('task-succeeded',
                        result=safe_repr(ret_value), runtime=runtime)
    if _does_info:
        now = now or nowfun()
        runtime = self.time_start and (now - self.time_start) or 0
        info(
            self.success_msg.strip(), {
                'id': self.id,
                'name': self.name,
                'return_value': self.repr_result(ret_value),
                'runtime': runtime
            })
def handle_retry(self, task, req, store_errors=True): """Handle retry exception.""" # the exception raised is the Retry semi-predicate, # and it's exc' attribute is the original exception raised (if any). type_, _, tb = sys.exc_info() try: reason = self.retval einfo = ExceptionInfo((type_, reason, tb)) if store_errors: task.backend.mark_as_retry( req.id, reason.exc, einfo.traceback, request=req, ) task.on_retry(reason.exc, req.id, req.args, req.kwargs, einfo) signals.task_retry.send(sender=task, request=req, reason=reason, einfo=einfo) info(LOG_RETRY, { 'id': req.id, 'name': task.name, 'exc': safe_repr(reason.exc), }) return einfo finally: del (tb)
def on_task(self, task, task_reserved=task_reserved):
    """Handle received task.

    If the task has an `eta` we enter it into the ETA schedule,
    otherwise we move it the ready queue for immediate processing.
    """
    if task.revoked():
        return
    if self._does_info:
        info('Got task from broker: %s', task)
    if self.event_dispatcher.enabled:
        self.event_dispatcher.send(
            'task-received',
            uuid=task.id, name=task.name,
            args=safe_repr(task.args), kwargs=safe_repr(task.kwargs),
            retries=task.request_dict.get(
                'retries', 0),
            eta=task.eta and task.eta.isoformat(),
            expires=task.expires and task.expires.isoformat())
    if task.eta:
        # UTC etas are converted to the local system timezone first.
        eta = timezone.to_system(task.eta) if task.utc else task.eta
        try:
            eta = to_timestamp(eta)
        except OverflowError as exc:
            # The eta can never fire: ack and drop rather than loop on it.
            error("Couldn't convert eta %s to timestamp: %r. Task: %r",
                  task.eta, exc, task.info(safe=True), exc_info=True)
            task.acknowledge()
        else:
            self.qos.increment_eventually()
            self.timer.apply_at(
                eta, self.apply_eta_task, (task, ), priority=6,
            )
    else:
        task_reserved(task)
        self._quick_put(task)
def dump_reserved(panel, safe=False, **kwargs):
    """Inspect command: list reserved (but not yet active) requests."""
    reserved = state.reserved_requests - state.active_requests
    if not reserved:
        logger.debug("--Empty queue--")
        return []
    if logger.isEnabledFor(logging.DEBUG):
        dump = "\n".join(safe_repr(req) for req in reserved)
        logger.debug("* Dump of currently reserved tasks:\n%s", dump)
    return [req.info(safe=safe) for req in reserved]
def dump_reserved(panel, safe=False, **kwargs):
    """Inspect command: list all currently reserved requests."""
    reserved = state.reserved_requests
    if not reserved:
        logger.debug('--Empty queue--')
        return []
    dump = '\n'.join(safe_repr(req) for req in reserved)
    logger.debug('* Dump of currently reserved tasks:\n%s', dump)
    return [req.info(safe=safe) for req in reserved]
def on_retry(self, exc_info):
    """Handler called if the task should be retried."""
    # Late-acking tasks acknowledge now: the retry arrives as a new message.
    if self.task.acks_late:
        self.acknowledge()
    retry_exc = exc_info.exception.exc
    self.send_event('task-retried',
                    exception=safe_repr(retry_exc),
                    traceback=safe_str(exc_info.traceback))
def success_handler(sender, result, *args, **kwargs):
    """task_success signal handler: persist the result on the Job row."""
    with transaction.atomic():
        try:
            # Row-lock so concurrent handlers cannot clobber each other.
            job_Locked = Job.objects.select_for_update().get(task_id=sender.request.id)
        except Job.DoesNotExist:
            raise ObjectDoesNotExist
        job_Locked.result = safe_repr(result) or ""
        job_Locked.save()
def on_failure(self, exc_info, send_failed_event=True, return_ok=False):
    """Handler called if the task raised an exception."""
    task_ready(self)
    # MemoryError must propagate; Reject/Ignore short-circuit handling.
    if isinstance(exc_info.exception, MemoryError):
        raise MemoryError("Process got: %s" % (exc_info.exception, ))
    elif isinstance(exc_info.exception, Reject):
        return self.reject(requeue=exc_info.exception.requeue)
    elif isinstance(exc_info.exception, Ignore):
        return self.acknowledge()
    exc = exc_info.exception
    if isinstance(exc, Retry):
        return self.on_retry(exc_info)
    # (acks_late) acknowledge after result stored.
    requeue = False
    if self.task.acks_late:
        reject = self.task.reject_on_worker_lost and isinstance(
            exc, WorkerLostError)
        ack = self.task.acks_on_failure_or_timeout
        if reject:
            requeue = True
            self.reject(requeue=requeue)
            send_failed_event = False
        elif ack:
            self.acknowledge()
        else:
            # supporting the behaviour where a task failed and
            # need to be removed from prefetched local queue
            self.reject(requeue=False)
    # These are special cases where the process would not have had time
    # to write the result.
    if isinstance(exc, Terminated):
        self._announce_revoked("terminated", True, string(exc), False)
        send_failed_event = False  # already sent revoked event
    elif not requeue and (isinstance(exc, WorkerLostError) or not return_ok):
        # only mark as failure if task has not been requeued
        self.task.backend.mark_as_failure(
            self.id, exc, request=self._context,
            store_result=self.store_errors,
        )
    if send_failed_event:
        self.send_event(
            "task-failed",
            exception=safe_repr(get_pickled_exception(exc_info.exception)),
            traceback=exc_info.traceback,
        )
    if not return_ok:
        error("Task handler raised error: %r", exc,
              exc_info=exc_info.exc_info)
def task_message_handler(message, body, ack, reject, callbacks,
                         to_timestamp=to_timestamp):
    """Handle one incoming task message (closure over consumer state).

    Builds a Request, honours revokes, emits ``task-received`` and then
    either schedules the eta task, applies rate limits, or hands off.
    """
    req = Req(body, on_ack=ack, on_reject=reject, app=app,
              hostname=hostname, eventer=eventer, task=task,
              connection_errors=connection_errors, message=message)
    if req.revoked():
        return
    if _does_info:
        info('Received task: %s', req)
    if task_sends_events:
        send_event(
            'task-received',
            uuid=req.id, name=req.name,
            args=safe_repr(req.args), kwargs=safe_repr(req.kwargs),
            retries=req.request_dict.get('retries', 0),
            eta=req.eta and req.eta.isoformat(),
            expires=req.expires and req.expires.isoformat(),
        )
    if req.eta:
        try:
            # UTC etas are first converted to the system timezone.
            if req.utc:
                eta = to_timestamp(to_system_tz(req.eta))
            else:
                eta = to_timestamp(req.eta, timezone.local)
        except OverflowError as exc:
            # Unrepresentable eta: ack and drop rather than loop forever.
            error("Couldn't convert eta %s to timestamp: %r. Task: %r",
                  req.eta, exc, req.info(safe=True), exc_info=True)
            req.acknowledge()
        else:
            consumer.qos.increment_eventually()
            call_at(eta, apply_eta_task, (req, ), priority=6)
    else:
        if rate_limits_enabled:
            bucket = get_bucket(task.name)
            if bucket:
                return limit_task(req, bucket, 1)
        task_reserved(req)
        if callbacks:
            [callback() for callback in callbacks]
        handle(req)
def __init__(self, message=None, exc=None, when=None, **kwargs):
    # Imported lazily — presumably to avoid an import-time cycle with
    # kombu; confirm before moving to module level.
    from kombu.utils.encoding import safe_repr
    self.message = message
    if isinstance(exc, string_t):
        # A plain string: keep it as the repr, with no exception object.
        self.exc, self.excs = None, exc
    else:
        self.exc, self.excs = exc, safe_repr(exc) if exc else None
    self.when = when
    Exception.__init__(self, exc, when, **kwargs)
def success_handler(sender, result, *args, **kwargs):
    """task_success signal handler: persist the result on the Job row."""
    with transaction.atomic():
        try:
            # Row-lock so concurrent handlers cannot clobber each other.
            job_Locked = Job.objects.select_for_update().get(
                task_id=sender.request.id)
        except Job.DoesNotExist:
            raise ObjectDoesNotExist
        job_Locked.result = safe_repr(result) or ''
        job_Locked.save()
def _log_error(self, einfo):
    """Log a failed task, send ``task-failed`` event and the error email."""
    # Exception may have been pickled across processes: restore it.
    einfo.exception = get_pickled_exception(einfo.exception)
    exception, traceback, exc_info, internal, sargs, skwargs = (
        safe_repr(einfo.exception),
        safe_str(einfo.traceback),
        einfo.exc_info,
        einfo.internal,
        safe_repr(self.args),
        safe_repr(self.kwargs),
    )
    format = self.error_msg
    description = 'raised exception'
    severity = logging.ERROR
    self.send_event('task-failed', exception=exception, traceback=traceback)
    if internal:
        # Worker-side failure (not task code): escalate to CRITICAL.
        format = self.internal_error_msg
        description = 'INTERNAL ERROR'
        severity = logging.CRITICAL
    context = {
        'hostname': self.hostname,
        'id': self.id,
        'name': self.name,
        'exc': exception,
        'traceback': traceback,
        'args': sargs,
        'kwargs': skwargs,
        'description': description,
    }
    logger.log(severity, format.strip(), context,
               exc_info=exc_info,
               extra={'data': {'id': self.id,
                               'name': self.name,
                               'args': sargs,
                               'kwargs': skwargs,
                               'hostname': self.hostname,
                               'internal': internal}})
    self.task.send_error_email(context, einfo.exception)
def apply_async(self, target, args=None, kwargs=None, **options):
    """Equivalent of the :func:`apply` built-in function.

    Callbacks should optimally return as soon as possible since
    otherwise the thread which handles the result will get blocked.
    """
    # Normalize falsy defaults into fresh containers.
    if not kwargs:
        kwargs = {}
    if not args:
        args = []
    if self._does_debug:
        logger.debug('TaskPool: Apply %s (args:%s kwargs:%s)',
                     target,
                     truncate(safe_repr(args), 1024),
                     truncate(safe_repr(kwargs), 1024))
    return self.on_apply(target, args, kwargs,
                         waitforslot=self.putlocks,
                         callbacks_propagate=self.callbacks_propagate,
                         **options)
def on_retry(self, exc_info):
    """Handler called if the task should be retried."""
    retry = exc_info.exception
    self.send_event('task-retried', uuid=self.id,
                    exception=safe_repr(retry.exc),
                    traceback=safe_str(exc_info.traceback))
    if _does_info:
        context = {'id': self.id, 'name': self.name, 'exc': retry}
        info(self.retry_msg.strip(), context)
def task_message_handler(message, body, ack, to_timestamp=to_timestamp):
    """Handle one incoming task message (closure over consumer state)."""
    req = Req(body, on_ack=ack, app=app, hostname=hostname,
              eventer=eventer, task=task,
              connection_errors=connection_errors,
              delivery_info=message.delivery_info)
    if req.revoked():
        return
    if _does_info:
        info('Got task from broker: %s', req)
    if events:
        send_event(
            'task-received',
            uuid=req.id, name=req.name,
            args=safe_repr(req.args), kwargs=safe_repr(req.kwargs),
            retries=req.request_dict.get('retries', 0),
            eta=req.eta and req.eta.isoformat(),
            expires=req.expires and req.expires.isoformat(),
        )
    if req.eta:
        try:
            # UTC etas are first converted to the system timezone.
            if req.utc:
                eta = to_timestamp(to_system_tz(req.eta))
            else:
                eta = to_timestamp(req.eta, timezone.local)
        except OverflowError as exc:
            error("Couldn't convert eta %s to timestamp: %r. Task: %r",
                  req.eta, exc, req.info(safe=True), exc_info=True)
            req.acknowledge()
        else:
            consumer.qos.increment_eventually()
            timer_apply_at(
                eta, apply_eta_task, (req, ), priority=6,
            )
    else:
        if rate_limits_enabled:
            # NOTE(review): `bucket` is read from the enclosing scope and
            # never assigned here, unlike sibling handlers that call
            # get_bucket(task.name) — confirm the closure provides a
            # per-task bucket.
            if bucket:
                return limit_task(req, bucket, 1)
        task_reserved(req)
        handle(req)
def _log_error(self, einfo):
    """Log a failed task, send ``task-failed`` event and the error email."""
    exception, traceback, exc_info, internal, sargs, skwargs = (
        safe_repr(einfo.exception),
        safe_str(einfo.traceback),
        einfo.exc_info,
        einfo.internal,
        safe_repr(self.args),
        safe_repr(self.kwargs),
    )
    format = self.error_msg
    description = 'raised exception'
    severity = logging.ERROR
    self.send_event('task-failed', uuid=self.id,
                    exception=exception, traceback=traceback)
    if internal:
        # Worker-side failure (not the task's own code).
        format = self.internal_error_msg
        description = 'INTERNAL ERROR'
        severity = logging.CRITICAL
    context = {
        'hostname': self.hostname,
        'id': self.id,
        'name': self.name,
        'exc': exception,
        'traceback': traceback,
        'args': sargs,
        'kwargs': skwargs,
        'description': description,
    }
    logger.log(severity, format.strip(), context,
               exc_info=exc_info,
               extra={'data': {'id': self.id,
                               'name': self.name,
                               'args': sargs,
                               'kwargs': skwargs,
                               'hostname': self.hostname,
                               'internal': internal}})
    self.task.send_error_email(context, einfo.exception)
def on_retry(self, exc_info):
    """Handler called if the task should be retried."""
    # Late-acking tasks acknowledge now; the retry arrives as a new message.
    if self.task.acks_late:
        self.acknowledge()
    predicate = exc_info.exception
    self.send_event(
        "task-retried",
        exception=safe_repr(predicate.exc),
        traceback=safe_str(exc_info.traceback)
    )
    if _does_info:
        fields = {"id": self.id, "name": self.name, "exc": predicate}
        info(self.retry_msg.strip(), fields)
def task_status(request, task_id):
    """Return task status and result in JSON format."""
    result = AsyncResult(task_id)
    state = result.state
    retval = result.result
    response_data = {'id': task_id, 'status': state, 'result': retval}
    if state in states.EXCEPTION_STATES:
        # Exceptions are not JSON-serializable: send repr, class, traceback.
        response_data['result'] = safe_repr(retval)
        response_data['exc'] = get_full_cls_name(retval.__class__)
        response_data['traceback'] = result.traceback
    return JsonResponse({'task': response_data})
def __init__(self, message=None, exc=None, when=None, is_eager=False,
             sig=None, **kwargs):
    # Imported lazily — presumably to avoid an import-time cycle with
    # kombu; confirm before moving to module level.
    from kombu.utils.encoding import safe_repr
    self.message = message
    if isinstance(exc, str):
        # A plain string: keep it as the repr, with no exception object.
        self.exc, self.excs = None, exc
    else:
        self.exc, self.excs = exc, safe_repr(exc) if exc else None
    self.when = when
    self.is_eager = is_eager
    self.sig = sig
    # NOTE(review): `self` is also passed as an *argument* here, so the
    # instance ends up inside Exception.args — verify this is intentional
    # (e.g. for pickling round-trips) before changing.
    super().__init__(self, exc, when, **kwargs)
def on_retry(self, exc_info):
    """Handler called if the task should be retried."""
    if self.task.acks_late:
        self.acknowledge()
    inner_exc = exc_info.exception.exc
    tb = safe_str(exc_info.traceback)
    self.send_event('task-retried',
                    exception=safe_repr(inner_exc), traceback=tb)
    if _does_info:
        info(self.retry_msg.strip(),
             {'id': self.id, 'name': self.name, 'exc': exc_info.exception})
def _log_error(self, exc_info):
    """Log a failed task and send the ``task-failed`` event.

    Internal errors (worker machinery, not task code) escalate the log
    severity to CRITICAL and use the internal-error format string.
    """
    format = self.error_msg
    description = "raised exception"
    severity = logging.ERROR
    self.send_event("task-failed", uuid=self.id,
                    exception=safe_repr(exc_info.exception),
                    traceback=safe_str(exc_info.traceback))
    if exc_info.internal:
        format = self.internal_error_msg
        description = "INTERNAL ERROR"
        severity = logging.CRITICAL
    context = {
        "hostname": self.hostname,
        "id": self.id,
        "name": self.name,
        "exc": safe_repr(exc_info.exception),
        "traceback": safe_str(exc_info.traceback),
        "args": safe_repr(self.args),
        "kwargs": safe_repr(self.kwargs),
        "description": description
    }
    logger.log(severity, format.strip(), context,
               exc_info=exc_info.exc_info,
               extra={
                   "data": {
                       "id": self.id,
                       "name": self.name,
                       "hostname": self.hostname
                   }
               })
    # NOTE(review): the fallback here is the `object` *class*, which has
    # no `send_error_email` — an unknown task name would raise
    # AttributeError on the next line; confirm this is unreachable.
    task_obj = self.app.tasks.get(self.name, object)
    task_obj.send_error_email(context, exc_info.exception)
def apply_async(self, target, args=None, kwargs=None, **options):
    """Equivalent of the :func:`apply` built-in function.

    Callbacks should optimally return as soon as possible since
    otherwise the thread which handles the result will get blocked.
    """
    # Bug fix: mutable default arguments ([] / {}) are shared between
    # calls and can be mutated downstream; normalize None instead.
    args = [] if args is None else args
    kwargs = {} if kwargs is None else kwargs
    # Task messages carry the protocol payload in args[4].
    if len(args) == 5:
        task_id, task_name, retries = (
            args[4]['id'], args[4]['task'], args[4]['retries'])
    else:
        task_id, task_name, retries = None, None, None
    logger_task.info(
        "anan: Consumer-{}: Apply Task: "
        "{{task_id:{}, task_name:{}, retries:{}}}".format(
            os.getpid(), task_id, task_name, retries))
    if self._does_debug:
        logger.debug('TaskPool: Apply %s (args:%s kwargs:%s)',
                     target, safe_repr(args), safe_repr(kwargs))
    return self.on_apply(target, args, kwargs,
                         waitforslot=self.putlocks,
                         callbacks_propagate=self.callbacks_propagate,
                         **options)
def on_failure(self, exc_info, send_failed_event=True, return_ok=False):
    """Handler called if the task raised an exception."""
    task_ready(self)
    # MemoryError must propagate; Reject/Ignore short-circuit handling.
    if isinstance(exc_info.exception, MemoryError):
        raise MemoryError('Process got: %s' % (exc_info.exception,))
    elif isinstance(exc_info.exception, Reject):
        return self.reject(requeue=exc_info.exception.requeue)
    elif isinstance(exc_info.exception, Ignore):
        return self.acknowledge()
    exc = exc_info.exception
    if isinstance(exc, Retry):
        return self.on_retry(exc_info)
    # These are special cases where the process wouldn't've had
    # time to write the result.
    if isinstance(exc, Terminated):
        self._announce_revoked(
            'terminated', True, string(exc), False)
        send_failed_event = False  # already sent revoked event
    elif isinstance(exc, WorkerLostError) or not return_ok:
        self.task.backend.mark_as_failure(
            self.id, exc, request=self._context,
            store_result=self.store_errors,
        )
    # (acks_late) acknowledge after result stored.
    if self.task.acks_late:
        reject = (
            self.task.reject_on_worker_lost and
            isinstance(exc, WorkerLostError)
        )
        ack = self.task.acks_on_failure_or_timeout
        if reject:
            # Don't requeue if the broker already redelivered this message.
            requeue = not self.delivery_info.get('redelivered')
            self.reject(requeue=requeue)
            send_failed_event = False
        elif ack:
            self.acknowledge()
    if send_failed_event:
        self.send_event(
            'task-failed',
            exception=safe_repr(get_pickled_exception(exc_info.exception)),
            traceback=exc_info.traceback,
        )
    if not return_ok:
        error('Task handler raised error: %r', exc,
              exc_info=exc_info.exc_info)
def on_failure(self, exc_info, send_failed_event=True, return_ok=False):
    """Handler called if the task raised an exception."""
    task_ready(self)
    # MemoryError must propagate; Reject/Ignore short-circuit handling.
    if isinstance(exc_info.exception, MemoryError):
        raise MemoryError('Process got: %s' % (exc_info.exception, ))
    elif isinstance(exc_info.exception, Reject):
        return self.reject(requeue=exc_info.exception.requeue)
    elif isinstance(exc_info.exception, Ignore):
        return self.acknowledge()
    exc = exc_info.exception
    if isinstance(exc, Retry):
        return self.on_retry(exc_info)
    # These are special cases where the process wouldn't've had
    # time to write the result.
    if isinstance(exc, Terminated):
        self._announce_revoked('terminated', True, string(exc), False)
        send_failed_event = False  # already sent revoked event
    elif isinstance(exc, WorkerLostError) or not return_ok:
        self.task.backend.mark_as_failure(
            self.id, exc, request=self._context,
            store_result=self.store_errors,
        )
    # (acks_late) acknowledge after result stored.
    if self.task.acks_late:
        reject = (self.task.reject_on_worker_lost and
                  isinstance(exc, WorkerLostError))
        ack = self.task.acks_on_failure_or_timeout
        if reject:
            # Don't requeue if the broker already redelivered this message.
            requeue = not self.delivery_info.get('redelivered')
            self.reject(requeue=requeue)
            send_failed_event = False
        elif ack:
            self.acknowledge()
    if send_failed_event:
        self.send_event(
            'task-failed',
            exception=safe_repr(get_pickled_exception(exc_info.exception)),
            traceback=exc_info.traceback,
        )
    if not return_ok:
        error('Task handler raised error: %r', exc,
              exc_info=exc_info.exc_info)
def dispatch(self, request, *args, **kwargs):
    """Dispatch the request, mapping RPC errors onto HTTP responses.

    Returns:
        The response from the parent dispatch, or an error response
        (404 for no route, timeout for no reply, serialized exception
        for anything else).
    """
    self.nowait = kwargs.get("nowait", False)
    if request.method.lower() == "get":
        # `nowait` is an API flag, not a view keyword argument.
        kwargs.pop("nowait", None)
        if self.nowait:
            return self.NotImplemented("Operation cannot be async.")
    try:
        data = super(ApiView, self).dispatch(request, *args, **kwargs)
    except NoRouteError:
        return HttpResponseNotFound()
    except NoReplyError:
        return HttpResponseTimeout()
    # Bug fix: `except Exception, exc` is Python-2-only syntax;
    # `as` works on Python 2.6+ and Python 3.
    except Exception as exc:
        return Error({"nok": [safe_repr(exc),
                              "".join(format_exception(*sys.exc_info()))]})
    # Bug fix: `data` was assigned but never returned, so the success
    # path fell through and returned None.
    return data
def dispatch(self, request, *args, **kwargs):
    """Dispatch the request, mapping RPC errors onto HTTP responses.

    Returns:
        The response from the parent dispatch, or an error response
        (404 for no route, timeout for no reply, serialized exception
        for anything else).
    """
    self.nowait = kwargs.get('nowait', False)
    if request.method.lower() == 'get':
        # `nowait` is an API flag, not a view keyword argument.
        kwargs.pop('nowait', None)
        if self.nowait:
            return self.NotImplemented('Operation cannot be async.')
    try:
        data = super(ApiView, self).dispatch(request, *args, **kwargs)
    except NoRouteError:
        return HttpResponseNotFound()
    except NoReplyError:
        return HttpResponseTimeout()
    # Bug fix: `except Exception, exc` is Python-2-only syntax;
    # `as` works on Python 2.6+ and Python 3.
    except Exception as exc:
        return Error({'nok': [safe_repr(exc),
                              ''.join(format_exception(*sys.exc_info()))]})
    # Bug fix: `data` was assigned but never returned, so the success
    # path fell through and returned None.
    return data
def on_decode_error(self, message, exc):
    """Callback called if an error occurs while decoding a message.

    Simply logs the error and acknowledges the message so it
    doesn't enter a loop.

    Arguments:
        message (Message): The message received.
        exc (Exception): The exception being handled.
    """
    details = (
        exc,
        message.content_type,
        message.content_encoding,
        safe_repr(message.headers),
        dump_body(message, message.body),
    )
    crit(MESSAGE_DECODE_ERROR, *details, exc_info=1)
    message.ack()
def task_progress(self):
    """Return status/result data for the associated task, or ``None``."""
    task_id = self.task_id
    if task_id is None:
        return None
    result = AsyncResult(task_id)
    state = result.state
    retval = result.result
    data = {'id': task_id, 'status': state, 'result': retval}
    if state in states.EXCEPTION_STATES:
        # The raw exception isn't serializable; report repr/class/traceback.
        data['result'] = safe_repr(retval)
        data['exc'] = get_full_cls_name(retval.__class__)
        data['traceback'] = result.traceback
    return data
def on_decode_error(self, message, exc):
    """Log a message that failed to decode and acknowledge it.

    Acknowledging stops the broker from redelivering the undecodable
    message forever.

    :param message: The message with errors.
    :param exc: The original exception instance.
    """
    details = (message.content_type, message.content_encoding,
               safe_repr(message.headers),
               dump_body(message, message.body))
    crit(MESSAGE_DECODE_ERROR, exc, *details, exc_info=1)
    message.ack()
def _DISPATCH(self, body, ticket=None):
    """Dispatch *body* to the matching method in :attr:`state`.

    Handles expected exceptions and builds a reply mapping suitable
    to be sent back to the caller.

    Method names starting with underscore (``_``) are never
    dispatched, protecting special methods.

    On success the reply is ``{'ok': return_value, **default_fields}``;
    on error it is ``{'nok': [repr exc, str traceback],
    **default_fields}``.  :exc:`SystemExit` and
    :exc:`KeyboardInterrupt` are not handled and will propagate.

    :raises KeyError: if the method specified is unknown or is a
        special method (name starting with underscore).
    """
    if not ticket:
        # No ticket supplied: mint one from the local counter.
        ticket = sticket = str(next(self.ticket_counter))
    else:
        sticket = '%s' % (shortuuid(ticket), )
    try:
        method, args = itemgetter('method', 'args')(body)
        self.log.info('#%s --> %s', sticket, self._reprcall(method, args))
        handler = self.lookup_action(method)
        reply = {'ok': handler(args or {})}
        self.log.info('#%s <-- %s', sticket, reprkwargs(reply))
    except self.Next:
        # Control-flow exception: let another consumer handle it.
        raise
    except Exception as exc:
        current_exc_info = sys.exc_info()
        reply = {'nok': [safe_repr(exc),
                         self._get_traceback(current_exc_info)]}
        self.log.error('#%s <-- nok=%r', sticket, exc)
    return dict(self._default_fields, **reply)
def ensure_serializable(items, encoder):
    """Ensure items will serialize.

    For a given list of arbitrary objects, return a tuple where each
    object that *encoder* cannot handle is replaced by its safe string
    representation.

    Arguments:
        items (Iterable[Any]): Objects to serialize.
        encoder (Callable): Callable function to serialize with.
    """
    def _coerce(obj):
        # Probe the encoder; fall back to a repr string on any failure.
        try:
            encoder(obj)
        except Exception:  # pylint: disable=broad-except
            return safe_repr(obj)
        return obj

    return tuple(_coerce(obj) for obj in items)
def on_retry(self, exc):
    """Record a task retry in the failure-tracking database.

    Extends the base handler by persisting host/queue/task details
    plus the (HTML-decoded) arguments, exception repr and traceback.

    Arguments:
        exc: retry exception info; ``exc.exception`` holds the original
            error and ``exc.traceback`` the formatted trace.
    """
    super(MyRequest, self).on_retry(exc)
    hostname = safe_str(self.hostname)
    queue = safe_str(self.delivery_info.get('routing_key'))
    taskname = safe_str(self.task.name)
    taskid = safe_str(self.id)
    payload = self.decodeHtml(self.argsrepr)
    excep = self.decodeHtml(safe_repr(exc.exception))
    trace = self.decodeHtml(safe_str(exc.traceback))
    # info()['time_start'] is a unix timestamp; convert to datetime
    # for storage.
    stime = self.info()['time_start']
    newstime = datetime.datetime.fromtimestamp(stime)
    ctime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    # Fixed: renamed local from `type` (shadowed the builtin) to
    # fail_type; 1 presumably marks "retry" rows — TODO confirm against
    # the CeleryDatabases schema.
    fail_type = 1
    CDatabase = CeleryDatabases()
    CDatabase.inster_fail_task(hostname, queue, fail_type, taskname,
                               taskid, payload, excep, trace,
                               newstime, ctime)
def test_on_retry(self):
    """on_failure() for a Retry exception sends a task-retried event,
    both with info-logging disabled and with einfo marked internal."""
    job = self.get_request(self.mytask.s(1, f='x'))
    job.eventer = Mock(name='.eventer')
    try:
        raise Retry('foo', KeyError('moofoobar'))
    # Fixed: was a bare `except:` — catch only the exception we just
    # raised so unrelated errors aren't silently swallowed.
    except Retry:
        einfo = ExceptionInfo()
    job.on_failure(einfo)
    job.eventer.send.assert_called_with(
        'task-retried',
        uuid=job.id,
        exception=safe_repr(einfo.exception.exc),
        traceback=safe_str(einfo.traceback),
    )
    prev, module._does_info = module._does_info, False
    try:
        job.on_failure(einfo)
    finally:
        module._does_info = prev
    einfo.internal = True
    job.on_failure(einfo)
def _message_report(self, body, message):
    """Render a diagnostic report for *message* (used in error logs)."""
    parts = [safe_repr(attr) for attr in (
        message.content_type,
        message.content_encoding,
        message.delivery_info,
        message.headers,
    )]
    return MESSAGE_REPORT.format(dump_body(message, body), *parts)
def dump_body(m, body):
    """Format message body for debugging purposes.

    Returns a truncated safe repr of the body together with the body
    size in bytes.

    Arguments:
        m: the message; ``m.body`` is the raw (serialized) body.
        body: the decoded body, or None (the v2 protocol does not
            deserialize the body).
    """
    # v2 protocol does not deserialize body
    body = m.body if body is None else body
    # Fixed: when m.body is None but a decoded body was passed,
    # len(m.body) raised TypeError; fall back to the resolved body.
    size = len(m.body) if m.body is not None else len(body)
    return '{} ({}b)'.format(truncate(safe_repr(body), 1024), size)
def repr_result(self, result, maxlen=RESULT_MAXLEN):
    """Return a size-limited textual representation of a task result.

    Arguments:
        result: the raw task return value; non-strings are converted
            with a safe repr first.
        maxlen (int): longest representation returned untruncated.
    """
    # 46 is the length needed to fit
    # 'the quick brown fox jumps over the lazy dog' :)
    if not isinstance(result, string_t):
        result = safe_repr(result)
    # Fixed: maxlen was checked but not forwarded, so truncate() used
    # its own default length instead of the requested maxlen.
    return truncate(result, maxlen) if len(result) > maxlen else result