def send_task(self, name, args=None, kwargs=None, countdown=None, eta=None, task_id=None, producer=None, connection=None, router=None, result_cls=None, expires=None, publisher=None, link=None, link_error=None, add_to_parent=True, group_id=None, retries=0, chord=None, reply_to=None, time_limit=None, soft_time_limit=None, root_id=None, parent_id=None, route_name=None, shadow=None, chain=None, **options): """Send task by name. :param name: Name of task to call (e.g. `"tasks.add"`). :keyword result_cls: Specify custom result class. Default is using :meth:`AsyncResult`. Otherwise supports the same arguments as :meth:`@-Task.apply_async`. """ parent = have_parent = None amqp = self.amqp task_id = task_id or uuid() producer = producer or publisher # XXX compat router = router or amqp.router conf = self.conf if conf.task_always_eager: # pragma: no cover warnings.warn(AlwaysEagerIgnored( 'task_always_eager has no effect on send_task', ), stacklevel=2) options = router.route(options, route_name or name, args, kwargs) if root_id is None: parent, have_parent = self.current_worker_task, True if parent: root_id = parent.request.root_id or parent.request.id if parent_id is None: if not have_parent: parent, have_parent = self.current_worker_task, True if parent: parent_id = parent.request.id message = amqp.create_task_message( task_id, name, args, kwargs, countdown, eta, group_id, expires, retries, chord, maybe_list(link), maybe_list(link_error), reply_to or self.oid, time_limit, soft_time_limit, self.conf.task_send_sent_event, root_id, parent_id, shadow, chain, ) if connection: producer = amqp.Producer(connection) with self.producer_or_acquire(producer) as P: self.backend.on_task_call(P, task_id) amqp.send_task_message(P, name, message, **options) result = (result_cls or self.AsyncResult)(task_id) if add_to_parent: if not have_parent: parent, have_parent = self.current_worker_task, True if parent: parent.add_trail(result) return result
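# A hedged usage sketch for the send_task() API above: calling a task by name
# without importing its module. The app name, broker URL and the "tasks.add"
# task are assumptions made purely for illustration.
from celery import Celery

app = Celery('proj', broker='amqp://')  # assumed broker URL

# Returns an AsyncResult bound to the generated (or supplied) task id.
result = app.send_task('tasks.add', args=(2, 2), countdown=10, expires=120)
print(result.id)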
def import_default_modules(self): return [ self.import_task_module(m) for m in ( set(maybe_list(self.app.conf.CELERY_IMPORTS)) | set(maybe_list(self.app.conf.CELERY_INCLUDE)) | self.builtin_modules) ]
def send_task(self, name, args=None, kwargs=None, countdown=None, eta=None, task_id=None, producer=None, connection=None, router=None, result_cls=None, expires=None, publisher=None, link=None, link_error=None, add_to_parent=True, reply_to=None, **options): task_id = task_id or uuid() producer = producer or publisher # XXX compat router = router or self.amqp.router conf = self.conf if conf.CELERY_ALWAYS_EAGER: # pragma: no cover warnings.warn(AlwaysEagerIgnored( 'CELERY_ALWAYS_EAGER has no effect on send_task', ), stacklevel=2) options = router.route(options, name, args, kwargs) if connection: producer = self.amqp.TaskProducer(connection) with self.producer_or_acquire(producer) as P: self.backend.on_task_call(P, task_id) task_id = P.publish_task( name, args, kwargs, countdown=countdown, eta=eta, task_id=task_id, expires=expires, callbacks=maybe_list(link), errbacks=maybe_list(link_error), reply_to=reply_to or self.oid, **options ) result = (result_cls or self.AsyncResult)(task_id) if add_to_parent: parent = get_current_worker_task() if parent: parent.add_trail(result) return result
def apply(self, args=None, kwargs=None, link=None, link_error=None, **options): """Execute this task locally, by blocking until the task returns. :param args: positional arguments passed on to the task. :param kwargs: keyword arguments passed on to the task. :keyword throw: Re-raise task exceptions. Defaults to the :setting:`CELERY_EAGER_PROPAGATES_EXCEPTIONS` setting. :rtype :class:`celery.result.EagerResult`: """ # trace imports Task, so need to import inline. from celery.app.trace import eager_trace_task app = self._get_app() args = args or () # add 'self' if this is a bound method. if self.__self__ is not None: args = (self.__self__, ) + tuple(args) kwargs = kwargs or {} task_id = options.get('task_id') or uuid() retries = options.get('retries', 0) throw = app.either('CELERY_EAGER_PROPAGATES_EXCEPTIONS', options.pop('throw', None)) # Make sure we get the task instance, not class. task = app._tasks[self.name] request = {'id': task_id, 'retries': retries, 'is_eager': True, 'logfile': options.get('logfile'), 'loglevel': options.get('loglevel', 0), 'callbacks': maybe_list(link), 'errbacks': maybe_list(link_error), 'delivery_info': {'is_eager': True}} if self.accept_magic_kwargs: default_kwargs = {'task_name': task.name, 'task_id': task_id, 'task_retries': retries, 'task_is_eager': True, 'logfile': options.get('logfile'), 'loglevel': options.get('loglevel', 0), 'delivery_info': {'is_eager': True}} supported_keys = fun_takes_kwargs(task.run, default_kwargs) extend_with = dict((key, val) for key, val in items(default_kwargs) if key in supported_keys) kwargs.update(extend_with) tb = None retval, info = eager_trace_task(task, task_id, args, kwargs, app=self._get_app(), request=request, propagate=throw) if isinstance(retval, ExceptionInfo): retval, tb = retval.exception, retval.traceback state = states.SUCCESS if info is None else info.state return EagerResult(task_id, retval, state, traceback=tb)
def import_default_modules(self): signals.import_modules.send(sender=self.app) return [ self.import_task_module(m) for m in ( tuple(self.builtin_modules) + tuple(maybe_list(self.app.conf.CELERY_IMPORTS)) + tuple(maybe_list(self.app.conf.CELERY_INCLUDE)) ) ]
def import_default_modules(self): signals.import_modules.send(sender=self.app) return [ self.import_task_module(m) for m in ( tuple(self.builtin_modules) + tuple(maybe_list(self.app.conf.imports)) + tuple(maybe_list(self.app.conf.include)) ) ]
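# A hedged configuration sketch for the settings read by
# import_default_modules() above (new-style lowercase names); the module
# paths are placeholders.
from celery import Celery

app = Celery('proj')
app.conf.update(
    imports=('proj.tasks',),        # modules imported when the worker starts
    include=('proj.extra_tasks',),  # additional modules, same effect
)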
def apply(self, args=None, kwargs=None, link=None, link_error=None, task_id=None, retries=None, throw=None, logfile=None, loglevel=None, headers=None, **options): """Execute this task locally, by blocking until the task returns. Arguments: args (Tuple): positional arguments passed on to the task. kwargs (Dict): keyword arguments passed on to the task. throw (bool): Re-raise task exceptions. Defaults to the :setting:`task_eager_propagates` setting. Returns: celery.result.EagerResult: pre-evaluated result. """ # trace imports Task, so need to import inline. from celery.app.trace import build_tracer app = self._get_app() args = args or () # add 'self' if this is a bound method. if self.__self__ is not None: args = (self.__self__,) + tuple(args) kwargs = kwargs or {} task_id = task_id or uuid() retries = retries or 0 if throw is None: throw = app.conf.task_eager_propagates # Make sure we get the task instance, not class. task = app._tasks[self.name] request = { 'id': task_id, 'retries': retries, 'is_eager': True, 'logfile': logfile, 'loglevel': loglevel or 0, 'hostname': gethostname(), 'callbacks': maybe_list(link), 'errbacks': maybe_list(link_error), 'headers': headers, 'delivery_info': {'is_eager': True}, } tb = None tracer = build_tracer( task.name, task, eager=True, propagate=throw, app=self._get_app(), ) ret = tracer(task_id, args, kwargs, request) retval = ret.retval if isinstance(retval, ExceptionInfo): retval, tb = retval.exception, retval.traceback state = states.SUCCESS if ret.info is None else ret.info.state return EagerResult(task_id, retval, state, traceback=tb)
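# A hedged usage sketch for Task.apply() as implemented above: run the task
# inline in the current process and get an EagerResult back. The add task is
# an assumption for illustration.
from celery import Celery

app = Celery('proj')

@app.task
def add(x, y):
    return x + y

res = add.apply(args=(2, 3), throw=True)  # blocks until the task returns
assert res.get() == 5
assert res.state == 'SUCCESS'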
def apply(self, args=None, kwargs=None, link=None, link_error=None, **options): """Execute this task locally, by blocking until the task returns. :param args: positional arguments passed on to the task. :param kwargs: keyword arguments passed on to the task. :keyword throw: Re-raise task exceptions. Defaults to the :setting:`CELERY_EAGER_PROPAGATES_EXCEPTIONS` setting. :rtype :class:`celery.result.EagerResult`: """ # trace imports Task, so need to import inline. from billiard.einfo import ExceptionInfo from celery.app.trace import build_tracer app = self._get_app() args = args or () # add 'self' if this is a bound method. if self.__self__ is not None: args = (self.__self__, ) + tuple(args) kwargs = kwargs or {} task_id = options.get('task_id') or uuid() retries = options.get('retries', 0) throw = app.either('CELERY_EAGER_PROPAGATES_EXCEPTIONS', options.pop('throw', None)) # Make sure we get the task instance, not class. task = app._tasks[self.name] request = {'id': task_id, 'retries': retries, 'is_eager': True, 'logfile': options.get('logfile'), 'loglevel': options.get('loglevel', 0), 'callbacks': maybe_list(link), 'errbacks': maybe_list(link_error), 'headers': options.get('headers'), 'delivery_info': {'is_eager': True}} tb = None tracer = build_tracer( task.name, task, eager=True, propagate=throw, app=self._get_app(), ) ret = tracer(task_id, args, kwargs, request) retval = ret.retval if isinstance(retval, ExceptionInfo): retval, tb = retval.exception, retval.traceback state = states.SUCCESS if ret.info is None else ret.info.state return EagerResult(task_id, retval, state, traceback=tb)
def revoke(state, task_id, terminate=False, signal=None, **kwargs): """Revoke task by task id.""" # supports list argument since 3.1 task_ids, task_id = maybe_list(task_id) or [], None to_terminate = set() terminated = set() for task_id in task_ids: revoked.add(task_id) if terminate: to_terminate.add(task_id) if to_terminate: signum = _signals.signum(signal or "TERM") # reserved_requests changes size during iteration # so need to consume the items first, then terminate after. requests = set(_find_requests_by_id(to_terminate, worker_state.reserved_requests)) for request in requests: logger.info("Terminating %s (%s)", request.id, signum) request.terminate(state.consumer.pool, signal=signum) terminated.add(request.id) if len(terminated) >= len(to_terminate): break if not terminated: return {"ok": "terminate: tasks unknown"} return {"ok": "terminate: {0}".format(", ".join(terminated))} idstr = ", ".join(task_ids) logger.info("Tasks flagged as revoked: %s", idstr) return {"ok": "tasks {0} flagged as revoked".format(idstr)}
def revoke(state, task_id, terminate=False, signal=None, **kwargs): """Revoke task by task id.""" # supports list argument since 3.1 task_ids, task_id = set(maybe_list(task_id) or []), None size = len(task_ids) terminated = set() revoked.update(task_ids) if terminate: signum = _signals.signum(signal or 'TERM') # reserved_requests changes size during iteration # so need to consume the items first, then terminate after. requests = set(_find_requests_by_id( task_ids, worker_state.reserved_requests, )) for request in requests: if request.id not in terminated: terminated.add(request.id) logger.info('Terminating %s (%s)', request.id, signum) request.terminate(state.consumer.pool, signal=signum) if len(terminated) >= size: break if not terminated: return {'ok': 'terminate: tasks unknown'} return {'ok': 'terminate: {0}'.format(', '.join(terminated))} idstr = ', '.join(task_ids) logger.info('Tasks flagged as revoked: %s', idstr) return {'ok': 'tasks {0} flagged as revoked'.format(idstr)}
def revoke(state, task_id, terminate=False, signal=None, **kwargs): """Revoke task by task id.""" # supports list argument since 3.1 task_ids, task_id = set(maybe_list(task_id) or []), None size = len(task_ids) terminated = set() revoked.update(task_ids) if terminate: signum = _signals.signum(signal or TERM_SIGNAME) for request in _find_requests_by_id(task_ids): if request.id not in terminated: terminated.add(request.id) logger.info('Terminating %s (%s)', request.id, signum) request.terminate(state.consumer.pool, signal=signum) if len(terminated) >= size: break if not terminated: return ok('terminate: tasks unknown') return ok('terminate: {0}'.format(', '.join(terminated))) idstr = ', '.join(task_ids) logger.info('Tasks flagged as revoked: %s', idstr) return ok('tasks {0} flagged as revoked'.format(idstr))
def revoke(state, task_id, terminate=False, signal=None, **kwargs): """Revoke task by task id (or list of ids). Keyword Arguments: terminate (bool): Also terminate the process if the task is active. signal (str): Name of signal to use for terminate (e.g., ``KILL``). """ # pylint: disable=redefined-outer-name # XXX Note that this redefines `terminate`: # Outside of this scope that is a function. # supports list argument since 3.1 task_ids, task_id = set(maybe_list(task_id) or []), None size = len(task_ids) terminated = set() worker_state.revoked.update(task_ids) if terminate: signum = _signals.signum(signal or TERM_SIGNAME) for request in _find_requests_by_id(task_ids): if request.id not in terminated: terminated.add(request.id) logger.info('Terminating %s (%s)', request.id, signum) request.terminate(state.consumer.pool, signal=signum) if len(terminated) >= size: break if not terminated: return ok('terminate: tasks unknown') return ok('terminate: {0}'.format(', '.join(terminated))) idstr = ', '.join(task_ids) logger.info('Tasks flagged as revoked: %s', idstr) return ok('tasks {0} flagged as revoked'.format(idstr))
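# A hedged client-side sketch: the revoke() control handlers above run on the
# worker when it receives a broadcast like this one. The task ids are
# placeholders.
from celery import Celery

app = Celery('proj')
app.control.revoke('39d9f8a1-placeholder-id')                         # flag as revoked
app.control.revoke(['id-1', 'id-2'], terminate=True, signal='KILL')   # list form + terminate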
def apply(self, args=None, kwargs=None, link=None, link_error=None, **options): """Execute this task locally, by blocking until the task returns. :param args: positional arguments passed on to the task. :param kwargs: keyword arguments passed on to the task. :keyword throw: Re-raise task exceptions. Defaults to the :setting:`task_eager_propagates` setting. :rtype :class:`celery.result.EagerResult`: """ # trace imports Task, so need to import inline. from celery.app.trace import build_tracer app = self._get_app() args = args or () # add 'self' if this is a bound method. if self.__self__ is not None: args = (self.__self__,) + tuple(args) kwargs = kwargs or {} task_id = options.get("task_id") or uuid() retries = options.get("retries", 0) throw = app.either("task_eager_propagates", options.pop("throw", None)) # Make sure we get the task instance, not class. task = app._tasks[self.name] request = { "id": task_id, "retries": retries, "is_eager": True, "logfile": options.get("logfile"), "loglevel": options.get("loglevel", 0), "callbacks": maybe_list(link), "errbacks": maybe_list(link_error), "headers": options.get("headers"), "delivery_info": {"is_eager": True}, } tb = None tracer = build_tracer(task.name, task, eager=True, propagate=throw, app=self._get_app()) ret = tracer(task_id, args, kwargs, request) retval = ret.retval if isinstance(retval, ExceptionInfo): retval, tb = retval.exception, retval.traceback state = states.SUCCESS if ret.info is None else ret.info.state return EagerResult(task_id, retval, state, traceback=tb)
def flatten_links(self): """Return a recursive list of dependencies (unchain if you will, but with links intact).""" return list(_chain.from_iterable(_chain( [[self]], (link.flatten_links() for link in maybe_list(self.options.get('link')) or []) )))
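# A hedged sketch of the structure flatten_links() above walks: a signature
# whose 'link' option points at further signatures. The add task is
# illustrative.
from celery import Celery

app = Celery('proj')

@app.task
def add(x, y):
    return x + y

sig = add.s(2, 2)
sig.link(add.s(4))                      # stored in sig.options['link']
sig.options['link'][0].link(add.s(8))   # a nested callback

# Yields the signature itself plus every linked signature, recursively.
print(sig.flatten_links())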
def _configured_for_event(self, name, sender=None): for sub in maybe_list( self.app.settings.THORN_SUBSCRIBERS.get(name) or []): if isinstance(sub, Callable): for subsub in sub(name, sender=sender): yield subsub else: yield sub
def query_task(state, ids, **kwargs): ids = maybe_list(ids) return dict({ req.id: ('reserved', req.info()) for req in _find_requests_by_id(ids, worker_state.reserved_requests) }, **{ req.id: ('active', req.info()) for req in _find_requests_by_id(ids, worker_state.active_requests) })
def battery_watcher(self, consumer): try: topic_list = consumer.app.ros_node_client.topics() if self.battery_topic not in topic_list: _logger.warn("Topic {battery_topic} not detected. giving up.".format(battery_topic=self.battery_topic)) return try: battery_msg = consumer.app.ros_node_client.topic_extract( self.battery_topic ) # we assume standard sensor message structure here battpct = battery_msg.get(consumer.app.conf.CELEROS_BATTERY_LEVEL_FIELD) if battpct is None: _logger.warn("Battery percentage not found in battery message : {0}".format(battery_msg)) else: # _logger.info("Watching Battery : {0}% ".format(battpct)) # enabling/disabling consumer to queues bound by battery requirements for bpct, q in maybe_list(consumer.app.conf.CELEROS_MIN_BATTERY_PCT_QUEUE): if isinstance(q, kombu.Queue): qname = q.name else: # assumed str qname = q for kq in consumer.app.conf.CELERY_QUEUES: if kq.name == qname: # we found a queue with the same name already configured, so we should use it. q = kq break # to stop consuming from a queue we only need the queue name if battpct < bpct and consumer.task_consumer.consuming_from(qname): _logger.warn("Battery Low {0}%. Ignoring task queue {1} until battery is recharged.".format(battpct, qname)) consumer.cancel_task_queue(qname) elif not battpct < bpct and not consumer.task_consumer.consuming_from(qname): # To listen to a queue we might need to create it. # We should reuse the ones from config if possible if isinstance(q, kombu.Queue): consumer.add_task_queue(q) else: # if we didn't find the queue among the configured queues, we need to create it. consumer.add_task_queue( queue=qname, # it seems consumer is not applying the default from config from here? exchange=consumer.app.conf.CELERY_DEFAULT_EXCHANGE, exchange_type=consumer.app.conf.CELERY_DEFAULT_EXCHANGE_TYPE, routing_key=consumer.app.conf.CELERY_DEFAULT_ROUTING_KEY, ) except pyros.PyrosServiceTimeout as exc: _logger.warn("Failed to get battery levels. giving up.") return except pyros.PyrosServiceTimeout as exc: _logger.warn("Failed to lookup topics. giving up.")
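# A hedged configuration sketch for the celeros battery watcher above: the
# CELEROS_MIN_BATTERY_PCT_QUEUE setting is iterated as (percentage, queue)
# pairs, where the queue may be a name or a kombu.Queue. Field names,
# thresholds and queue names are assumptions for illustration.
import kombu

CELEROS_BATTERY_LEVEL_FIELD = 'percentage'
CELEROS_MIN_BATTERY_PCT_QUEUE = [
    (20, 'navigation'),                  # stop consuming below 20% battery
    (50, kombu.Queue('heavy_compute')),  # kombu.Queue instances also work
]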
def query_task(state, ids, **kwargs): ids = maybe_list(ids) def reqinfo(state, req): return state, req.info() reqs = {req.id: ("reserved", req.info()) for req in _find_requests_by_id(ids, worker_state.reserved_requests)} reqs.update({req.id: ("active", req.info()) for req in _find_requests_by_id(ids, worker_state.active_requests)}) return reqs
def traverse_subscribers(it, *args, **kwargs): stream = deque([it]) while stream: for node in maybe_list(stream.popleft()): if isinstance(node, string_types) and node.startswith('!'): node = symbol_by_name(node[1:]) if isinstance(node, Callable): node = node(*args, **kwargs) if is_list(node): stream.append(node) elif node: yield node
def send_task(self, name, args=None, kwargs=None, countdown=None, eta=None, task_id=None, producer=None, connection=None, router=None, result_cls=None, expires=None, publisher=None, link=None, link_error=None, add_to_parent=True, reply_to=None, **options): task_id = task_id or uuid() producer = producer or publisher # XXX compat router = router or self.amqp.router conf = self.conf options = router.route(options, name, args, kwargs) if connection: producer = self.amqp.TaskProducer(connection) with self.producer_or_acquire(producer) as P: self.backend.on_task_call(P, task_id) publish_future = P.publish_task( name, args, kwargs, countdown=countdown, eta=eta, task_id=task_id, expires=expires, callbacks=maybe_list(link), errbacks=maybe_list(link_error), reply_to=reply_to or self.oid, **options ) return publish_future
def send_task(self, name, args=None, kwargs=None, countdown=None, eta=None, task_id=None, producer=None, connection=None, router=None, result_cls=None, expires=None, publisher=None, link=None, link_error=None, add_to_parent=True, group_id=None, retries=0, chord=None, reply_to=None, time_limit=None, soft_time_limit=None, root_id=None, parent_id=None, route_name=None, shadow=None, **options): amqp = self.amqp task_id = task_id or uuid() producer = producer or publisher # XXX compat router = router or amqp.router conf = self.conf if conf.CELERY_ALWAYS_EAGER: # pragma: no cover warnings.warn(AlwaysEagerIgnored( 'CELERY_ALWAYS_EAGER has no effect on send_task', ), stacklevel=2) options = router.route(options, route_name or name, args, kwargs) message = amqp.create_task_message( task_id, name, args, kwargs, countdown, eta, group_id, expires, retries, chord, maybe_list(link), maybe_list(link_error), reply_to or self.oid, time_limit, soft_time_limit, self.conf.CELERY_SEND_TASK_SENT_EVENT, root_id, parent_id, shadow, ) if connection: producer = amqp.Producer(connection) with self.producer_or_acquire(producer) as P: self.backend.on_task_call(P, task_id) amqp.send_task_message(P, name, message, **options) result = (result_cls or self.AsyncResult)(task_id) if add_to_parent: parent = get_current_worker_task() if parent: parent.add_trail(result) return result
def send_task(self, name, args=None, kwargs=None, countdown=None, eta=None, task_id=None, producer=None, connection=None, result_cls=None, expires=None, queues=None, publisher=None, link=None, link_error=None, **options): producer = producer or publisher # XXX compat if self.conf.CELERY_ALWAYS_EAGER: # pragma: no cover warnings.warn(AlwaysEagerIgnored( 'CELERY_ALWAYS_EAGER has no effect on send_task')) result_cls = result_cls or self.AsyncResult router = self.amqp.Router(queues) options.setdefault('compression', self.conf.CELERY_MESSAGE_COMPRESSION) options = router.route(options, name, args, kwargs) with self.producer_or_acquire(producer) as producer: return result_cls(producer.publish_task( name, args, kwargs, task_id=task_id, countdown=countdown, eta=eta, callbacks=maybe_list(link), errbacks=maybe_list(link_error), expires=expires, **options ))
def query_task(state, ids, **kwargs): ids = maybe_list(ids) def reqinfo(state, req): return state, req.info() reqs = dict((req.id, ('reserved', req.info())) for req in _find_requests_by_id( ids, worker_state.reserved_requests)) reqs.update(dict( (req.id, ('active', req.info())) for req in _find_requests_by_id( ids, worker_state.active_requests, ) )) return reqs
def prepare_steps(self, args, tasks, root_id=None, parent_id=None, link_error=None, app=None, last_task_id=None, group_id=None, chord_body=None, clone=True, from_dict=Signature.from_dict): app = app or self.app # use chain message field for protocol 2 and later. # this avoids pickle blowing the stack on the recursion # required by linking task together in a tree structure. # (why is pickle using recursion? or better yet why cannot python # do tail call optimization making recursion actually useful?) use_link = self._use_link if use_link is None and app.conf.task_protocol == 1: use_link = True steps = deque(tasks) steps_pop = steps.pop steps_extend = steps.extend prev_task = None prev_res = prev_prev_res = None tasks, results = [], [] i = 0 while steps: task = steps_pop() is_first_task, is_last_task = not steps, not i if not isinstance(task, abstract.CallableSignature): task = from_dict(task, app=app) if isinstance(task, group): task = maybe_unroll_group(task) # first task gets partial args from chain if clone: task = task.clone(args) if is_first_task else task.clone() elif is_first_task: task.args = tuple(args) + tuple(task.args) if isinstance(task, chain): # splice the chain steps_extend(task.tasks) continue if isinstance(task, group) and prev_task: # automatically upgrade group(...) | s to chord(group, s) # for chords we freeze by pretending it's a normal # signature instead of a group. tasks.pop() results.pop() task = chord( task, body=prev_task, task_id=prev_res.task_id, root_id=root_id, app=app, ) prev_res = prev_prev_res if is_last_task: # chain(task_id=id) means task id is set for the last task # in the chain. If the chord is part of a chord/group # then that chord/group must synchronize based on the # last task in the chain, so we only set the group_id and # chord callback for the last task. res = task.freeze( last_task_id, root_id=root_id, group_id=group_id, chord=chord_body, ) else: res = task.freeze(root_id=root_id) i += 1 if prev_task: prev_task.set_parent_id(task.id) if use_link: # link previous task to this task. task.link(prev_task) if prev_res: prev_res.parent = res if is_first_task and parent_id is not None: task.set_parent_id(parent_id) if link_error: for errback in maybe_list(link_error): task.link_error(errback) tasks.append(task) results.append(res) prev_task, prev_prev_res, prev_res = ( task, prev_res, res, ) if root_id is None and tasks: root_id = tasks[-1].id for task in reversed(tasks): task.options['root_id'] = root_id return tasks, results
def send_task(self, name, args=None, kwargs=None, countdown=None, eta=None, task_id=None, producer=None, connection=None, router=None, result_cls=None, expires=None, publisher=None, link=None, link_error=None, add_to_parent=True, group_id=None, retries=0, chord=None, reply_to=None, time_limit=None, soft_time_limit=None, root_id=None, parent_id=None, route_name=None, shadow=None, chain=None, task_type=None, **options): """Send task by name. Supports the same arguments as :meth:`@-Task.apply_async`. Arguments: name (str): Name of task to call (e.g., `"tasks.add"`). result_cls (AsyncResult): Specify custom result class. """ parent = have_parent = None amqp = self.amqp task_id = task_id or uuid() producer = producer or publisher # XXX compat router = router or amqp.router conf = self.conf if conf.task_always_eager: # pragma: no cover warnings.warn(AlwaysEagerIgnored( 'task_always_eager has no effect on send_task', ), stacklevel=2) ignored_result = options.pop('ignore_result', False) options = router.route(options, route_name or name, args, kwargs, task_type) if not root_id or not parent_id: parent = self.current_worker_task if parent: if not root_id: root_id = parent.request.root_id or parent.request.id if not parent_id: parent_id = parent.request.id if conf.task_inherit_parent_priority: options.setdefault( 'priority', parent.request.delivery_info.get('priority')) message = amqp.create_task_message( task_id, name, args, kwargs, countdown, eta, group_id, expires, retries, chord, maybe_list(link), maybe_list(link_error), reply_to or self.oid, time_limit, soft_time_limit, self.conf.task_send_sent_event, root_id, parent_id, shadow, chain, argsrepr=options.get('argsrepr'), kwargsrepr=options.get('kwargsrepr'), ) if connection: producer = amqp.Producer(connection, auto_declare=False) with self.producer_or_acquire(producer) as P: with P.connection._reraise_as_library_errors(): if not ignored_result: self.backend.on_task_call(P, task_id) amqp.send_task_message(P, name, message, **options) result = (result_cls or self.AsyncResult)(task_id) # We avoid using the constructor since a custom result class # can be used, in which case the constructor may still use # the old signature. result.ignored = ignored_result if add_to_parent: if not have_parent: parent, have_parent = self.current_worker_task, True if parent: parent.add_trail(result) return result
def add(self, fds, callback, flags): for fd in maybe_list(fds, None): try: self._add(fd, callback, flags) except ValueError: self._discard(fd)
def query_task(state, ids, **kwargs): """Query for task information by id.""" return { req.id: (_state_of_task(req), req.info()) for req in _find_requests_by_id(maybe_list(ids)) }
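# A hedged client-side sketch: query_task() above answers the broadcast sent
# by the inspect API. The task ids are placeholders; the reply maps each id
# to a (state, request-info) pair per worker.
from celery import Celery

app = Celery('proj')
replies = app.control.inspect().query_task('id-1', 'id-2')
# e.g. {'worker@host': {'id-1': ['active', {...}]}}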
def query_task(state, ids, **kwargs): return { req.id: (_state_of_task(req), req.info()) for req in _find_requests_by_id(maybe_list(ids)) }
def prepare_steps(self, args, kwargs, tasks, root_id=None, parent_id=None, link_error=None, app=None, last_task_id=None, group_id=None, chord_body=None, clone=True, from_dict=Signature.from_dict): app = app or self.app # use chain message field for protocol 2 and later. # this avoids pickle blowing the stack on the recursion # required by linking task together in a tree structure. # (why is pickle using recursion? or better yet why cannot python # do tail call optimization making recursion actually useful?) use_link = self._use_link if use_link is None and app.conf.task_protocol == 1: use_link = True steps = deque(tasks) steps_pop = steps.pop steps_extend = steps.extend prev_task = None prev_res = None tasks, results = [], [] i = 0 # NOTE: We are doing this in reverse order. # The result is a list of tasks in reverse order, that is # passed as the ``chain`` message field. # As it's reversed the worker can just do ``chain.pop()`` to # get the next task in the chain. while steps: task = steps_pop() is_first_task, is_last_task = not steps, not i if not isinstance(task, abstract.CallableSignature): task = from_dict(task, app=app) if isinstance(task, group): task = maybe_unroll_group(task) # first task gets partial args from chain if clone: if is_first_task: task = task.clone(args, kwargs) else: task = task.clone() elif is_first_task: task.args = tuple(args) + tuple(task.args) if isinstance(task, _chain): # splice the chain steps_extend(task.tasks) continue if isinstance(task, group) and prev_task: # automatically upgrade group(...) | s to chord(group, s) # for chords we freeze by pretending it's a normal # signature instead of a group. tasks.pop() results.pop() task = chord( task, body=prev_task, task_id=prev_res.task_id, root_id=root_id, app=app, ) if is_last_task: # chain(task_id=id) means task id is set for the last task # in the chain. If the chord is part of a chord/group # then that chord/group must synchronize based on the # last task in the chain, so we only set the group_id and # chord callback for the last task. res = task.freeze( last_task_id, root_id=root_id, group_id=group_id, chord=chord_body, ) else: res = task.freeze(root_id=root_id) i += 1 if prev_task: if use_link: # link previous task to this task. task.link(prev_task) if prev_res and not prev_res.parent: prev_res.parent = res if link_error: for errback in maybe_list(link_error): task.link_error(errback) tasks.append(task) results.append(res) prev_task, prev_res = task, res if isinstance(task, chord): app.backend.ensure_chords_allowed() # If the task is a chord, and the body is a chain # the chain has already been prepared, and res is # set to the last task in the callback chain. # We need to change that so that it points to the # group result object. node = res while node.parent: node = node.parent prev_res = node return tasks, results
def replace(self, sig): """Replace this task, with a new task inheriting the task id. Execution of the host task ends immediately and no subsequent statements will be run. .. versionadded:: 4.0 Arguments: sig (~@Signature): signature to replace with. Raises: ~@Ignore: This is always raised when called in asynchronous context. It is best to always use ``return self.replace(...)`` to convey to the reader that the task won't continue after being replaced. """ chord = self.request.chord if 'chord' in sig.options: raise ImproperlyConfigured( "A signature replacing a task must not be part of a chord" ) if isinstance(sig, group): sig |= self.app.tasks['celery.accumulate'].s(index=0).set( link=self.request.callbacks, link_error=self.request.errbacks, ) elif isinstance(sig, _chain): if not sig.tasks: raise ImproperlyConfigured( "Cannot replace with an empty chain" ) if self.request.chain: # We need to freeze the new signature with the current task's ID to # ensure that we don't disassociate the new chain from the existing # task IDs which would break previously constructed results # objects. sig.freeze(self.request.id) if "link" in sig.options: final_task_links = sig.tasks[-1].options.setdefault("link", []) final_task_links.extend(maybe_list(sig.options["link"])) # Construct the new remainder of the task by chaining the signature # we're being replaced by with signatures constructed from the # chain elements in the current request. for t in reversed(self.request.chain): sig |= signature(t, app=self.app) sig.set( chord=chord, group_id=self.request.group, group_index=self.request.group_index, root_id=self.request.root_id, ) sig.freeze(self.request.id) if self.request.is_eager: return sig.apply().get() else: sig.delay() raise Ignore('Replaced by new task')
def default_modules(self): return ( tuple(self.builtin_modules) + tuple(maybe_list(self.app.conf.imports)) + tuple(maybe_list(self.app.conf.include)) )
def extend_list_option(self, key, value): items = self._with_list_option(key) items.extend(maybe_list(value))
def replace(self, sig): """Replace this task, with a new task inheriting the task id. Execution of the host task ends immediately and no subsequent statements will be run. .. versionadded:: 4.0 Arguments: sig (Signature): signature to replace with. Raises: ~@Ignore: This is always raised when called in asynchronous context. It is best to always use ``return self.replace(...)`` to convey to the reader that the task won't continue after being replaced. """ chord = self.request.chord if 'chord' in sig.options: raise ImproperlyConfigured( "A signature replacing a task must not be part of a chord") if isinstance(sig, _chain) and not getattr(sig, "tasks", True): raise ImproperlyConfigured("Cannot replace with an empty chain") # Ensure callbacks or errbacks from the replaced signature are retained if isinstance(sig, group): # Groups get uplifted to a chord so that we can link onto the body sig |= self.app.tasks['celery.accumulate'].s(index=0) for callback in maybe_list(self.request.callbacks) or []: sig.link(callback) for errback in maybe_list(self.request.errbacks) or []: sig.link_error(errback) # If the replacement signature is a chain, we need to push callbacks # down to the final task so they run at the right time even if we # proceed to link further tasks from the original request below if isinstance(sig, _chain) and "link" in sig.options: final_task_links = sig.tasks[-1].options.setdefault("link", []) final_task_links.extend(maybe_list(sig.options["link"])) # We need to freeze the replacement signature with the current task's # ID to ensure that we don't disassociate it from the existing task IDs # which would break previously constructed results objects. sig.freeze(self.request.id) # Ensure the important options from the original signature are retained replaced_task_nesting = self.request.get('replaced_task_nesting', 0) + 1 sig.set(chord=chord, group_id=self.request.group, group_index=self.request.group_index, root_id=self.request.root_id, replaced_task_nesting=replaced_task_nesting) # If the task being replaced is part of a chain, we need to re-create # it with the replacement signature - these subsequent tasks will # retain their original task IDs as well for t in reversed(self.request.chain or []): sig |= signature(t, app=self.app) # Stamping sig with parents groups stamped_headers = self.request.stamped_headers if self.request.stamps: groups = self.request.stamps.get("groups") sig.stamp(visitor=GroupStampingVisitor( groups=groups, stamped_headers=stamped_headers)) # Finally, either apply or delay the new signature! if self.request.is_eager: return sig.apply().get() else: sig.delay() raise Ignore('Replaced by new task')
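# A hedged usage sketch for Task.replace() as documented above: swap the
# running task for a new signature and stop executing the host task. The
# fetch/fan_out tasks are assumptions for illustration.
from celery import Celery, group

app = Celery('proj')

@app.task
def fetch(url):
    return url

@app.task(bind=True)
def fan_out(self, urls):
    # replace() raises Ignore internally, so nothing after this line runs;
    # callbacks/errbacks of the original request are carried over.
    return self.replace(group(fetch.s(u) for u in urls))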
def apply(self, args=None, kwargs=None, link=None, link_error=None, task_id=None, retries=None, throw=None, logfile=None, loglevel=None, headers=None, **options): """Execute this task locally, by blocking until the task returns. Arguments: args (Tuple): positional arguments passed on to the task. kwargs (Dict): keyword arguments passed on to the task. throw (bool): Re-raise task exceptions. Defaults to the :setting:`task_eager_propagates` setting. Returns: celery.result.EagerResult: pre-evaluated result. """ # trace imports Task, so need to import inline. from celery.app.trace import build_tracer app = self._get_app() args = args or () kwargs = kwargs or {} task_id = task_id or uuid() retries = retries or 0 if throw is None: throw = app.conf.task_eager_propagates # Make sure we get the task instance, not class. task = app._tasks[self.name] request = { 'id': task_id, 'retries': retries, 'is_eager': True, 'logfile': logfile, 'loglevel': loglevel or 0, 'hostname': gethostname(), 'callbacks': maybe_list(link), 'errbacks': maybe_list(link_error), 'headers': headers, 'delivery_info': { 'is_eager': True }, } tb = None tracer = build_tracer( task.name, task, eager=True, propagate=throw, app=self._get_app(), ) ret = tracer(task_id, args, kwargs, request) retval = ret.retval if isinstance(retval, ExceptionInfo): retval, tb = retval.exception, retval.traceback state = states.SUCCESS if ret.info is None else ret.info.state return EagerResult(task_id, retval, state, traceback=tb)
def send_task(self, name, args=None, kwargs=None, countdown=None, eta=None, task_id=None, producer=None, connection=None, router=None, result_cls=None, expires=None, publisher=None, link=None, link_error=None, add_to_parent=True, group_id=None, retries=0, chord=None, reply_to=None, time_limit=None, soft_time_limit=None, root_id=None, parent_id=None, **options): amqp = self.amqp task_id = task_id or uuid() producer = producer or publisher # XXX compat router = router or amqp.router conf = self.conf if conf.CELERY_ALWAYS_EAGER: # pragma: no cover warnings.warn(AlwaysEagerIgnored( 'CELERY_ALWAYS_EAGER has no effect on send_task', ), stacklevel=2) options = router.route(options, name, args, kwargs) message = amqp.create_task_message( task_id, name, args, kwargs, countdown, eta, group_id, expires, retries, chord, maybe_list(link), maybe_list(link_error), reply_to or self.oid, time_limit, soft_time_limit, self.conf.CELERY_SEND_TASK_SENT_EVENT, root_id, parent_id, ) if connection: producer = amqp.Producer(connection) with self.producer_or_acquire(producer) as P: self.backend.on_task_call(P, task_id) amqp.send_task_message(P, name, message, **options) result = (result_cls or self.AsyncResult)(task_id) if add_to_parent: parent = get_current_worker_task() if parent: parent.add_trail(result) return result
def flatten_links(self): """Return a recursive list of dependencies (unchain if you will, but with links intact).""" return list(chain_from_iterable(_chain([[self]], (link.flatten_links() for link in maybe_list(self.options.get("link")) or []))))
def test_maybe_list(self): self.assertEqual(maybe_list(1), [1]) self.assertEqual(maybe_list([1]), [1]) self.assertIsNone(maybe_list(None))
def flatten_links(self): return list(_chain.from_iterable(_chain( [[self]], (link.flatten_links() for link in maybe_list(self.options.get('link')) or []) )))
def test_maybe_list(): assert maybe_list(1) == [1] assert maybe_list([1]) == [1] assert maybe_list(None) is None
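# A minimal sketch of the maybe_list() behaviour that both test variants
# above pin down: None passes through, a scalar is wrapped in a list, and a
# list comes back unchanged. The real kombu implementation handles more
# iterable types than this.
def maybe_list(value):
    if value is None:
        return None
    if isinstance(value, list):
        return value
    return [value]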
def _configured_for_event(self, name, **context): return self._traverse_subscribers( maybe_list(self.app.settings.THORN_SUBSCRIBERS.get(name)) or [], name, **context)
def apply_async(self, args=None, kwargs=None, task_id=None, producer=None, connection=None, router=None, link=None, link_error=None, publisher=None, add_to_parent=True, **options): """Apply tasks asynchronously by sending a message. :keyword args: The positional arguments to pass on to the task (a :class:`list` or :class:`tuple`). :keyword kwargs: The keyword arguments to pass on to the task (a :class:`dict`) :keyword countdown: Number of seconds into the future that the task should execute. Defaults to immediate execution (do not confuse with the `immediate` flag, as they are unrelated). :keyword eta: A :class:`~datetime.datetime` object describing the absolute time and date of when the task should be executed. May not be specified if `countdown` is also supplied. (Do not confuse this with the `immediate` flag, as they are unrelated). :keyword expires: Either a :class:`int`, describing the number of seconds, or a :class:`~datetime.datetime` object that describes the absolute time and date of when the task should expire. The task will not be executed after the expiration time. :keyword connection: Re-use existing broker connection instead of establishing a new one. :keyword retry: If enabled sending of the task message will be retried in the event of connection loss or failure. Default is taken from the :setting:`CELERY_TASK_PUBLISH_RETRY` setting. Note you need to handle the producer/connection manually for this to work. :keyword retry_policy: Override the retry policy used. See the :setting:`CELERY_TASK_PUBLISH_RETRY` setting. :keyword routing_key: The routing key used to route the task to a worker server. Defaults to the :attr:`routing_key` attribute. :keyword exchange: The named exchange to send the task to. Defaults to the :attr:`exchange` attribute. :keyword exchange_type: The exchange type to initialize the exchange if not already declared. Defaults to the :attr:`exchange_type` attribute. :keyword immediate: Request immediate delivery. Will raise an exception if the task cannot be routed to a worker immediately. (Do not confuse this parameter with the `countdown` and `eta` settings, as they are unrelated). Defaults to the :attr:`immediate` attribute. :keyword mandatory: Mandatory routing. Raises an exception if there's no running workers able to take on this task. Defaults to the :attr:`mandatory` attribute. :keyword priority: The task priority, a number between 0 and 9. Defaults to the :attr:`priority` attribute. :keyword serializer: A string identifying the default serialization method to use. Can be `pickle`, `json`, `yaml`, `msgpack` or any custom serialization method that has been registered with :mod:`kombu.serialization.registry`. Defaults to the :attr:`serializer` attribute. :keyword compression: A string identifying the compression method to use. Can be one of ``zlib``, ``bzip2``, or any custom compression methods registered with :func:`kombu.compression.register`. Defaults to the :setting:`CELERY_MESSAGE_COMPRESSION` setting. :keyword link: A single, or a list of subtasks to apply if the task exits successfully. :keyword link_error: A single, or a list of subtasks to apply if an error occurs while executing the task. :keyword producer: :class:[email protected]` instance to use. :keyword add_to_parent: If set to True (default) and the task is applied while executing another task, then the result will be appended to the parent tasks ``request.children`` attribute. :keyword publisher: Deprecated alias to ``producer``. .. 
note:: If the :setting:`CELERY_ALWAYS_EAGER` setting is set, it will be replaced by a local :func:`apply` call instead. """ producer = producer or publisher app = self._get_app() router = router or self.app.amqp.router conf = app.conf # add 'self' if this is a bound method. if self.__self__ is not None: args = (self.__self__, ) + tuple(args) if conf.CELERY_ALWAYS_EAGER: return self.apply(args, kwargs, task_id=task_id, **options) options = dict(extract_exec_options(self), **options) options = router.route(options, self.name, args, kwargs) if connection: producer = app.amqp.TaskProducer(connection) with app.default_producer(producer) as P: evd = None if conf.CELERY_SEND_TASK_SENT_EVENT: evd = app.events.Dispatcher(channel=P.channel, buffer_while_offline=False) task_id = P.delay_task(self.name, args, kwargs, task_id=task_id, event_dispatcher=evd, callbacks=maybe_list(link), errbacks=maybe_list(link_error), **options) result = self.AsyncResult(task_id) if add_to_parent: parent = get_current_worker_task() if parent: parent.request.children.append(result) return result
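# A hedged usage sketch for the apply_async() keywords documented above
# (countdown, eta, expires, link/link_error); the add and log_result tasks
# are assumptions for illustration.
from datetime import datetime, timedelta
from celery import Celery

app = Celery('proj')

@app.task
def add(x, y):
    return x + y

@app.task
def log_result(value):
    print(value)

result = add.apply_async(
    args=(2, 2),
    countdown=5,                                     # run ~5 seconds from now
    expires=datetime.utcnow() + timedelta(hours=1),  # or an int of seconds
    link=log_result.s(),                             # applied on success
)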
def flatten_links(self): return list(chain_from_iterable(_chain([[self]], (link.flatten_links() for link in maybe_list(self.options.get('link')) or []))))
def apply_async(self, args=None, kwargs=None, task_id=None, producer=None, connection=None, router=None, link=None, link_error=None, publisher=None, add_to_parent=True, **options): """Apply tasks asynchronously by sending a message. :keyword args: The positional arguments to pass on to the task (a :class:`list` or :class:`tuple`). :keyword kwargs: The keyword arguments to pass on to the task (a :class:`dict`) :keyword countdown: Number of seconds into the future that the task should execute. Defaults to immediate execution (do not confuse with the `immediate` flag, as they are unrelated). :keyword eta: A :class:`~datetime.datetime` object describing the absolute time and date of when the task should be executed. May not be specified if `countdown` is also supplied. (Do not confuse this with the `immediate` flag, as they are unrelated). :keyword expires: Either a :class:`int`, describing the number of seconds, or a :class:`~datetime.datetime` object that describes the absolute time and date of when the task should expire. The task will not be executed after the expiration time. :keyword connection: Re-use existing broker connection instead of establishing a new one. :keyword retry: If enabled sending of the task message will be retried in the event of connection loss or failure. Default is taken from the :setting:`CELERY_TASK_PUBLISH_RETRY` setting. Note you need to handle the producer/connection manually for this to work. :keyword retry_policy: Override the retry policy used. See the :setting:`CELERY_TASK_PUBLISH_RETRY` setting. :keyword routing_key: Custom routing key used to route the task to a worker server. If in combination with a ``queue`` argument only used to specify custom routing keys to topic exchanges. :keyword queue: The queue to route the task to. This must be a key present in :setting:`CELERY_QUEUES`, or :setting:`CELERY_CREATE_MISSING_QUEUES` must be enabled. See :ref:`guide-routing` for more information. :keyword exchange: Named custom exchange to send the task to. Usually not used in combination with the ``queue`` argument. :keyword priority: The task priority, a number between 0 and 9. Defaults to the :attr:`priority` attribute. :keyword serializer: A string identifying the default serialization method to use. Can be `pickle`, `json`, `yaml`, `msgpack` or any custom serialization method that has been registered with :mod:`kombu.serialization.registry`. Defaults to the :attr:`serializer` attribute. :keyword compression: A string identifying the compression method to use. Can be one of ``zlib``, ``bzip2``, or any custom compression methods registered with :func:`kombu.compression.register`. Defaults to the :setting:`CELERY_MESSAGE_COMPRESSION` setting. :keyword link: A single, or a list of subtasks to apply if the task exits successfully. :keyword link_error: A single, or a list of subtasks to apply if an error occurs while executing the task. :keyword producer: :class:[email protected]` instance to use. :keyword add_to_parent: If set to True (default) and the task is applied while executing another task, then the result will be appended to the parent tasks ``request.children`` attribute. :keyword publisher: Deprecated alias to ``producer``. Also supports all keyword arguments supported by :meth:`kombu.messaging.Producer.publish`. .. note:: If the :setting:`CELERY_ALWAYS_EAGER` setting is set, it will be replaced by a local :func:`apply` call instead. 
""" producer = producer or publisher app = self._get_app() router = router or self.app.amqp.router conf = app.conf # add 'self' if this is a bound method. if self.__self__ is not None: args = (self.__self__, ) + tuple(args) if conf.CELERY_ALWAYS_EAGER: return self.apply(args, kwargs, task_id=task_id, link=link, link_error=link_error, **options) options = dict(extract_exec_options(self), **options) options = router.route(options, self.name, args, kwargs) if connection: producer = app.amqp.TaskProducer(connection) with app.producer_or_acquire(producer) as P: logger_task.info("anan: app-{} starts sending task:{}".\ format(os.getpid(), self.name)) task_id = P.publish_task(self.name, args, kwargs, task_id=task_id, callbacks=maybe_list(link), errbacks=maybe_list(link_error), **options) result = self.AsyncResult(task_id) logger_task.info("anan: app-{} ends sending task:{} with task_id:{}".\ format(os.getpid(), self.name, task_id)) if add_to_parent: parent = get_current_worker_task() if parent: parent.request.children.append(result) return result
def apply(self, args=None, kwargs=None, link=None, link_error=None, **options): """Execute this task locally, by blocking until the task returns. :param args: positional arguments passed on to the task. :param kwargs: keyword arguments passed on to the task. :keyword throw: Re-raise task exceptions. Defaults to the :setting:`CELERY_EAGER_PROPAGATES_EXCEPTIONS` setting. :rtype :class:`celery.result.EagerResult`: """ # trace imports Task, so need to import inline. from celery.app.trace import build_tracer app = self._get_app() args = args or () # add 'self' if this is a bound method. if self.__self__ is not None: args = (self.__self__, ) + tuple(args) kwargs = kwargs or {} task_id = options.get('task_id') or uuid() retries = options.get('retries', 0) throw = app.either('CELERY_EAGER_PROPAGATES_EXCEPTIONS', options.pop('throw', None)) # Make sure we get the task instance, not class. task = app._tasks[self.name] request = { 'id': task_id, 'retries': retries, 'is_eager': True, 'logfile': options.get('logfile'), 'loglevel': options.get('loglevel', 0), 'callbacks': maybe_list(link), 'errbacks': maybe_list(link_error), 'headers': options.get('headers'), 'delivery_info': { 'is_eager': True } } tb = None tracer = build_tracer( task.name, task, eager=True, propagate=throw, app=self._get_app(), ) ret = tracer(task_id, args, kwargs, request) retval = ret.retval if isinstance(retval, ExceptionInfo): retval, tb = retval.exception, retval.traceback state = states.SUCCESS if ret.info is None else ret.info.state return EagerResult(task_id, retval, state, traceback=tb)