Example #1
def setup_worker_optimizations(app, hostname=None):
    global trace_task_ret

    hostname = hostname or gethostname()

    # make sure custom Task.__call__ methods that call super
    # will not mess up the request/task stack.
    _install_stack_protection()

    # all new threads start without a current app, so if an app is not
    # passed on to the thread it will fall back to the "default app",
    # which then could be the wrong app.  So for the worker
    # we set this to always return our app.  This is a hack,
    # and means that only a single app can be used for workers
    # running in the same process.
    app.set_current()
    set_default_app(app)

    # evaluate all task classes by finalizing the app.
    app.finalize()

    # set fast shortcut to task registry
    _localized[:] = [
        app._tasks,
        prepare_accept_content(app.conf.accept_content),
        hostname,
    ]

    trace_task_ret = _fast_trace_task
    from celery.worker import request as request_module
    request_module.trace_task_ret = _fast_trace_task
    request_module.__optimize__()
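A minimal usage sketch (app name, broker, and hostname below are illustrative): the worker calls this once at startup, after which every task execution goes through the preloaded _fast_trace_task path instead of re-resolving the current app, registry, and serializers per call. In Celery the function is importable from celery.app.trace.

from celery import Celery
from celery.app.trace import setup_worker_optimizations

app = Celery('proj', broker='memory://')

@app.task
def add(x, y):
    return x + y

# One-time, process-wide setup: afterwards `app` is both the current and
# the default app, so a second app in the same process would be shadowed
# (the single-app limitation the comment above warns about).
setup_worker_optimizations(app, hostname='worker1@example.com')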
Example #2
    def __init__(self, on_task_request,
                 init_callback=noop, hostname=None,
                 pool=None, app=None,
                 timer=None, controller=None, hub=None, amqheartbeat=None,
                 worker_options=None, disable_rate_limits=False,
                 initial_prefetch_count=2, prefetch_multiplier=1, **kwargs):
        self.app = app
        self.controller = controller
        self.init_callback = init_callback
        self.hostname = hostname or gethostname()
        self.pid = os.getpid()
        self.pool = pool
        self.timer = timer
        self.strategies = self.Strategies()
        self.conninfo = self.app.connection_for_read()
        self.connection_errors = self.conninfo.connection_errors
        self.channel_errors = self.conninfo.channel_errors
        self._restart_state = restart_state(maxR=5, maxT=1)

        self._does_info = logger.isEnabledFor(logging.INFO)
        self._limit_order = 0
        self.on_task_request = on_task_request
        self.on_task_message = set()
        self.amqheartbeat_rate = self.app.conf.broker_heartbeat_checkrate
        self.disable_rate_limits = disable_rate_limits
        self.initial_prefetch_count = initial_prefetch_count
        self.prefetch_multiplier = prefetch_multiplier

        # this contains a tokenbucket for each task type by name, used for
        # rate limits, or None if rate limits are disabled for that task.
        self.task_buckets = defaultdict(lambda: None)
        self.reset_rate_limits()

        self.hub = hub
        if self.hub:
            self.amqheartbeat = amqheartbeat
            if self.amqheartbeat is None:
                self.amqheartbeat = self.app.conf.broker_heartbeat
        else:
            self.amqheartbeat = 0

        if not hasattr(self, 'loop'):
            self.loop = loops.asynloop if hub else loops.synloop

        if _detect_environment() == 'gevent':
            # there's a gevent bug that causes timeouts to not be reset,
            # so if the connection timeout is exceeded once, it can NEVER
            # connect again.
            self.app.conf.broker_connection_timeout = None

        self._pending_operations = []

        self.steps = []
        self.blueprint = self.Blueprint(
            app=self.app, on_close=self.on_close,
        )
        self.blueprint.apply(self, **dict(worker_options or {}, **kwargs))
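The two prefetch arguments are normally computed by the worker rather than passed by hand; a sketch of the arithmetic with illustrative values (that the computation happens in the worker blueprint before this constructor runs is an assumption about the wiring, not shown in the snippet above):

# The broker may deliver up to concurrency * prefetch_multiplier
# unacknowledged messages to this consumer at once.
concurrency = 4              # pool size, e.g. --concurrency=4
prefetch_multiplier = 1      # worker_prefetch_multiplier setting
initial_prefetch_count = concurrency * prefetch_multiplier
assert initial_prefetch_count == 4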
Example #3
def _trace_task_ret(name, uuid, request, body, content_type,
                    content_encoding, loads=loads_message, app=None,
                    **extra_request):
    app = app or current_app._get_current_object()
    embed = None
    if content_type:
        accept = prepare_accept_content(app.conf.accept_content)
        args, kwargs, embed = loads(
            body, content_type, content_encoding, accept=accept,
        )
    else:
        args, kwargs, embed = body
    hostname = gethostname()
    request.update({
        'args': args, 'kwargs': kwargs,
        'hostname': hostname, 'is_eager': False,
    }, **embed or {})
    R, I, T, Rstr = trace_task(app.tasks[name],
                               uuid, args, kwargs, request, app=app)
    return (1, R, T) if I else (0, Rstr, T)
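The first element of the returned tuple encodes the outcome: 1 together with the raw result R when an ExceptionInfo was produced, 0 together with the truncated repr Rstr on success. A hypothetical helper (not part of Celery) that unpacks it:

def unpack_trace_result(result):
    # Layout comes straight from `(1, R, T) if I else (0, Rstr, T)` above.
    failed, payload, runtime = result
    if failed:
        return 'FAILED: {0!r} (runtime={1})'.format(payload, runtime)
    return 'OK: {0} (runtime={1})'.format(payload, runtime)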
Example #4
def multi_args(p, cmd='celery worker', append='', prefix='', suffix=''):
    names = p.values
    options = dict(p.options)
    ranges = len(names) == 1
    if ranges:
        try:
            names, prefix = _get_ranges(names)
        except ValueError:
            pass
    cmd = options.pop('--cmd', cmd)
    append = options.pop('--append', append)
    hostname = options.pop('--hostname', options.pop('-n', gethostname()))
    prefix = options.pop('--prefix', prefix) or ''
    suffix = options.pop('--suffix', suffix) or hostname
    suffix = '' if suffix in ('""', "''") else suffix

    _update_ns_opts(p, names)
    _update_ns_ranges(p, ranges)
    return (_args_for_node(p, name, prefix, suffix, cmd, append, options)
            for name in names)
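The suffix handling is the subtle part: a literally quoted empty string on the command line ('""' or "''") suppresses the @hostname part of node names entirely, while any other falsy suffix falls back to the hostname. A hypothetical extraction of those two lines for illustration:

def resolve_suffix(suffix, hostname='worker.example.com'):
    # Falsy suffix falls back to the hostname; quoted-empty disables it.
    suffix = suffix or hostname
    return '' if suffix in ('""', "''") else suffix

assert resolve_suffix(None) == 'worker.example.com'
assert resolve_suffix('""') == ''
assert resolve_suffix('v1') == 'v1'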
Example #5
def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
                 Info=TraceInfo, eager=False, propagate=False, app=None,
                 monotonic=monotonic, truncate=truncate,
                 trace_ok_t=trace_ok_t, IGNORE_STATES=IGNORE_STATES):
    """Return a function that traces task execution; catches all
    exceptions and updates the result backend with the state and result.

    If the call was successful, it saves the result to the task result
    backend, and sets the task status to `"SUCCESS"`.

    If the call raises :exc:`~@Retry`, it extracts
    the original exception, uses that as the result and sets the task state
    to `"RETRY"`.

    If the call results in an exception, it saves the exception as the task
    result, and sets the task state to `"FAILURE"`.

    Return a function that takes the following arguments:

        :param uuid: The id of the task.
        :param args: List of positional args to pass on to the function.
        :param kwargs: Keyword arguments mapping to pass on to the function.
        :keyword request: Request dict.

    """
    # If the task doesn't define a custom __call__ method
    # we optimize it away by simply calling the run method directly,
    # saving the extra method call and a line less in the stack trace.
    fun = task if task_has_custom(task, '__call__') else task.run

    loader = loader or app.loader
    backend = task.backend
    ignore_result = task.ignore_result
    track_started = not eager and (task.track_started and not ignore_result)
    publish_result = not eager and not ignore_result
    hostname = hostname or gethostname()

    loader_task_init = loader.on_task_init
    loader_cleanup = loader.on_process_cleanup

    task_on_success = None
    task_after_return = None
    if task_has_custom(task, 'on_success'):
        task_on_success = task.on_success
    if task_has_custom(task, 'after_return'):
        task_after_return = task.after_return

    store_result = backend.store_result
    mark_as_done = backend.mark_as_done
    backend_cleanup = backend.process_cleanup

    pid = os.getpid()

    request_stack = task.request_stack
    push_request = request_stack.push
    pop_request = request_stack.pop
    push_task = _task_stack.push
    pop_task = _task_stack.pop
    _does_info = logger.isEnabledFor(logging.INFO)
    resultrepr_maxsize = task.resultrepr_maxsize

    prerun_receivers = signals.task_prerun.receivers
    postrun_receivers = signals.task_postrun.receivers
    success_receivers = signals.task_success.receivers

    from celery import canvas
    signature = canvas.maybe_signature  # maybe_ does not clone if already

    def on_error(request, exc, uuid, state=FAILURE, call_errbacks=True):
        if propagate:
            raise
        I = Info(state, exc)
        R = I.handle_error_state(
            task, request, eager=eager, call_errbacks=call_errbacks,
        )
        return I, R, I.state, I.retval

    def trace_task(uuid, args, kwargs, request=None):
        # R      - is the possibly prepared return value.
        # I      - is the Info object.
        # T      - runtime
        # Rstr   - textual representation of return value
        # retval - is the always unmodified return value.
        # state  - is the resulting task state.

        # This function is very long because we have unrolled all the calls
        # for performance reasons, and because the function is so long
        # we want the main variables (I, and R) to stand out visually from
        # the rest of the variables, so breaking PEP8 is worth it ;)
        R = I = T = Rstr = retval = state = None
        task_request = None
        time_start = monotonic()
        try:
            try:
                kwargs.items
            except AttributeError:
                raise InvalidTaskError(
                    'Task keyword arguments is not a mapping')
            push_task(task)
            task_request = Context(request or {}, args=args,
                                   called_directly=False, kwargs=kwargs)
            root_id = task_request.root_id or uuid
            push_request(task_request)
            try:
                # -*- PRE -*-
                if prerun_receivers:
                    send_prerun(sender=task, task_id=uuid, task=task,
                                args=args, kwargs=kwargs)
                loader_task_init(uuid, task)
                if track_started:
                    store_result(
                        uuid, {'pid': pid, 'hostname': hostname}, STARTED,
                        request=task_request,
                    )

                # -*- TRACE -*-
                try:
                    R = retval = fun(*args, **kwargs)
                    state = SUCCESS
                except Reject as exc:
                    I, R = Info(REJECTED, exc), ExceptionInfo(internal=True)
                    state, retval = I.state, I.retval
                    I.handle_reject(task, task_request)
                except Ignore as exc:
                    I, R = Info(IGNORED, exc), ExceptionInfo(internal=True)
                    state, retval = I.state, I.retval
                    I.handle_ignore(task, task_request)
                except Retry as exc:
                    I, R, state, retval = on_error(
                        task_request, exc, uuid, RETRY, call_errbacks=False)
                except Exception as exc:
                    I, R, state, retval = on_error(task_request, exc, uuid)
                except BaseException as exc:
                    raise
                else:
                    try:
                        # callback tasks must be applied before the result is
                        # stored, so that result.children is populated.

                        # groups are called inline and will store trail
                        # separately, so need to call them separately
                        # so that the trail's not added multiple times :(
                        # (Issue #1936)
                        callbacks = task.request.callbacks
                        if callbacks:
                            if len(task.request.callbacks) > 1:
                                sigs, groups = [], []
                                for sig in callbacks:
                                    sig = signature(sig, app=app)
                                    if isinstance(sig, group):
                                        groups.append(sig)
                                    else:
                                        sigs.append(sig)
                                for group_ in groups:
                                    group_.apply_async(
                                        (retval,),
                                        parent_id=uuid, root_id=root_id,
                                    )
                                if sigs:
                                    group(sigs, app=app).apply_async(
                                        (retval,),
                                        parent_id=uuid, root_id=root_id,
                                    )
                            else:
                                signature(callbacks[0], app=app).apply_async(
                                    (retval,), parent_id=uuid, root_id=root_id,
                                )

                        # execute first task in chain
                        chain = task_request.chain
                        if chain:
                            signature(chain.pop(), app=app).apply_async(
                                (retval,), chain=chain,
                                parent_id=uuid, root_id=root_id,
                            )
                        mark_as_done(
                            uuid, retval, task_request, publish_result,
                        )
                    except EncodeError as exc:
                        I, R, state, retval = on_error(task_request, exc, uuid)
                    else:
                        if task_on_success:
                            task_on_success(retval, uuid, args, kwargs)
                        if success_receivers:
                            send_success(sender=task, result=retval)
                        if _does_info:
                            T = monotonic() - time_start
                            Rstr = saferepr(R, resultrepr_maxsize)
                            info(LOG_SUCCESS, {
                                'id': uuid, 'name': name,
                                'return_value': Rstr, 'runtime': T,
                            })

                # -* POST *-
                if state not in IGNORE_STATES:
                    if task_after_return:
                        task_after_return(
                            state, retval, uuid, args, kwargs, None,
                        )
            finally:
                try:
                    if postrun_receivers:
                        send_postrun(sender=task, task_id=uuid, task=task,
                                     args=args, kwargs=kwargs,
                                     retval=retval, state=state)
                finally:
                    pop_task()
                    pop_request()
                    if not eager:
                        try:
                            backend_cleanup()
                            loader_cleanup()
                        except (KeyboardInterrupt, SystemExit, MemoryError):
                            raise
                        except Exception as exc:
                            logger.error('Process cleanup failed: %r', exc,
                                         exc_info=True)
        except MemoryError:
            raise
        except Exception as exc:
            if eager:
                raise
            R = report_internal_error(task, exc)
            if task_request is not None:
                I, _, _, _ = on_error(task_request, exc, uuid)
        return trace_ok_t(R, I, T, Rstr)

    return trace_task
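A minimal usage sketch, assuming a finalized app with a registered task named 'proj.add' (names and ids are illustrative). The returned trace_task reports problems through its 4-tuple instead of raising, except for the propagate/MemoryError cases visible above:

tracer = build_tracer('proj.add', app.tasks['proj.add'], app=app)
R, I, T, Rstr = tracer('task-id-1', (2, 2), {}, request={})
# Success: I is None and R is the return value; T and Rstr are only
# filled in when INFO-level logging is enabled.
# Failure: I is the Info object, R wraps the exception, and the state
# has already been pushed to the result backend by handle_error_state.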
Example #6
    def __init__(self, message, on_ack=noop,
                 hostname=None, eventer=None, app=None,
                 connection_errors=None, request_dict=None,
                 task=None, on_reject=noop, body=None,
                 headers=None, decoded=False, utc=True,
                 maybe_make_aware=maybe_make_aware,
                 maybe_iso8601=maybe_iso8601, **opts):
        if headers is None:
            headers = message.headers
        if body is None:
            body = message.body
        self.app = app
        self.message = message
        self.body = body
        self.utc = utc
        self._decoded = decoded
        if decoded:
            self.content_type = self.content_encoding = None
        else:
            self.content_type, self.content_encoding = (
                message.content_type, message.content_encoding,
            )

        self.id = headers['id']
        type = self.type = self.name = headers['task']
        self.root_id = headers.get('root_id')
        self.parent_id = headers.get('parent_id')
        if 'shadow' in headers:
            self.name = headers['shadow'] or self.name
        if 'timelimit' in headers:
            self.time_limits = headers['timelimit']
        self.argsrepr = headers.get('argsrepr', '')
        self.kwargsrepr = headers.get('kwargsrepr', '')
        self.on_ack = on_ack
        self.on_reject = on_reject
        self.hostname = hostname or gethostname()
        self.eventer = eventer
        self.connection_errors = connection_errors or ()
        self.task = task or self.app.tasks[type]

        # timezone means the message is timezone-aware, and the only timezone
        # supported at this point is UTC.
        eta = headers.get('eta')
        if eta is not None:
            try:
                eta = maybe_iso8601(eta)
            except (AttributeError, ValueError, TypeError) as exc:
                raise InvalidTaskError(
                    'invalid eta value {0!r}: {1}'.format(eta, exc))
            self.eta = maybe_make_aware(eta, self.tzlocal)
        else:
            self.eta = None

        expires = headers.get('expires')
        if expires is not None:
            try:
                expires = maybe_iso8601(expires)
            except (AttributeError, ValueError, TypeError) as exc:
                raise InvalidTaskError(
                    'invalid expires value {0!r}: {1}'.format(expires, exc))
            self.expires = maybe_make_aware(expires, self.tzlocal)
        else:
            self.expires = None

        delivery_info = message.delivery_info or {}
        properties = message.properties or {}
        headers.update({
            'reply_to': properties.get('reply_to'),
            'correlation_id': properties.get('correlation_id'),
            'delivery_info': {
                'exchange': delivery_info.get('exchange'),
                'routing_key': delivery_info.get('routing_key'),
                'priority': properties.get('priority'),
                'redelivered': delivery_info.get('redelivered'),
            },
        })
        self.request_dict = headers
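A sketch of the protocol v2 message headers this constructor consumes; the field names come from the lookups above, the values are illustrative:

headers = {
    'id': 'uuid-1',             # required
    'task': 'proj.add',         # required; also the registry lookup key
    'root_id': 'uuid-0',
    'parent_id': None,
    'shadow': None,             # optional display-name override
    'timelimit': (None, None),  # optional (soft, hard) limits
    'argsrepr': '(2, 2)',
    'kwargsrepr': '{}',
    'eta': None,                # ISO-8601 string when scheduled
    'expires': None,            # ISO-8601 string when set
}
# __init__ then merges reply_to, correlation_id, and a delivery_info
# mapping into this same dict and exposes it as self.request_dict.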