Example #1
def register_signal():
    def process_failure_signal(sender, task_id, exception, traceback, einfo,
                               args, kwargs, **kw):
        # Render a readable "task_name(arg1, key=value)" signature.
        arg_parts = [str(arg) for arg in args]
        arg_parts += ['%s=%s' % (key, val) for key, val in kwargs.items()]
        func = '%s(%s)' % (sender.name, ', '.join(arg_parts))

        task_info = 'task: %s\ntask id: %s\n' % (func, task_id)

        # einfo.exc_info is the (type, value, traceback) triple; the stdlib
        # traceback module is imported as Traceback so it does not shadow the
        # signal's `traceback` argument.
        type_, value, tb = einfo.exc_info
        message = task_info + ''.join(Traceback.format_exception(type_, value, tb))
        log.warning(message)
        django_settings.COLLECT_EXCEPTIONS_CONFIG['exception_collector'](message)

    task_failure.connect(process_failure_signal, weak=False)
    log.info('register_signal success')
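The snippet assumes a COLLECT_EXCEPTIONS_CONFIG hook in Django settings but does not show it; a minimal hypothetical wiring (the 'exception_collector' key is taken from the code above, the collector itself is a stand-in that just re-logs the message):

import logging

def collect_exception(message):
    # Stand-in collector: forward the formatted failure text wherever it
    # should go (HTTP endpoint, queue, ...); here it is simply re-logged.
    logging.getLogger('exception.collector').error(message)

COLLECT_EXCEPTIONS_CONFIG = {'exception_collector': collect_exception}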
Example #2
def register_signals(APPENLIGHT_CLIENT):

    def prerun_signal(sender, task_id, task, args, kwargs, *aargs, **kwds):
        task._appenlight_start_time = datetime.utcnow()

    def postrun_signal(sender, task_id, task, args, kwargs, retval, *aargs, **kwds):
        end_time = datetime.utcnow()
        start_time = getattr(task, '_appenlight_start_time')
        fake_environ = {'appenlight.view_name': 'celery:' + sender.name}
        gather_data(APPENLIGHT_CLIENT, fake_environ, gather_exception=False,
                    start_time=start_time, end_time=end_time)

    def failure_signal(sender, task_id, exception, args, kwargs, traceback,
                       einfo, *aargs, **kwds):
        end_time = datetime.utcnow()
        start_time = getattr(sender, '_appenlight_start_time')
        fake_environ = {'appenlight.view_name': 'celery:' + sender.name}
        gather_data(APPENLIGHT_CLIENT, fake_environ,
                    start_time=start_time, end_time=end_time)

    def after_setup_logger_signal(sender=None, logger=None, loglevel=None,
                                  logfile=None, format=None,
                                  colorize=None, **kwargs):
        if APPENLIGHT_CLIENT.config['logging'] and APPENLIGHT_CLIENT.config['enabled']:
            APPENLIGHT_CLIENT.register_logger(logger)

    task_prerun.connect(prerun_signal, weak=False)
    task_postrun.connect(postrun_signal, weak=False)
    task_failure.connect(failure_signal, weak=False)
    after_setup_logger.connect(after_setup_logger_signal, weak=False)
    return True
Example #3
    def __init__(self, parent, **kwargs):
        super().__init__(parent, **kwargs)
        import celery
        from celery.signals import (
            before_task_publish,
            after_task_publish,
            task_prerun,
            task_retry,
            task_success,
            task_failure,
            task_revoked,
        )

        before_task_publish.connect(receivers.receiver_before_task_publish)
        after_task_publish.connect(receivers.receiver_after_task_publish)
        task_prerun.connect(receivers.receiver_task_pre_run)
        task_retry.connect(receivers.receiver_task_retry)
        task_success.connect(receivers.receiver_task_success)
        task_failure.connect(receivers.receiver_task_failure)
        task_revoked.connect(receivers.receiver_task_revoked)
        if celery.VERSION > (4, ):

            from celery.signals import task_unknown, task_rejected

            task_unknown.connect(receivers.receiver_task_unknown)
            task_rejected.connect(receivers.receiver_task_rejected)
Example #4
def register_signals(APPENLIGHT_CLIENT):

    def prerun_signal(sender, task_id, task, args, kwargs, *aargs, **kwds):
        task._appenlight_start_time = datetime.utcnow()

    def postrun_signal(sender, task_id, task, args, kwargs, retval, *aargs, **kwds):
        end_time = datetime.utcnow()
        start_time = getattr(task, '_appenlight_start_time')
        fake_environ = {'appenlight.view_name': 'celery:' + sender.name}
        gather_data(APPENLIGHT_CLIENT, fake_environ, gather_exception=False,
                    start_time=start_time, end_time=end_time)

    def failure_signal(sender, task_id, exception, args, kwargs, traceback,
                       einfo, *aargs, **kwds):
        end_time = datetime.utcnow()
        start_time = getattr(sender, '_appenlight_start_time')
        fake_environ = {'appenlight.view_name': 'celery:' + sender.name}
        gather_data(APPENLIGHT_CLIENT, fake_environ,
                    start_time=start_time, end_time=end_time)

    def after_setup_logger_signal(sender=None, logger=None, loglevel=None,
                                  logfile=None, format=None,
                                  colorize=None, **kwargs):
        if APPENLIGHT_CLIENT.config['logging'] and APPENLIGHT_CLIENT.config['enabled']:
            APPENLIGHT_CLIENT.register_logger(logger)

    task_prerun.connect(prerun_signal, weak=False)
    task_postrun.connect(postrun_signal, weak=False)
    task_failure.connect(failure_signal, weak=False)
    after_setup_logger.connect(after_setup_logger_signal, weak=False)
    return True
Example #5
def connect_failure_handler():
    """
    Connect the bugsnag failure_handler to the Celery
    task_failure signal
    """
    bugsnag.configure().runtime_versions['celery'] = celery.__version__
    task_failure.connect(failure_handler, weak=False)
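failure_handler itself ships with bugsnag's Celery integration and is not shown here; a minimal sketch of a handler compatible with the task_failure payload (not Bugsnag's actual implementation; the metadata layout and the bugsnag-python 4.x `metadata` keyword are assumptions):

import bugsnag

def failure_handler(sender, task_id, exception, args, kwargs,
                    traceback, einfo, **kw):
    # Report the exception together with some task context.
    bugsnag.notify(exception, metadata={
        'task': {'task_id': task_id, 'name': sender.name,
                 'args': args, 'kwargs': kwargs},
    })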
Example #6
def register_signal(client):
    def process_failure_signal(sender, task_id, exception, args, kwargs,
                               traceback, einfo, **kw):
        client.captureException(exc_info=einfo.exc_info,
                                extra={
                                    'task_id': task_id,
                                    'task': sender,
                                    'args': args,
                                    'kwargs': kwargs,
                                })

    task_failure.connect(process_failure_signal, weak=False)

    def process_logger_event(sender, logger, loglevel, logfile, format,
                             colorize, **kw):
        import logging
        logger = logging.getLogger()
        handler = SentryHandler(client)
        if handler.__class__ in list(map(type, logger.handlers)):
            return False
        handler.setLevel(logging.ERROR)
        handler.addFilter(CeleryFilter())
        logger.addHandler(handler)

    after_setup_logger.connect(process_logger_event, weak=False)
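These register_* helpers are meant to run once at worker start-up; a minimal wiring sketch, assuming raven's standard Client (the DSN is a placeholder):

from raven import Client

client = Client('https://<public_key>@sentry.example.com/1')
register_signal(client)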
Example #7
    def add_run_signals(cls, dapper_local):
        from celery.signals import task_prerun, task_postrun, task_failure, task_revoked
        task_prerun.connect(cls.task_begin)
        task_postrun.connect(cls.task_end)
        task_failure.connect(cls.task_fail)
        task_revoked.connect(cls.task_revoked)

        cls.dapper_local = dapper_local
Example #8
def celery_enable_all():
    """Enable johnny-cache in all celery tasks, clearing the local-store
    after each task."""
    from celery.signals import task_prerun, task_postrun, task_failure
    task_prerun.connect(prerun_handler)
    task_postrun.connect(postrun_handler)
    # Also have to cleanup on failure.
    task_failure.connect(postrun_handler)
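prerun_handler and postrun_handler are defined elsewhere in the same module; a sketch consistent with the johnny-cache API (compare the fuller version in Example #35 below):

from johnny.cache import get_backend, local

def prerun_handler(*args, **kwargs):
    # Install johnny-cache's query-cache monkey patch before the task runs.
    get_backend().patch()

def postrun_handler(*args, **kwargs):
    # Clear the thread-local store after the task finishes or fails.
    local.clear()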
Example #9
File: __init__.py Project: rtnpro/raven
def register_signal(client):
    def process_failure_signal(exception, traceback, sender, task_id, signal, args, kwargs, einfo, **kw):
        exc_info = (type(exception), exception, traceback)
        client.captureException(
            exc_info=exc_info, extra={"task_id": task_id, "sender": sender, "args": args, "kwargs": kwargs}
        )

    # weak=False keeps the closure alive; with the default weak reference it
    # would be garbage-collected when register_signal() returns.
    task_failure.connect(process_failure_signal, weak=False)
Example #10
    def setup_once():
        task_prerun.connect(_handle_task_prerun, weak=False)
        task_postrun.connect(_handle_task_postrun, weak=False)
        task_failure.connect(_process_failure_signal, weak=False)

        # This logger logs every status of every task that ran on the worker.
        # Meaning that every task's breadcrumbs are full of stuff like "Task
        # <foo> raised unexpected <bar>".
        ignore_logger("celery.worker.job")
Example #11
    def connect_traced_handlers(self):
        if self._propagate:
            before_task_publish.connect(self._prepublish, weak=False)
            after_task_publish.connect(self._postpublish, weak=False)
        task_prerun.connect(self._start_span, weak=False)
        task_failure.connect(self._tag_error, weak=False)
        task_retry.connect(self._tag_retry, weak=False)
        task_postrun.connect(self._finish_span, weak=False)
        log.debug('Registered CeleryTracing signal handlers.')
Example #12
def register_signal(client):
    def process_failure_signal(sender, task_id, exception, args, kwargs,
                               traceback, einfo, **kw):
        client.capture_exception(extra={
            'task_id': task_id,
            'task': sender,
            'args': args,
            'kwargs': kwargs,
        })

    task_failure.connect(process_failure_signal, weak=False)
Example #13
def register_signal():
    """Adapted from `raven.contrib.celery.register_signal`. Remove args and
    kwargs from logs so that keys aren't leaked to Sentry.
    """
    def process_failure_signal(sender, task_id, *args, **kwargs):
        with sentry_sdk.configure_scope() as scope:
            scope.set_tag('task_id', task_id)
            scope.set_tag('task', sender)
            sentry_sdk.capture_exception()

    task_failure.connect(process_failure_signal, weak=False)
Example #14
def register_signal(client):
    def process_failure_signal(sender, task_id, args, kwargs, **kw):
        # This signal is fired inside the stack so let raven do its magic
        client.captureException(extra={
            'task_id': task_id,
            'task': sender,
            'args': args,
            'kwargs': kwargs,
        })

    task_failure.connect(process_failure_signal, weak=False)
Example #15
def register_signal(client):
    def process_failure_signal(sender, task_id, exception, args, kwargs,
                               traceback, einfo, **kw):
        client.capture_exception(
            extra={
                'task_id': task_id,
                'task': sender,
                'args': args,
                'kwargs': kwargs,
            })
    task_failure.connect(process_failure_signal, weak=False)
Example #16
def register_signal(client):
    """Adapted from `raven.contrib.celery.register_signal`. Remove args and
    kwargs from logs so that keys aren't leaked to Sentry.
    """
    def process_failure_signal(sender, task_id, *args, **kwargs):
        client.captureException(extra={
            'task_id': task_id,
            'task': sender,
        })

    task_failure.connect(process_failure_signal, weak=False)
Example #17
def register_signal(client):
    """Adapted from `raven.contrib.celery.register_signal`. Remove args and
    kwargs from logs so that keys aren't leaked to Sentry.
    """
    def process_failure_signal(sender, task_id, *args, **kwargs):
        client.captureException(
            extra={
                'task_id': task_id,
                'task': sender,
            }
        )
    task_failure.connect(process_failure_signal, weak=False)
Example #18
def install(app=None):
    if app is not None:
        copy_configuration(app)

    installed = scout_apm.core.install()
    if not installed:
        return

    before_task_publish.connect(before_task_publish_callback)
    task_prerun.connect(task_prerun_callback)
    task_failure.connect(task_failure_callback)
    task_postrun.connect(task_postrun_callback)
Example #19
def register_signal(client):
    def process_failure_signal(sender, task_id, args, kwargs, **kw):
        # This signal is fired inside the stack so let raven do its magic
        client.captureException(
            extra={
                'task_id': task_id,
                'task': sender,
                'args': args,
                'kwargs': kwargs,
            })

    task_failure.connect(process_failure_signal, weak=False)
Example #20
File: __init__.py Project: mfrasca/raven
def register_signal(client):
    def process_failure_signal(exception, traceback, sender, task_id,
                               signal, args, kwargs, einfo, **kw):
        exc_info = (type(exception), exception, traceback)
        client.captureException(
            exc_info=exc_info,
            extra={
                'task_id': task_id,
                'sender': sender,
                'args': args,
                'kwargs': kwargs,
            })
    # weak=False keeps the local closure alive after register_signal() returns.
    task_failure.connect(process_failure_signal, weak=False)
Example #21
def register_signal(client, ignore_expected=False):
    def process_failure_signal(sender, task_id, args, kwargs, einfo, **kw):
        if ignore_expected and isinstance(einfo.exception, sender.throws):
            return
        if isinstance(einfo.exception, SoftTimeLimitExceeded):
            fingerprint = ['celery', 'SoftTimeLimitExceeded', sender]
        else:
            fingerprint = None
        client.captureException(
            extra={
                'task_id': task_id,
                'task': sender,
                'args': args,
                'kwargs': kwargs,
            },
            fingerprint=fingerprint,
        )

    task_failure.connect(process_failure_signal, weak=False)
Example #22
def register_signal(client):
    def process_failure_signal(sender, task_id, args, kwargs, einfo, **kw):
        # This signal is fired inside the stack so let raven do its magic
        if isinstance(einfo.exception, SoftTimeLimitExceeded):
            fingerprint = ['celery', 'SoftTimeLimitExceeded', sender]
        else:
            fingerprint = None
        client.captureException(
            extra={
                'task_id': task_id,
                'task': sender,
                'args': args,
                'kwargs': kwargs,
            },
            fingerprint=fingerprint,
        )

    task_failure.connect(process_failure_signal, weak=False)
Example #23
def register_signal(client):
    def process_failure_signal(sender, task_id, exception, args, kwargs,
                               traceback, einfo, **kw):
        if hasattr(einfo, 'exc_info'):
            # for Celery 2.4 or later
            exc_info = einfo.exc_info
        else:
            # for Celery before 2.4
            exc_info = (type(exception), exception, traceback)

        client.captureException(
            exc_info=exc_info,
            extra={
                'task_id': task_id,
                'task': sender,
                'args': args,
                'kwargs': kwargs,
            })

    task_failure.connect(process_failure_signal, weak=False)
Example #24
def register_signal(client):
    def process_failure_signal(sender, task_id, exception, args, kwargs, traceback, einfo, **kw):
        client.captureException(
            exc_info=einfo.exc_info, extra={"task_id": task_id, "task": sender, "args": args, "kwargs": kwargs}
        )

    task_failure.connect(process_failure_signal, weak=False)

    def process_logger_event(sender, logger, loglevel, logfile, format, colorize, **kw):
        import logging

        logger = logging.getLogger()
        handler = SentryHandler(client)
        if handler.__class__ in list(map(type, logger.handlers)):
            return False
        handler.setLevel(logging.ERROR)
        handler.addFilter(CeleryFilter())
        logger.addHandler(handler)

    after_setup_logger.connect(process_logger_event, weak=False)
Example #25
    def install(self,
                config={},
                context_generators={},
                report_exceptions=False):
        """
        Setup Celery - Honeybadger integration.
        :param dict[str, T] config: a configuration object to read config from.
        :param context_generators: Context generators
        :param bool report_exceptions: whether to automatically report exceptions on tasks or not.
        """
        self.initialize_honeybadger(config)
        self.context_generators = context_generators
        self.report_exceptions = report_exceptions
        task_prerun.connect(self.setup_context, weak=False)
        task_postrun.connect(self.reset_context, weak=False)
        if self.report_exceptions:
            task_failure.connect(self._failure_handler, weak=False)

        self._patch_generic_request_payload()
        logger.info('Registered Celery signal handlers')
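A hedged usage sketch for the install() method above; the enclosing plugin class is not shown in this excerpt, so `plugin` stands in for an instance of it, and the config key is illustrative:

plugin.install(
    config={'api_key': '...'},   # read by initialize_honeybadger
    report_exceptions=True,      # also connects the task_failure handler
)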
Example #26
def register_signal(client):
    def process_failure_signal(sender, task_id, exception, args, kwargs,
                               traceback, einfo, **kw):
        client.captureException(
            extra={
                'task_id': task_id,
                'task': sender,
                'args': args,
                'kwargs': kwargs,
            })
    task_failure.connect(process_failure_signal, weak=False)

    def process_logger_event(sender, logger, loglevel, logfile, format,
                             colorize, **kw):
        import logging
        logger = logging.getLogger()
        handler = OpbeatHandler(client)
        if handler.__class__ in map(type, logger.handlers):
            return False
        handler.setLevel(logging.ERROR)
        handler.addFilter(CeleryFilter())
        logger.addHandler(handler)
Example #27
def register_signals(APPENLIGHT_CLIENT):

    def prerun_signal(sender, task_id, task, args, kwargs, *aargs, **kwds):
        task._appenlight_start_time = datetime.utcnow()

    def postrun_signal(sender, task_id, task, args, kwargs, retval, *aargs, **kwds):
        end_time = datetime.utcnow()
        start_time = getattr(task, '_appenlight_start_time')
        fake_environ = {'appenlight.view_name': 'celery:' + sender.name}
        gather_data(APPENLIGHT_CLIENT, fake_environ, gather_exception=False,
                    start_time=start_time, end_time=end_time)

    def failure_signal(sender, task_id, exception, args, kwargs, traceback,
                       einfo, *aargs, **kwds):
        end_time = datetime.utcnow()
        start_time = getattr(sender, '_appenlight_start_time')
        fake_environ = {'appenlight.view_name': 'celery:' + sender.name}
        gather_data(APPENLIGHT_CLIENT, fake_environ,
                    start_time=start_time, end_time=end_time)

    task_prerun.connect(prerun_signal, weak=False)
    task_postrun.connect(postrun_signal, weak=False)
    task_failure.connect(failure_signal, weak=False)
    return True
Example #28
def connect_failure_handler():
    """
    Connect the bugsnag failure_handler to the Celery
    task_failure signal
    """
    task_failure.connect(failure_handler, weak=False)
Example #29
    after_setup_logger.connect(initialize_slack_logger)


@after_task_publish.connect
def update_sent_state(sender=None, headers=None, **kwargs):
    """Change task status to SENT when task is published """
    # By default task status is PENDING if you get a non existing task by id
    # its status will be PENDING changing to SENT will confirm task exists

    task = celery.tasks.get(sender)
    backend = task.backend if task else celery.backend
    backend.store_result(headers["id"], None, "SENT")
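With update_sent_state connected, callers can distinguish "never published" from "published but not yet started"; a minimal sketch, assuming a result backend is configured:

from celery.result import AsyncResult

def task_was_published(task_id):
    # Unknown ids report PENDING by default; published tasks now report SENT.
    return AsyncResult(task_id).state != 'PENDING'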


def clean_task_from_backend(task_id, **kwargs):
    """Clean tasks entries from backend"""
    AsyncResult(task_id).forget()
    gc.collect()


task_postrun.connect(clean_task_from_backend)
task_failure.connect(clean_task_from_backend)


@worker_process_shutdown.connect
def shutdown_worker(**kwargs):
    LOGGER.info("Shutting worker")
    with app.app_context():
        db.session.close()
        db.engine.dispose()
Example #30
    task = ManagedTask.objects.get(celery_task_id=task_id)
    task.celery_task_status = 5
    task.is_enable = False
    task.save(check_enable=False)


def on_task_rejected(*args, **kwargs):
    task = kwargs['sender']

    task_id = task.request.id
    task = ManagedTask.objects.get(celery_task_id=task_id)
    task.celery_task_status = 6
    task.is_enable = False
    task.save(check_enable=False)


def on_task_worker_shutting_down(*args, **kwargs):
    ManagedTask.objects.filter(is_enable=True).update(celery_task_status=6)


worker_ready.connect(on_worker_ready, dispatch_uid='on_worker_ready')
task_prerun.connect(on_task_prerun, dispatch_uid='on_task_prerun')
task_postrun.connect(on_task_finished, dispatch_uid='on_task_postrun')

task_success.connect(on_task_success, dispatch_uid='on_task_success')
task_retry.connect(on_task_retry, dispatch_uid='on_task_retry')
task_failure.connect(on_task_failure, dispatch_uid='on_task_failure')
task_revoked.connect(on_task_revoked, dispatch_uid='on_task_revoked')
task_rejected.connect(on_task_rejected, dispatch_uid='on_task_rejected')
worker_shutting_down.connect(on_task_worker_shutting_down,
                             dispatch_uid='on_task_worker_shutting')
Example #31
def connect_failure_handler():
    """
    Connect the bugsnag failure_handler to the Celery
    task_failure signal
    """
    task_failure.connect(failure_handler, weak=False)
Example #32
    def __init__(self, app_id):
        self.app_id = app_id
        task_failure.connect(self.exception_handler, weak=False)
Example #33
    def __init__(self, app_id):
        self.app_id = app_id
        task_failure.connect(self.exception_handler, weak=False)
Example #34
        celery_logger.error('Celery job exception: %s(%s)' %
                            (exception.__class__.__name__, exception),
                            exc_info=exc_info,
                            extra={
                                'data': {
                                    'task_id': task_id,
                                    'sender': sender,
                                    'args': args,
                                    'kwargs': kwargs,
                                }
                            })
    except:
        pass


task_failure.connect(process_failure_signal)


@task
def cleanup():
    import datetime
    from django.db import transaction
    from django.contrib.sessions.models import Session
    from djcelery.models import TaskState
    from auth.models import EmailConfirmation

    EmailConfirmation.objects.delete_expired_confirmations()

    now = datetime.datetime.now()
    Session.objects.filter(expire_date__lt=now).delete()
Example #35
"""
General-purpose tasks that are not tied to a given app.

Right now this is mostly to hook celery into johnny-cache, so our app server
and celery are on the same page as far as cache invalidation goes. Celery does
not have any notion of middleware, so we have to fake it with 
"""
from johnny.cache import get_backend, local
from celery.signals import task_prerun, task_postrun, task_failure
# from johnny.middleware import QueryCacheMiddleware, LocalStoreClearMiddleware

def task_prerun_handler(*args, **kwargs):
    """
    Before each task is run, install Johnny's query-cache monkey patch. This
    makes sure that any table writes invalidate table caches, and reads pull
    from any existing caches.
    """
    get_backend().patch()
task_prerun.connect(task_prerun_handler)

def task_postrun_handler(*args, **kwargs):
    """
    After each task is run, the LocalStore cache (similar to threadlocals) is
    cleared, just as the LocalStore middleware does for views.
    """
    local.clear()

task_postrun.connect(task_postrun_handler)
# Also have to cleanup on failure.
task_failure.connect(task_postrun_handler)
Example #36
    A decorator to memoize functions on a per-request basis.
    Arguments to the memoized function should NOT be objects
    Use primitive types as arguments
    """

    def wrapped(*args, **kwargs):
        # if no request, skip cache
        if app.env.request is None:
            return func(*args, **kwargs)

        if not hasattr(_cache, "items"):
            _cache.items = {}
        cache_key = (func, repr(args), repr(kwargs))
        if cache_key in _cache.items:
            rv = _cache.items[cache_key]
        else:
            rv = func(*args, **kwargs)
            _cache.items[cache_key] = rv
        return rv

    return wrapped


def clear_cache(**kwargs):
    _cache.items = {}


request_finished.connect(clear_cache)
task_failure.connect(clear_cache)
task_success.connect(clear_cache)
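A hypothetical usage of the decorator above; its own def line is cut off in this excerpt, so the name `memoize` is assumed, and arguments stay primitive as the docstring requires:

@memoize
def lookup_account(account_id):
    return expensive_account_query(account_id)  # stand-in for real work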
Example #37
    mo = VIMEO_URL_RE.match(url)
    if mo:
        video_id = mo.group('ident')

        h = httplib2.Http(timeout=10)
        url = ('http://vimeo.com/api/v2/video/%s.json' % video_id)
        response, video_data = h.request(url, headers={'User-Agent': 'friendstream/1.0'})
        if response.status != 200:
            raise ValueError("Unexpected response %d %s getting data for Vimeo video %s"
                % (response.status, response.reason, video_id))

        video, created = Video.objects.get_or_create(service='vimeo.com', ident=video_id,
            defaults={'data': video_data})
        return video

    # nope!
    log.debug("Well, %s isn't a video, skip it", url)


def task_failed(exception, traceback, sender, task_id, signal, args, kwargs, einfo, **kw):
    exc_info = (type(exception), exception, traceback)
    log.error('%s: %s', exception.__class__.__name__, str(exception),
        exc_info=exc_info,
        extra={
            'data': {'task_id': task_id, 'sender': sender, 'args': args, 'kwargs': kwargs},
        },
    )

task_failure.connect(task_failed)
Example #38
                  'total': total,
                  'foias': data[:20]
              }),
              '*****@*****.**', ['*****@*****.**'],
              fail_silently=False)


def process_failure_signal(exception, traceback, sender, task_id, signal, args,
                           kwargs, einfo, **kw):
    """Log celery exceptions to sentry"""
    # http://www.colinhowe.co.uk/2011/02/08/celery-and-sentry-recording-errors/
    # pylint: disable=too-many-arguments
    # pylint: disable=unused-argument
    exc_info = (type(exception), exception, traceback)
    logger.error('Celery job exception: %s(%s)',
                 exception.__class__.__name__,
                 exception,
                 exc_info=exc_info,
                 extra={
                     'data': {
                         'task_id': task_id,
                         'sender': sender,
                         'args': args,
                         'kwargs': kwargs,
                     }
                 })


task_failure.connect(process_failure_signal,
                     dispatch_uid='muckrock.foia.tasks.logging')
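The dispatch_uid argument makes the connection idempotent: reconnecting with the same uid replaces the existing receiver rather than adding a duplicate, so re-imports of this module register the handler only once.

# Even a second, identical connect() is a no-op thanks to dispatch_uid:
task_failure.connect(process_failure_signal,
                     dispatch_uid='muckrock.foia.tasks.logging')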
Example #39
    # advanced_celery adds workarounds for celery issue which requires specific import order
    from apps.task.celery_backend.advanced_celery import AdvancedCelery  # noqa
    from apps.task.utils.task_utils import TaskUtils

    def on_failure(*args, **kwargs):
        exc = kwargs.get('exception')
        if not exc:
            return
        if isinstance(exc, InterfaceError):
            if exc.args and 'connection already closed' in exc.args[0]:
                print(
                    'on_failure(InterfaceError): shutting down DB connection')
                # clear the DB connection
                TaskUtils.prepare_task_execution()

    # weak=False keeps the local closure alive after this setup code returns.
    task_failure.connect(on_failure, weak=False)

    app = AdvancedCelery('apps')

    def add_preload_options(parser):
        parser.add_argument(
            '-R',
            '--role',
            default=None,
            help='Celery worker role.',
        )

    app.user_options['preload'].add(add_preload_options)

    app.config_from_object('django.conf:settings', namespace='CELERY')
    app.autodiscover_tasks(force=True)
Example #40
from celery import Celery
from celery.signals import task_failure
from celery.utils.log import get_task_logger
from {{cookiecutter.project_slug}}.settings import settings
logger = get_task_logger(__name__)
from tornado import httputil
# app = Celery('coder', broker_url='redis://localhost:6379/1')
# celery's redis does not support client_name
app = Celery('coder',
             broker=httputil.url_concat(settings['celery__broker_url'], dict()),
             backend=httputil.url_concat(settings['celery__broker_url'], dict()),
             include=['{{cookiecutter.project_slug}}.tasks.error.task_error_callback'])


# from coder.tasks.task_send_error_email import send_error_email
from {{cookiecutter.project_slug}}.utils.util_error import util_error_send_email

task_failure.connect(util_error_send_email)
Example #41
    def _install_patches(self):
        Task.apply_async = task_apply_async_wrapper
        before_task_publish.connect(before_task_publish_handler)
        task_prerun.connect(task_prerun_handler)
        task_success.connect(task_success_handler)
        task_failure.connect(task_failure_handler)
Example #42
def register_handlers(taskclass):
    task_sent.connect(on_task_sent, tasks[taskclass.name])
    task_prerun.connect(on_task_prerun, tasks[taskclass.name])
    task_postrun.connect(on_task_postrun, tasks[taskclass.name])
    task_failure.connect(on_task_failure, tasks[taskclass.name])
    return taskclass
Example #43
def process_failure_signal(exception, traceback, sender, task_id,
                           signal, args, kwargs, einfo, **kw):
    exc_info = (type(exception), exception, traceback)
    celery_logger.error(
        'Celery job exception: %s(%s)' % (exception.__class__.__name__, exception),
        exc_info=exc_info,
        extra={
            'data': {
                'task_id': task_id,
                'sender': sender,
                'args': args,
                'kwargs': kwargs,
            }
        }
    )
task_failure.connect(process_failure_signal)

@periodic_task(run_every=crontab(hour=3, day_of_week=1))
def cleanup():
    import datetime
    from django.db import transaction
    from django.contrib.sessions.models import Session
    from djcelery.models import TaskState
    
    now = datetime.datetime.now()
    Session.objects.filter(expire_date__lt=now).delete()

    d = now - datetime.timedelta(days=31)
    TaskState.objects.filter(tstamp__lt=d).delete()
    transaction.commit_unless_managed()
Example #44
    def install(self):
        task_prerun.connect(self.handle_task_prerun, weak=False)
        task_postrun.connect(self.handle_task_postrun, weak=False)
        task_failure.connect(self.process_failure_signal, weak=False)
Example #45
File: celery.py Project: robotice/robotice
def register_signal(client):
    def process_failure_signal(sender, task_id, args, kwargs, **kw):
        # This signal is fired inside the stack so let raven do its magic
        client.captureException(extra={"task_id": task_id, "task": sender, "args": args, "kwargs": kwargs})

    task_failure.connect(process_failure_signal, weak=False)