Example #1
def register_signals(APPENLIGHT_CLIENT):

    def prerun_signal(sender, task_id, task, args, kwargs, *aargs, **kwds):
        task._appenlight_start_time = datetime.utcnow()

    def postrun_signal(sender, task_id=None, task=None, args=None,
                       kwargs=None, retval=None, *aargs, **kwds):
        end_time = datetime.utcnow()
        # task_success passes only `sender` (the task) and `result`, so fall
        # back to sender and tolerate a missing start time
        task = task if task is not None else sender
        start_time = getattr(task, '_appenlight_start_time', None)
        fake_environ = {'appenlight.view_name': 'celery:' + sender.name}
        gather_data(APPENLIGHT_CLIENT, fake_environ, gather_exception=False,
                    start_time=start_time, end_time=end_time)

    def failure_signal(sender, task_id, exception, args, kwargs, traceback,
                       einfo, *aargs, **kwds):
        end_time = datetime.utcnow()
        start_time = getattr(sender, '_appenlight_start_time', None)
        fake_environ = {'appenlight.view_name': 'celery:' + sender.name}
        gather_data(APPENLIGHT_CLIENT, fake_environ,
                    start_time=start_time, end_time=end_time)

    def after_setup_logger_signal(sender=None, logger=None, loglevel=None,
                                  logfile=None, format=None,
                                  colorize=None, **kwargs):
        if APPENLIGHT_CLIENT.config['logging'] and APPENLIGHT_CLIENT.config['enabled']:
            APPENLIGHT_CLIENT.register_logger(logger)

    task_prerun.connect(prerun_signal, weak=False)
    task_postrun.connect(postrun_signal, weak=False)
    task_failure.connect(failure_signal, weak=False)
    task_success.connect(postrun_signal, weak=False)
    after_setup_logger.connect(after_setup_logger_signal, weak=False)
    return True
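
The snippet above assumes a handful of imports from its original module; a plausible reconstruction (not verbatim from the source):

from datetime import datetime
from celery.signals import (after_setup_logger, task_failure, task_postrun,
                            task_prerun, task_success)
# gather_data is an AppEnlight helper defined alongside this function;
# its exact import path is not shown in the excerpt.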
Example #2
def celery_enable_all():
    """Enable johnny-cache in all celery tasks, clearing the local-store
    after each task."""
    from celery.signals import task_prerun, task_postrun, task_failure
    task_prerun.connect(prerun_handler)
    task_postrun.connect(postrun_handler)
    # Also have to cleanup on failure.
    task_failure.connect(postrun_handler)
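
prerun_handler and postrun_handler are defined elsewhere in johnny-cache; a minimal sketch of the pattern they implement, using a hypothetical thread-local store as a stand-in:

import threading

_local = threading.local()  # hypothetical stand-in for johnny-cache's local store

def prerun_handler(*args, **kwargs):
    # start every task with an empty local cache
    _local.store = {}

def postrun_handler(*args, **kwargs):
    # clear whatever the task cached so nothing leaks into the next
    # task run by this worker process (also connected on failure)
    _local.store = {}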
Example #3
    def connect(self, task_started, task_retried, task_failed, task_succeeded):
        """Connect the celery BPMTask events to callback celery tasks."""
        self.task_started = task_started
        self.task_retried = task_retried
        self.task_failed = task_failed
        self.task_succeeded = task_succeeded
        task_prerun.connect(self._task_prerun)
        task_postrun.connect(self._task_postrun)
Example #4
    def _register_handlers(self, app):
        app.before_request(self.connect_db)
        app.teardown_request(self.close_db)
        try:
            from celery.signals import task_prerun, task_postrun
            # weak=False keeps the lambdas alive: with the default weak
            # references they can be garbage-collected and silently
            # disconnected (compare Example #6 and the note in Example #20)
            task_prerun.connect(lambda *arg, **kw: self.connect_db(), weak=False)
            task_postrun.connect(lambda *arg, **kw: self.close_db(None), weak=False)
        except ImportError:
            pass
Example #5
    def connect_traced_handlers(self):
        if self._propagate:
            before_task_publish.connect(self._prepublish, weak=False)
            after_task_publish.connect(self._postpublish, weak=False)
        task_prerun.connect(self._start_span, weak=False)
        task_failure.connect(self._tag_error, weak=False)
        task_retry.connect(self._tag_retry, weak=False)
        task_postrun.connect(self._finish_span, weak=False)
        log.debug('Registered CeleryTracing signal handlers.')
Example #6
    def _try_setup_celery(self):
        try:
            from celery.signals import task_prerun, task_postrun
            task_prerun.connect(
                lambda *arg, **kw: self.connect_db(), weak=False)
            task_postrun.connect(
                lambda *arg, **kw: self.close_db(), weak=False)
        except ImportError:
            pass
Example #7
    def update_celery(self, new_celery: celery.Celery) -> None:
        if self.app:
            self.celery.__dict__.update(vars(new_celery))
            self.celery.conf.update(self.app.config.get_namespace("CELERY_"))

            worker_process_init.connect(self._worker_process_init)

            task_postrun.connect(self._task_postrun)
            task_prerun.connect(self._task_prerun)
Example #8
    def setup_once():
        task_prerun.connect(_handle_task_prerun, weak=False)
        task_postrun.connect(_handle_task_postrun, weak=False)
        task_failure.connect(_process_failure_signal, weak=False)

        # This logger logs every status of every task that ran on the worker.
        # Meaning that every task's breadcrumbs are full of stuff like "Task
        # <foo> raised unexpected <bar>".
        ignore_logger("celery.worker.job")
Example #9
def install(app=None):
    if app is not None:
        copy_configuration(app)

    installed = scout_apm.core.install()
    if not installed:
        return

    before_task_publish.connect(before_task_publish_callback)
    task_prerun.connect(task_prerun_callback)
    task_postrun.connect(task_postrun_callback)
Example #10
def bind(endpoint=None):
    if not endpoint:
        endpoint = Endpoint("Celery")

    events.endpoint = endpoint

    log.info("Attaching zipkin to celery signals")
    before_task_publish.connect(events.task_send_handler)
    task_prerun.connect(events.task_prerun_handler)
    task_postrun.connect(events.task_postrun_handler)
    log.info("zipkin signals attached")
Example #11
    def on_worker_ready(self, sender, **_kwargs):
        task_prerun.connect(self.on_task_prerun)
        task_postrun.connect(self.on_task_postrun)

        # TODO Similar metrics for other pool implementations.
        if ThreadTaskPool and isinstance(sender.pool, ThreadTaskPool):
            collector = CeleryThreadPoolCollector(sender.pool.executor)
            prometheus_client.REGISTRY.register(collector)

        prometheus_client.start_http_server(9000)
        log.info("Prometheus exporter started for Celery worker on :9000")
Example #12
def connect(app):
    task_prerun.connect(report_monitor_begin, weak=False)
    task_postrun.connect(report_monitor_complete, weak=False)

    # XXX(dcramer): Celery docs suggest it should be app.conf.beat_schedule, which
    # was likely a change in 4.x. This code is intended to support "any celery" and be
    # adopted into sentry-sdk core, thus we support it here.
    schedule = app.conf.beat_schedule if hasattr(
        app.conf, 'beat_schedule') else app.conf['CELERYBEAT_SCHEDULE']
    for schedule_name, monitor_id in six.iteritems(
            settings.SENTRY_CELERYBEAT_MONITORS):
        schedule[schedule_name].setdefault('options', {}).setdefault(
            'headers', {}).setdefault('X-Sentry-Monitor', monitor_id)
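
On the consuming side, a prerun receiver like report_monitor_begin can read the injected header back off the task request; a hedged sketch (not Sentry's actual implementation):

def report_monitor_begin(task, **kwargs):
    # task.request.headers may be absent or None depending on the protocol
    headers = getattr(task.request, 'headers', None) or {}
    monitor_id = headers.get('X-Sentry-Monitor')
    if monitor_id:
        pass  # check in with the monitoring backend here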
Example #13
    def __init__(self, *args, **kwargs):
        '''
        A task will be initialized for every process, but not for every task!
        '''
        Task.__init__(self, *args, **kwargs)
        self.__result_database_storage = None
        self.__apk_storage = None
        self.__script_hashes = None
        self.__androscripts = None

        # register signal to prefetch apks
        task_prerun.connect(self.prefetch_apk)

        log.debug("%s init", self)
Example #15
    def init_app(self, app, sentry=None):
        self.app = app
        new_celery = celery.Celery(
            app.import_name,
            broker=app.config['CELERY_BROKER_URL'],
            backend=app.config['CELERY_RESULT_BACKEND'],
        )
        self.celery.__dict__.update(vars(new_celery))
        self.celery.conf.update(app.config)

        worker_process_init.connect(self._worker_process_init)

        task_postrun.connect(self._task_postrun)
        task_prerun.connect(self._task_prerun)
Example #16
    def init_app(self, app):
        self.app = app
        new_celery = celery.Celery(
            app.import_name,
            broker=app.config["CELERY_BROKER_URL"],
            backend=app.config["CELERY_RESULT_BACKEND"],
        )
        # XXX(dcramer): why the hell am I wasting time trying to make Celery work?
        self.celery.__dict__.update(vars(new_celery))
        self.celery.conf.update(app.config)

        worker_process_init.connect(self._worker_process_init)

        task_postrun.connect(self._task_postrun)
        task_prerun.connect(self._task_prerun)
Example #17
def test_celery_run_without_parent_span(span_in_context_mock, celery_eager,
                                        tracer, task_error):
    def task_prerun_hook(task, **kwargs):
        task.request.delivery_info['is_eager'] = False

    task_prerun.connect(task_prerun_hook)
    # reverse the receiver list so our hook runs before the tracer's own
    # prerun receiver and can fake non-eager delivery
    task_prerun.receivers = list(reversed(task_prerun.receivers))
    try:
        result = _test_foo_task(celery_eager, task_error)
    finally:
        task_prerun.disconnect(task_prerun_hook)

    span_server = tracer.recorder.get_spans()[0]
    assert span_server.parent_id is None
    assert_span(span_server, result, 'run', tags.SPAN_KIND_RPC_SERVER)
Example #18
    def init_app(self, app, sentry):
        self.app = app
        new_celery = celery.Celery(
            app.import_name,
            broker=app.config['CELERY_BROKER_URL'],
            backend=app.config['CELERY_RESULT_BACKEND'],
        )
        # XXX(dcramer): why the hell am I wasting time trying to make Celery work?
        self.celery.__dict__.update(vars(new_celery))
        self.celery.conf.update(app.config)

        task_prerun.connect(self._task_prerun)
        task_postrun.connect(self._task_postrun)

        if sentry:
            register_signal(sentry.client)
            register_logger_signal(sentry.client)
Example #19
def connect(app):
    task_prerun.connect(report_monitor_begin, weak=False)
    task_postrun.connect(report_monitor_complete, weak=False)

    # XXX(dcramer): Celery docs suggest it should be app.conf.beat_schedule, which
    # was likely a change in 4.x. This code is intended to support "any celery" and be
    # adopted into sentry-sdk core, thus we support it here.
    schedule = app.conf.beat_schedule if hasattr(
        app.conf, 'beat_schedule') else app.conf['CELERYBEAT_SCHEDULE']
    for schedule_name, monitor_id in six.iteritems(
            settings.SENTRY_CELERYBEAT_MONITORS):
        schedule[schedule_name].setdefault('options', {}).setdefault(
            'headers', {}).setdefault('X-Sentry-Monitor', monitor_id)
Example #20
    def _connect_signals(self):
        """Connect callbacks to celery signals.

        Since we are creating partials here, the weak arg must be False.
        """
        # Beat
        if self.options["show_beat"]:
            beat_init.connect(slack_beat_init(**self.options), weak=False)

        # Celery
        if self.options["show_startup"]:
            celeryd_init.connect(slack_celery_startup(**self.options), weak=False)
        if self.options["show_shutdown"]:
            worker_shutdown.connect(slack_celery_shutdown(**self.options), weak=False)

        # Task
        task_prerun.connect(slack_task_prerun(**self.options), weak=False)
Example #21
def register_signals(APPENLIGHT_CLIENT):
    def prerun_signal(sender, task_id, task, args, kwargs, *aargs, **kwds):
        task._appenlight_start_time = datetime.utcnow()

    def postrun_signal(sender, task_id=None, task=None, args=None,
                       kwargs=None, retval=None, *aargs, **kwds):
        end_time = datetime.utcnow()
        # task_success passes only `sender` (the task) and `result`, so fall
        # back to sender and tolerate a missing start time
        task = task if task is not None else sender
        start_time = getattr(task, '_appenlight_start_time', None)
        fake_environ = {'appenlight.view_name': 'celery:' + sender.name}
        gather_data(APPENLIGHT_CLIENT,
                    fake_environ,
                    gather_exception=False,
                    start_time=start_time,
                    end_time=end_time)

    def failure_signal(sender, task_id, exception, args, kwargs, traceback,
                       einfo, *aargs, **kwds):
        end_time = datetime.utcnow()
        start_time = getattr(sender, '_appenlight_start_time', None)
        fake_environ = {'appenlight.view_name': 'celery:' + sender.name}
        gather_data(APPENLIGHT_CLIENT,
                    fake_environ,
                    start_time=start_time,
                    end_time=end_time)

    def after_setup_logger_signal(sender=None,
                                  logger=None,
                                  loglevel=None,
                                  logfile=None,
                                  format=None,
                                  colorize=None,
                                  **kwargs):
        if APPENLIGHT_CLIENT.config['logging'] and APPENLIGHT_CLIENT.config['enabled']:
            APPENLIGHT_CLIENT.register_logger(logger)

    task_prerun.connect(prerun_signal, weak=False)
    task_postrun.connect(postrun_signal, weak=False)
    task_failure.connect(failure_signal, weak=False)
    task_success.connect(postrun_signal, weak=False)
    after_setup_logger.connect(after_setup_logger_signal, weak=False)
    return True
Example #22
    def install(self,
                config={},
                context_generators={},
                report_exceptions=False):
        """
        Setup Celery - Honeybadger integration.
        :param dict[str, T] config: a configuration object to read config from.
        :param context_generators: Context generators
        :param bool report_exceptions: whether to automatically report exceptions on tasks or not.
        """
        self.initialize_honeybadger(config)
        self.context_generators = context_generators
        self.report_exceptions = report_exceptions
        task_prerun.connect(self.setup_context, weak=False)
        task_postrun.connect(self.reset_context, weak=False)
        if self.report_exceptions:
            task_failure.connect(self._failure_handler, weak=False)

        self._patch_generic_request_payload()
        logger.info('Registered Celery signal handlers')
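
A hedged usage sketch; CeleryPlugin stands in for whatever class this install method belongs to, and the config keys are illustrative:

plugin = CeleryPlugin()
plugin.install(
    config={'api_key': 'hb-api-key'},   # illustrative key name
    report_exceptions=True,             # also connects task_failure
)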
Example #23
    def setUp(self):
        super(CSVImportTestCase, self).setUp()
        self.sent_tasks = defaultdict(list)
        self.cie = User.objects.create(username="******")
        p = self.cie.get_profile()
        p.is_contributor = True
        p.save()
        self.leading_group = GroupInfo.objects.create(name="leading_group", owner=self.cie, creator=self.cie)
        self.cie.groups.add(self.leading_group)
        self.user = User(username="******")
        self.user.email = "*****@*****.**"
        self.user.set_password("password")
        self.user.save()
        self.user.get_profile().is_contributor = True
        self.user.get_profile().save()
        self.group = GroupInfo(name="grp", owner=self.user, creator=self.user, description="grp")
        self.group.save()
        self.user.groups.add(self.group)
        self.client.post("/login/", {"username": "******", "password": "******"})
        task_prerun.connect(self.task_sent_handler)
Example #24
    def init_app(self, app):
        self.app = app
        self.app_ctx = app.app_context()
        # base_url = self._parse_app_url(app)
        # self.req_ctx = app.test_request_context(base_url=base_url)
        self.req_ctx = app.test_request_context()
        new_celery = celery.Celery(
            app.import_name,
            broker=app.config["CELERY_BROKER_URL"],
            backend=app.config["CELERY_RESULT_BACKEND"],
            enable_utc=True,
            timezone=app.config["BABEL_DEFAULT_TIMEZONE"],
        )

        self.celery.__dict__.update(vars(new_celery))
        self.celery.conf.update(app.config)
        self.celery.conf["BROKER_HEARTBEAT"] = 0

        celeryd_init.connect(self._worker_process_init)

        task_postrun.connect(self._task_postrun)
        task_prerun.connect(self._task_prerun)
Example #25
    def __init__(self, parent, **kwargs):
        super().__init__(parent, **kwargs)
        from celery.signals import (
            before_task_publish,
            after_task_publish,
            task_prerun,
            task_retry,
            task_success,
            task_failure,
            task_revoked,
            task_unknown,
            task_rejected,
        )

        before_task_publish.connect(receivers.receiver_before_task_publish)
        after_task_publish.connect(receivers.receiver_after_task_publish)
        task_prerun.connect(receivers.receiver_task_pre_run)
        task_retry.connect(receivers.receiver_task_retry)
        task_success.connect(receivers.receiver_task_success)
        task_failure.connect(receivers.receiver_task_failure)
        task_revoked.connect(receivers.receiver_task_revoked)
        task_unknown.connect(receivers.receiver_task_unknown)
        task_rejected.connect(receivers.receiver_task_rejected)
Example #26
def register_signals(APPENLIGHT_CLIENT):

    def prerun_signal(sender, task_id, task, args, kwargs, *aargs, **kwds):
        task._appenlight_start_time = datetime.utcnow()

    def postrun_signal(sender, task_id, task, args, kwargs, retval, *aargs, **kwds):
        end_time = datetime.utcnow()
        start_time = getattr(task, '_appenlight_start_time', None)
        fake_environ = {'appenlight.view_name': 'celery:' + sender.name}
        gather_data(APPENLIGHT_CLIENT, fake_environ, gather_exception=False,
                    start_time=start_time, end_time=end_time)

    def failure_signal(sender, task_id, exception, args, kwargs, traceback,
                       einfo, *aargs, **kwds):
        end_time = datetime.utcnow()
        start_time = getattr(sender, '_appenlight_start_time', None)
        fake_environ = {'appenlight.view_name': 'celery:' + sender.name}
        gather_data(APPENLIGHT_CLIENT, fake_environ,
                    start_time=start_time, end_time=end_time)

    task_prerun.connect(prerun_signal, weak=False)
    task_postrun.connect(postrun_signal, weak=False)
    task_failure.connect(failure_signal, weak=False)
    return True
Example #27
            BucketStatusCacher().store(bs)

            stat_checker = StatChecker(cfg.COUCHBASE_IP + ":" + cfg.COUCHBASE_PORT,
                                       bucket=bucket,
                                       username=cfg.COUCHBASE_USER,
                                       password=cfg.COUCHBASE_PWD)
            while not stat_checker.check(workload.preconditions):
                time.sleep(1)
            prevWorkload.active = False
            WorkloadCacher().store(prevWorkload)
            bs = BucketStatusCacher().bucketstatus(bucket)
            bs.unblock(bucket)
            BucketStatusCacher().store(bs)


task_prerun.connect(task_prerun_handler)


"""Retrieve all pending tasks from running workloads and distribute them to workers
"""
@celery.task(base=PersistedMQ, ignore_result=True)
def taskScheduler():

    cache = WorkloadCacher()
    workloads = cache.workloads

    rabbitHelper = taskScheduler.rabbitHelper
    tasks = []

    for workload in workloads:
Example #28
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        task_prerun.connect(self.on_task_start)
        task_postrun.connect(self.on_start_end)
Example #29
def restore_schema(task, **kwargs):
    """ Switches the schema back to the one from before running the task. """
    from .compat import get_public_schema_name

    schema_name = get_public_schema_name()
    include_public = True

    if hasattr(task, '_old_schema'):
        schema_name, include_public = task._old_schema

    # If the schema names match, don't do anything.
    if connection.schema_name == schema_name:
        return

    connection.set_schema(schema_name, include_public=include_public)


task_prerun.connect(switch_schema, sender=None,
                    dispatch_uid='tenant_schemas_switch_schema')

task_postrun.connect(restore_schema, sender=None,
                     dispatch_uid='tenant_schemas_restore_schema')


class CeleryApp(Celery):
    def create_task_cls(self):
        return self.subclass_with_self('tenant_schemas_celery.task:TenantTask',
                                       abstract=True, name='TenantTask',
                                       attribute='_app')
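
The switch_schema receiver connected above is not shown in this excerpt; a plausible sketch of its mirror-image role (how the schema name travels with the task, here a '_schema_name' kwarg, is an assumption):

def switch_schema(task, kwargs, **kw):
    """Hedged sketch: remember the active schema, then switch to the
    schema the task was published for."""
    from .compat import get_public_schema_name

    # True as a simplification for whether the public schema was included
    task._old_schema = (connection.schema_name, True)
    schema_name = kwargs.get('_schema_name', get_public_schema_name())
    connection.set_schema(schema_name, include_public=True)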
Example #30
    connection.set_tenant(tenant, include_public=True)


def restore_schema(task, **kwargs):
    """ Switches the schema back to the one from before running the task. """
    from tenant_schemas.utils import get_public_schema_name

    schema_name, include_public = getattr(task,
                                          '_old_schema',
                                          (get_public_schema_name(), True))

    # If the schema names match, don't do anything.
    if connection.schema_name == schema_name:
        return

    connection.set_schema(schema_name, include_public=include_public)


task_prerun.connect(switch_schema, sender=None,
                    dispatch_uid='tenant_schemas_switch_schema')

task_postrun.connect(restore_schema, sender=None,
                     dispatch_uid='tenant_schemas_restore_schema')


class CeleryApp(Celery):
    def create_task_cls(self):
        return self.subclass_with_self('tenant_schemas_celery.task:TenantTask',
                                       abstract=True, name='TenantTask',
                                       attribute='_app')
Example #31
    connection.set_tenant(tenant, include_public=True)


def restore_schema(task, **kwargs):
    """ Switches the schema back to the one from before running the task. """
    from django_tenants.utils import get_public_schema_name

    schema_name, include_public = getattr(task,
                                          '_old_schema',
                                          (get_public_schema_name(), True))

    # If the schema names match, don't do anything.
    if connection.schema_name == schema_name:
        return

    connection.set_schema(schema_name, include_public=include_public)


task_prerun.connect(switch_schema, sender=None,
                    dispatch_uid='django_tenants_switch_schema')

task_postrun.connect(restore_schema, sender=None,
                     dispatch_uid='django_tenants_restore_schema')


class CeleryApp(Celery):
    def create_task_cls(self):
        return self.subclass_with_self('django_tenants_celery.task:TenantTask',
                                       abstract=True, name='TenantTask',
                                       attribute='_app')
Example #32
    task = ManagedTask.objects.get(celery_task_id=task_id)
    task.celery_task_status = 5
    task.is_enable = False
    task.save(check_enable=False)


def on_task_rejected(*args, **kwargs):
    task = kwargs['sender']

    task_id = task.request.id
    task = ManagedTask.objects.get(celery_task_id=task_id)
    task.celery_task_status = 6
    task.is_enable = False
    task.save(check_enable=False)


def on_task_worker_shutting_down(*args, **kwargs):
    ManagedTask.objects.filter(is_enable=True).update(celery_task_status=6)


worker_ready.connect(on_worker_ready, dispatch_uid='on_worker_ready')
task_prerun.connect(on_task_prerun, dispatch_uid='on_task_prerun')
task_postrun.connect(on_task_finished, dispatch_uid='on_task_postrun')

task_success.connect(on_task_success, dispatch_uid='on_task_success')
task_retry.connect(on_task_retry, dispatch_uid='on_task_retry')
task_failure.connect(on_task_failure, dispatch_uid='on_task_failure')
task_revoked.connect(on_task_revoked, dispatch_uid='on_task_revoked')
task_rejected.connect(on_task_rejected, dispatch_uid='on_task_rejected')
worker_shutting_down.connect(on_task_worker_shutting_down,
                             dispatch_uid='on_task_worker_shutting')
Example #33
            stat_checker = StatChecker(cfg.COUCHBASE_IP + ":" +
                                       cfg.COUCHBASE_PORT,
                                       bucket=bucket,
                                       username=cfg.COUCHBASE_USER,
                                       password=cfg.COUCHBASE_PWD)
            while not stat_checker.check(workload.preconditions):
                time.sleep(1)
            prevWorkload.active = False
            WorkloadCacher().store(prevWorkload)
            bs = BucketStatusCacher().bucketstatus(bucket)
            bs.unblock(bucket)
            BucketStatusCacher().store(bs)


task_prerun.connect(task_prerun_handler)
"""Retrieve all pending tasks from running workloads and distributes to workers
"""


@celery.task(base=PersistedMQ, ignore_result=True)
def taskScheduler():

    cache = WorkloadCacher()
    workloads = cache.workloads

    rabbitHelper = taskScheduler.rabbitHelper
    tasks = []

    for workload in workloads:
        if workload.active:
Example #34
    def _install_patches(self):
        Task.apply_async = task_apply_async_wrapper
        before_task_publish.connect(before_task_publish_handler)
        task_prerun.connect(task_prerun_handler)
        task_success.connect(task_success_handler)
        task_failure.connect(task_failure_handler)
Example #35
            (icon, style))
    status_icon.short_description = 'Status'


# listeners to celery signals to store start and end time for tasks
# NOTE: these functions do not filter on the sender/task function

def taskresult_start(sender, task_id, **kwargs):
    try:
        tr = TaskResult.objects.get(task_id=task_id)
        tr.task_start = datetime.now()
        tr.save()
    except Exception as err:
        logger.error("Error saving task start time: %s", err)
        logger.debug("Stack trace for task start time error:\n" + traceback.format_exc())

task_prerun.connect(taskresult_start)


def taskresult_end(sender, task_id, **kwargs):
    try:
        tr = TaskResult.objects.get(task_id=task_id)
        tr.task_end = datetime.now()
        tr.save()
    except Exception as err:
        logger.error("Error saving task end time: %s", err)
        logger.debug("Stack trace for task end time error:\n" + traceback.format_exc())

task_postrun.connect(taskresult_end)
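
If the listeners should only fire for particular tasks, connect them with a sender argument; a hedged sketch (module and task names hypothetical):

from myapp.tasks import index_page  # an @app.task-decorated task

task_prerun.connect(taskresult_start, sender=index_page)
task_postrun.connect(taskresult_end, sender=index_page)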

Example #36
    def install():
        task_prerun.connect(task_prerun_handler)
        task_postrun.connect(task_postrun_handler)
Example #37
            stop_dynamic_analysis(analyzer=analyzer,
                                  pkgname=pkgname,
                                  language=language,
                                  pkgversion=pkgversion)
        except Exception as e:
            logger.error(
                "fail to stop dynamic_worker for pkg %s language %s version %s: %s",
                pkgname, language, pkgversion, str(e))
        if analyzer.FAILURE_FILE and exists(dirname(analyzer.FAILURE_FILE)):
            open(analyzer.FAILURE_FILE, 'a').write(pkgname + '\n')
        return 0
    except Exception as e:
        logger.error("dynamic_worker: %s (type: %s)", str(e), type(e))
        try:
            stop_dynamic_analysis(analyzer=analyzer,
                                  pkgname=pkgname,
                                  language=language,
                                  pkgversion=pkgversion)
        except Exception as e:
            logger.error(
                "fail to stop dynamic_worker for pkg %s language %s version %s: %s",
                pkgname, language, pkgversion, str(e))
        if analyzer.FAILURE_FILE and exists(dirname(analyzer.FAILURE_FILE)):
            open(analyzer.FAILURE_FILE, 'a').write(pkgname + '\n')
        return 0


# need to use registered instance for sender argument.
task_prerun.connect(init_task)
after_setup_task_logger.connect(setup_logging)
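
A hedged illustration of the note above: scoping the receiver to one task would require passing the registered task instance from the app's task registry, e.g. (task name assumed):

task_prerun.connect(init_task, sender=app.tasks['dynamic_worker'])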
Example #38
def setup():
    task_prerun.connect(tornado_prerun)
    task_postrun.connect(tornado_postrun)
Example #39
# SPDX-License-Identifier: GPL-3.0-or-later
import sys

import celery
from celery.signals import celeryd_init, task_postrun, task_prerun

from cachito.workers.celery_logging import (
    cleanup_task_logging,
    cleanup_task_logging_customization,
    setup_task_logging,
    setup_task_logging_customization,
)
from cachito.workers.config import configure_celery, validate_celery_config

# Workaround https://github.com/celery/celery/issues/5416
if celery.version_info < (4, 3) and sys.version_info >= (
        3, 7):  # pragma: no cover
    from re import Pattern
    from celery.app.routes import re as routes_re

    routes_re._pattern_type = Pattern

app = celery.Celery()
configure_celery(app)
celeryd_init.connect(validate_celery_config)
task_prerun.connect(setup_task_logging_customization)
task_prerun.connect(setup_task_logging)
task_postrun.connect(cleanup_task_logging_customization)
task_postrun.connect(cleanup_task_logging)
Example #41
    def install(self):
        task_prerun.connect(self.handle_task_prerun, weak=False)
        task_postrun.connect(self.handle_task_postrun, weak=False)
        task_failure.connect(self.process_failure_signal, weak=False)
Example #42
def register_handlers(taskclass):
    # note: task_sent is deprecated in Celery in favour of after_task_publish
    task_sent.connect(on_task_sent, tasks[taskclass.name])
    task_prerun.connect(on_task_prerun, tasks[taskclass.name])
    task_postrun.connect(on_task_postrun, tasks[taskclass.name])
    task_failure.connect(on_task_failure, tasks[taskclass.name])
    return taskclass