Code example #1
def install():
    installed = scout_apm.core.install()
    if installed is False:
        return

    task_prerun.connect(prerun_callback)
    task_postrun.connect(postrun_callback)
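The prerun_callback and postrun_callback wired up here are defined elsewhere in scout_apm's Celery integration and are not part of the excerpt. As a rough sketch of the shape such a pair takes (the timing logic below is an assumption for illustration, not scout_apm's actual code):

import time

from celery.signals import task_prerun, task_postrun

_starts = {}  # task_id -> monotonic start time (illustrative bookkeeping)

def prerun_callback(task_id=None, task=None, **kwargs):
    # Remember when the task started.
    _starts[task_id] = time.monotonic()

def postrun_callback(task_id=None, task=None, **kwargs):
    # Report how long the task ran, if its prerun was seen.
    started = _starts.pop(task_id, None)
    if started is not None:
        print('task %s took %.3fs' % (task_id, time.monotonic() - started))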
Code example #2
def register_signals(APPENLIGHT_CLIENT):

    def prerun_signal(sender, task_id, task, args, kwargs, *aargs, **kwds):
        task._appenlight_start_time = datetime.utcnow()

    def postrun_signal(sender, task_id, task, args, kwargs, retval, *aargs, **kwds):
        end_time = datetime.utcnow()
        start_time = getattr(task, '_appenlight_start_time')
        fake_environ = {'appenlight.view_name': 'celery:' + sender.name}
        gather_data(APPENLIGHT_CLIENT, fake_environ, gather_exception=False,
                    start_time=start_time, end_time=end_time)

    def failure_signal(sender, task_id, exception, args, kwargs, traceback,
                       einfo, *aargs, **kwds):
        end_time = datetime.utcnow()
        start_time = getattr(sender, '_appenlight_start_time')
        fake_environ = {'appenlight.view_name': 'celery:' + sender.name}
        gather_data(APPENLIGHT_CLIENT, fake_environ,
                    start_time=start_time, end_time=end_time)

    def after_setup_logger_signal(sender=None, logger=None, loglevel=None,
                                  logfile=None, format=None,
                                  colorize=None, **kwargs):
        if APPENLIGHT_CLIENT.config['logging'] and APPENLIGHT_CLIENT.config['enabled']:
            APPENLIGHT_CLIENT.register_logger(logger)

    task_prerun.connect(prerun_signal, weak=False)
    task_postrun.connect(postrun_signal, weak=False)
    task_failure.connect(failure_signal, weak=False)
    after_setup_logger.connect(after_setup_logger_signal, weak=False)
    return True
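Every connect call above passes weak=False, and that detail matters: the receivers are closures local to register_signals, and Celery signals hold only weak references by default, so without weak=False the handlers could be garbage-collected as soon as register_signals returns and would silently stop firing. A minimal demonstration of the idiom (illustrative, not appenlight code):

from celery.signals import task_postrun

def attach_handler():
    def handler(**kwargs):
        print('task finished')
    # With the default weak=True, `handler` could be collected once
    # attach_handler() returns; weak=False keeps a strong reference.
    task_postrun.connect(handler, weak=False)

attach_handler()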
Code example #3
    def __init__(self,
                 model,
                 key='pk',
                 value=None,
                 instances=False,
                 auto_create=False,
                 *args,
                 **kwargs):
        assert value is not None

        super(ModelDict, self).__init__(*args, **kwargs)

        cls_name = type(self).__name__
        model_name = model.__name__

        self.key = key
        self.value = value

        self.model = model
        self.instances = instances
        self.auto_create = auto_create

        self.cache_key = '%s:%s:%s' % (cls_name, model_name, self.key)
        self.last_updated_cache_key = '%s.last_updated:%s:%s' % (
            cls_name, model_name, self.key)

        request_finished.connect(self._cleanup)
        post_save.connect(self._post_save, sender=model)
        post_delete.connect(self._post_delete, sender=model)

        if has_celery:
            task_postrun.connect(self._cleanup)
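The has_celery flag tested at the end is set at import time elsewhere in the module. A plausible guard, assuming the conventional try/except import pattern (the excerpt does not show it):

try:
    from celery.signals import task_postrun
    has_celery = True
except ImportError:
    has_celery = False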
Code example #4
File: sea.py Project: HeathLee/peeweext
 def _try_setup_celery(self):
     try:
         from celery.signals import task_prerun, task_postrun
         task_prerun.connect(lambda *arg, **kw: self.connect_db())
         task_postrun.connect(lambda *arg, **kw: self.close_db())
     except ImportError:
         pass
Code example #5
File: tasks.py Project: MechanisM/mirocommunity
    def test_batch(self):
        """Tests whether batching works."""
        self._clear_index()
        video1 = self.create_video(name='Video1', update_index=False)
        video2 = self.create_video(name='Video2', update_index=False)
        video3 = self.create_video(name='Video3', update_index=False)
        expected = set()
        results = set((r.pk for r in SearchQuerySet()))
        self.assertEqual(results, expected)

        self.batches = 0

        def count_batch(sender, **kwargs):
            self.batches = self.batches + 1

        task_postrun.connect(count_batch, sender=haystack_update)

        haystack_batch_update.apply(args=(Video._meta.app_label,
                                          Video._meta.module_name),
                                    kwargs={'batch_size': 1})
        self.assertEqual(self.batches, 3)

        expected = set((video1.pk, video2.pk, video3.pk))
        results = set((int(r.pk) for r in SearchQuerySet()))
        self.assertEqual(results, expected)
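One caveat when connecting a receiver inside a test method, as test_batch does: the receiver stays registered after the test returns, so a later haystack_update run could still fire it against a finished test instance. A sketch of the usual cleanup, assuming a unittest.TestCase subclass (not part of the original test):

import unittest

from celery.signals import task_postrun

class BatchCountTest(unittest.TestCase):
    def test_batch(self):
        self.batches = 0

        def count_batch(sender, **kwargs):
            self.batches += 1

        task_postrun.connect(count_batch, sender=None)
        # Unregister the receiver even if the test fails, so it cannot
        # fire once this test instance is done.
        self.addCleanup(task_postrun.disconnect, count_batch, sender=None)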
Code example #6
  def __init__(self, path=None):
    self.__dict__ = self.__state
    if not path:
      if not self.path:
        raise KitError('No path specified')

    else:
      path = abspath(path)

      if self.path and path != self.path:
        raise KitError('Invalid path specified: %r' % path)

      elif not self.path:
        self.path = path

        with open(path) as handle:
          self.config = load(handle)

        if self.root not in sys_path:
          sys_path.insert(0, self.root)

        for module in self._modules:
          __import__(module)

        # Session removal handlers
        task_postrun.connect(_remove_session)
        request_tearing_down.connect(_remove_session)
Code example #7
File: tasks.py Project: MechanisM/mirocommunity
    def test_remove(self):
        """
        ``remove`` kwarg should be passed on to the batches.

        """
        self._clear_index()
        video1 = self.create_video(name='Video1', update_index=False)

        def get_remove_passed(sender, **kwargs):
            self.assertTrue('remove' in kwargs['kwargs'])
            self.remove = kwargs['kwargs']['remove']

        task_postrun.connect(get_remove_passed, sender=haystack_update)

        expected = True
        haystack_batch_update.apply(args=(Video._meta.app_label,
                                          Video._meta.module_name),
                                    kwargs={'remove': expected})
        self.assertEqual(self.remove, expected)

        expected = False
        haystack_batch_update.apply(args=(Video._meta.app_label,
                                          Video._meta.module_name),
                                    kwargs={'remove': expected})
        self.assertEqual(self.remove, expected)
Code example #8
    def test_index_updates(self):
        """Test that index updates are only run at the end of an update."""
        self.updates = 0
        self.removals = 0
        self._clear_index()

        def count_update(sender, **kwargs):
            self.updates += 1
        task_postrun.connect(count_update, sender=haystack_update)

        def count_removal(sender, **kwargs):
            self.removals += 1
        task_postrun.connect(count_removal, sender=haystack_remove)

        feed = self.create_feed('http://google.com')
        feed_import = FeedImport.objects.create(source=feed,
                                                auto_approve=True)
        video_iter = [
            self.create_vidscraper_video(),
            self.create_vidscraper_video(),
            self.create_vidscraper_video(),
        ]
        Source.update(feed, video_iter, feed_import)
        self.assertEqual(self.updates, 1)
        self.assertEqual(self.removals, 0)
        self.assertEqual(SearchQuerySet().count(), len(video_iter))
Code example #9
    def add_run_signals(cls, dapper_local):
        from celery.signals import task_prerun, task_postrun, task_failure, task_revoked
        task_prerun.connect(cls.task_begin)
        task_postrun.connect(cls.task_end)
        task_failure.connect(cls.task_fail)
        task_revoked.connect(cls.task_revoked)

        cls.dapper_local = dapper_local
Code example #10
 def connect(self, task_started, task_retried, task_failed, task_succeeded):
     """Connect the celery BPMTask events to callback celery tasks."""
     self.task_started = task_started
     self.task_retried = task_retried
     self.task_failed = task_failed
     self.task_succeeded = task_succeeded
     task_prerun.connect(self._task_prerun)
     task_postrun.connect(self._task_postrun)
Code example #11
def install():
    installed = scout_apm.core.install()
    if not installed:
        return

    before_task_publish.connect(before_publish_callback)
    task_prerun.connect(prerun_callback)
    task_postrun.connect(postrun_callback)
Code example #12
def celery_enable_all():
    """Enable johnny-cache in all celery tasks, clearing the local-store
    after each task."""
    from celery.signals import task_prerun, task_postrun, task_failure
    task_prerun.connect(prerun_handler)
    task_postrun.connect(postrun_handler)
    # Also have to cleanup on failure.
    task_failure.connect(postrun_handler)
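The prerun_handler and postrun_handler referenced here are defined elsewhere in johnny-cache; per the docstring, their job is to clear the local store around each task. A rough stand-in for what such handlers could look like (illustrative, not johnny-cache's real implementation):

# Illustrative stand-in for johnny-cache's thread-local store and handlers.
_localstore = {}

def prerun_handler(*args, **kwargs):
    _localstore.clear()  # start each task with an empty local store

def postrun_handler(*args, **kwargs):
    _localstore.clear()  # drop per-task entries once the task finishes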
Code example #13
File: __init__.py Project: ssfdust/smorest-sfs
    def update_celery(self, new_celery: celery.Celery) -> None:
        if self.app:
            self.celery.__dict__.update(vars(new_celery))
            self.celery.conf.update(self.app.config.get_namespace("CELERY_"))

            worker_process_init.connect(self._worker_process_init)

            task_postrun.connect(self._task_postrun)
            task_prerun.connect(self._task_prerun)
Code example #14
File: tracing.py Project: owais/python-celery
 def connect_traced_handlers(self):
     if self._propagate:
         before_task_publish.connect(self._prepublish, weak=False)
         after_task_publish.connect(self._postpublish, weak=False)
     task_prerun.connect(self._start_span, weak=False)
     task_failure.connect(self._tag_error, weak=False)
     task_retry.connect(self._tag_retry, weak=False)
     task_postrun.connect(self._finish_span, weak=False)
     log.debug('Registered CeleryTracing signal handlers.')
Code example #15
File: celery.py Project: vincenthcui/sentry-python
    def setup_once():
        task_prerun.connect(_handle_task_prerun, weak=False)
        task_postrun.connect(_handle_task_postrun, weak=False)
        task_failure.connect(_process_failure_signal, weak=False)

        # This logger logs every status of every task that ran on the worker.
        # Meaning that every task's breadcrumbs are full of stuff like "Task
        # <foo> raised unexpected <bar>".
        ignore_logger("celery.worker.job")
Code example #16
 def _register_handlers(self, app):
     app.before_request(self.connect_db)
     app.teardown_request(self.close_db)
     try:
         from celery.signals import task_prerun, task_postrun
         task_prerun.connect(lambda *arg, **kw: self.connect_db())
         task_postrun.connect(lambda *arg, **kw: self.close_db(None))
     except ImportError:
         pass
Code example #17
File: impl.py Project: Gandi/python-zipkin
def bind(endpoint=None):
    if not endpoint:
        endpoint = Endpoint("Celery")

    events.endpoint = endpoint

    log.info("Attaching zipkin to celery signals")
    before_task_publish.connect(events.task_send_handler)
    task_prerun.connect(events.task_prerun_handler)
    task_postrun.connect(events.task_postrun_handler)
    log.info("zipkin signals attached")
Code example #18
    def on_worker_ready(self, sender, **_kwargs):
        task_prerun.connect(self.on_task_prerun)
        task_postrun.connect(self.on_task_postrun)

        # TODO Similar metrics for other pool implementations.
        if ThreadTaskPool and isinstance(sender.pool, ThreadTaskPool):
            collector = CeleryThreadPoolCollector(sender.pool.executor)
            prometheus_client.REGISTRY.register(collector)

        prometheus_client.start_http_server(9000)
        log.info("Prometheus exporter started for Celery worker on :9000")
Code example #19
def install(app=None):
    if app is not None:
        copy_configuration(app)

    installed = scout_apm.core.install()
    if not installed:
        return

    before_task_publish.connect(before_task_publish_callback)
    task_prerun.connect(task_prerun_callback)
    task_postrun.connect(task_postrun_callback)
Code example #20
def connect(app):
    task_prerun.connect(report_monitor_begin, weak=False)
    task_postrun.connect(report_monitor_complete, weak=False)

    # XXX(dcramer): Celery docs suggest it should be app.conf.beat_schedule, which
    # was likely a change in 4.x. This code is intended to support "any celery" and be
    # adopted into sentry-sdk core, thus we support it here.
    schedule = app.conf.beat_schedule if hasattr(
        app.conf, 'beat_schedule') else app.conf['CELERYBEAT_SCHEDULE']
    for schedule_name, monitor_id in six.iteritems(
            settings.SENTRY_CELERYBEAT_MONITORS):
        schedule[schedule_name].setdefault('options', {}).setdefault(
            'headers', {}).setdefault('X-Sentry-Monitor', monitor_id)
Code example #21
    def init_app(self, app, sentry=None):
        self.app = app
        new_celery = celery.Celery(
            app.import_name,
            broker=app.config['CELERY_BROKER_URL'],
            backend=app.config['CELERY_RESULT_BACKEND'],
        )
        self.celery.__dict__.update(vars(new_celery))
        self.celery.conf.update(app.config)

        worker_process_init.connect(self._worker_process_init)

        task_postrun.connect(self._task_postrun)
        task_prerun.connect(self._task_prerun)
Code example #22
File: celery.py Project: jensneuhaus/zeus
    def init_app(self, app):
        self.app = app
        new_celery = celery.Celery(
            app.import_name,
            broker=app.config["CELERY_BROKER_URL"],
            backend=app.config["CELERY_RESULT_BACKEND"],
        )
        # XXX(dcramer): why the hell am I wasting time trying to make Celery work?
        self.celery.__dict__.update(vars(new_celery))
        self.celery.conf.update(app.config)

        worker_process_init.connect(self._worker_process_init)

        task_postrun.connect(self._task_postrun)
        task_prerun.connect(self._task_prerun)
Code example #23
File: monitors.py Project: yaoqi/sentry
def connect(app):
    task_prerun.connect(report_monitor_begin, weak=False)
    task_postrun.connect(report_monitor_complete, weak=False)

    # XXX(dcramer): Celery docs suggest it should be app.conf.beat_schedule, which
    # was likely a change in 4.x. This code is intended to support "any celery" and be
    # adopted into sentry-sdk core, thus we support it here.
    schedule = app.conf.beat_schedule if hasattr(
        app.conf, 'beat_schedule') else app.conf['CELERYBEAT_SCHEDULE']
    for schedule_name, monitor_id in six.iteritems(settings.SENTRY_CELERYBEAT_MONITORS):
        schedule[schedule_name].setdefault('options', {}).setdefault(
            'headers', {}).setdefault('X-Sentry-Monitor', monitor_id)
Code example #24
    def init_app(self, app, sentry):
        self.app = app
        new_celery = celery.Celery(
            app.import_name,
            broker=app.config['CELERY_BROKER_URL'],
            backend=app.config['CELERY_RESULT_BACKEND'],
        )
        # XXX(dcramer): why the hell am I wasting time trying to make Celery work?
        self.celery.__dict__.update(vars(new_celery))
        self.celery.conf.update(app.config)

        task_prerun.connect(self._task_prerun)
        task_postrun.connect(self._task_postrun)

        if sentry:
            register_signal(sentry.client)
            register_logger_signal(sentry.client)
Code example #25
    def provide_celery(self, app: Flask) -> Celery:
        celery = Celery(
            app.import_name,
            backend=app.config["CELERY_RESULT_BACKEND"],
            broker=app.config["CELERY_BROKER_URL"],
        )
        celery.conf["CELERYD_HIJACK_ROOT_LOGGER"] = False
        celery.conf["CELERY_IMPORTS"] = (
            "matcher.tasks.export",
            "matcher.tasks.import_",
            "matcher.tasks.object",
        )
        celery.conf.update(app.config)
        celery.conf.ONCE = {
            "backend": "celery_once.backends.Redis",
            "settings": {
                "url": app.config["CELERY_RESULT_BACKEND"],
                "default_timeout": 60 * 60,
            },
        }

        class ContextTask(Task):
            def __call__(self, *args, **kwargs):
                with app.app_context():
                    return self.run(*args, **kwargs)

        class OnceTask(QueueOnce):
            def __call__(self, *args, **kwargs):
                with app.app_context():
                    return self.run(*args, **kwargs)

        celery.Task = ContextTask
        celery.OnceTask = OnceTask

        def handle_celery_postrun(retval=None, *args, **kwargs):
            """After each Celery task, teardown our db session"""
            if app.config["SQLALCHEMY_COMMIT_ON_TEARDOWN"]:
                if not isinstance(retval, Exception):
                    db.session.commit()
            # When not running eagerly, Flask's request teardown won't run, so remove the session here
            if not app.config["CELERY_ALWAYS_EAGER"]:
                db.session.remove()

        task_postrun.connect(handle_celery_postrun)

        return celery
Code example #26
    def install(self,
                config={},
                context_generators={},
                report_exceptions=False):
        """
        Setup Celery - Honeybadger integration.
        :param dict[str, T] config: a configuration object to read config from.
        :param context_generators: Context generators
        :param bool report_exceptions: whether to automatically report exceptions on tasks or not.
        """
        self.initialize_honeybadger(config)
        self.context_generators = context_generators
        self.report_exceptions = report_exceptions
        task_prerun.connect(self.setup_context, weak=False)
        task_postrun.connect(self.reset_context, weak=False)
        if self.report_exceptions:
            task_failure.connect(self._failure_handler, weak=False)

        self._patch_generic_request_payload()
        logger.info('Registered Celery signal handlers')
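How install gets called depends on the plugin object that defines it; a plausible call site, with the class name assumed purely for illustration:

# Hypothetical usage; `CeleryHoneybadger` is an assumed name for the
# class that defines install() above.
plugin = CeleryHoneybadger()
plugin.install(
    config={'api_key': 'hbp_xxx', 'environment': 'production'},
    report_exceptions=True,  # also connects the task_failure handler
)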
Code example #27
    def init_app(self, app):
        self.app = app
        self.app_ctx = app.app_context()
        # base_url = self._parse_app_url(app)
        # self.req_ctx = app.test_request_context(base_url=base_url)
        self.req_ctx = app.test_request_context()
        new_celery = celery.Celery(
            app.import_name,
            broker=app.config["CELERY_BROKER_URL"],
            backend=app.config["CELERY_RESULT_BACKEND"],
            enable_utc=True,
            timezone=app.config["BABEL_DEFAULT_TIMEZONE"],
        )

        self.celery.__dict__.update(vars(new_celery))
        self.celery.conf.update(app.config)
        self.celery.conf["BROKER_HEARTBEAT"] = 0

        celeryd_init.connect(self._worker_process_init)

        task_postrun.connect(self._task_postrun)
        task_prerun.connect(self._task_prerun)
Code example #28
def generateCsvResults(request):
    my_lock = redis.Redis().lock("Cargar")
    have_lock = False  # defined up front so the finally block can always see it
    try:
        have_lock = my_lock.acquire(blocking=False)
        if have_lock:
            if (not utils.allLoaded()
                    or request.session.get('calculationStatus', -1) == 0):
                return JsonResponse({
                    'Error': "Faltan cargar matrices o se estan cargando alguna"
                })
            indvList, dictParam, dictSettings = utils.getIndivList_ParamDict_SettingsDict(
                request.GET, request.COOKIES)
            print(dictParam)
            utils.writeSettings(str(request.user.id), dictSettings, dictParam)
            asyncKey = delegator.apply_async(args=[
                request.GET, request.session.session_key, request.COOKIES,
                str(request.user.id)
            ],
                                             queue='delegate')
            request.session['asyncKey'] = asyncKey.id
            request.session['calculationStatus'] = 0
            response = redirect('index')
            task_postrun.connect(shutdown_worker, sender=delegator)
            return response
        else:
            print("Did not acquire lock.")
            return redirect('index')
    except Exception:
        request.session['calculationStatus'] = -1
        print("Crash")
        return redirect('index')
    finally:
        if have_lock:
            my_lock.release()
Code example #29
def create_app(config):
    """ Create a fully configured Celery application object.

    Args:
        config (Config): A reference to a lightflow configuration object.

    Returns:
        Celery: A fully configured Celery application object.
    """

    # configure the celery logging system with the lightflow settings
    setup_logging.connect(partial(_initialize_logging, config), weak=False)
    task_postrun.connect(partial(_cleanup_workflow, config), weak=False)

    # patch Celery to use cloudpickle instead of pickle for serialisation
    patch_celery()

    # create the main celery app and load the configuration
    app = Celery('lightflow')
    app.conf.update(**config.celery)

    # overwrite user supplied settings to make sure celery works with lightflow
    app.conf.update(task_serializer='pickle',
                    accept_content=['pickle'],
                    result_serializer='pickle',
                    task_default_queue=DefaultJobQueueName.Task)

    if isinstance(app.conf.include, list):
        app.conf.include.extend(LIGHTFLOW_INCLUDE)
    else:
        if len(app.conf.include) > 0:
            raise ConfigOverwriteError(
                'The content in the include config will be overwritten')
        app.conf.include = LIGHTFLOW_INCLUDE

    return app
Code example #30
File: models.py Project: maxiberta/django-modeldict
    def __init__(self, model, key='pk', value=None, instances=False, auto_create=False, *args, **kwargs):
        assert value is not None

        super(ModelDict, self).__init__(*args, **kwargs)

        cls_name = type(self).__name__
        model_name = model.__name__

        self.key = key
        self.value = value

        self.model = model
        self.instances = instances
        self.auto_create = auto_create

        self.remote_cache_key = '%s:%s:%s' % (cls_name, model_name, self.key)
        self.remote_cache_last_updated_key = '%s.last_updated:%s:%s' % (cls_name, model_name, self.key)

        request_finished.connect(self._cleanup)
        post_save.connect(self._post_save, sender=model)
        post_delete.connect(self._post_delete, sender=model)

        if has_celery:
            task_postrun.connect(self._cleanup)
Code example #31
def register_signals(APPENLIGHT_CLIENT):

    def prerun_signal(sender, task_id, task, args, kwargs, *aargs, **kwds):
        task._appenlight_start_time = datetime.utcnow()

    def postrun_signal(sender, task_id, task, args, kwargs, retval, *aargs, **kwds):
        end_time = datetime.utcnow()
        start_time = getattr(task, '_appenlight_start_time')
        fake_environ = {'appenlight.view_name': 'celery:' + sender.name}
        gather_data(APPENLIGHT_CLIENT, fake_environ, gather_exception=False,
                    start_time=start_time, end_time=end_time)

    def failure_signal(sender, task_id, exception, args, kwargs, traceback,
                       einfo, *aargs, **kwds):
        end_time = datetime.utcnow()
        start_time = getattr(sender, '_appenlight_start_time')
        fake_environ = {'appenlight.view_name': 'celery:' + sender.name}
        gather_data(APPENLIGHT_CLIENT, fake_environ,
                    start_time=start_time, end_time=end_time)

    task_prerun.connect(prerun_signal, weak=False)
    task_postrun.connect(postrun_signal, weak=False)
    task_failure.connect(failure_signal, weak=False)
    return True
Code example #32
                if len(indexed_keys) > 0:
                    if keys is not None and len(keys) > 0:
                        updateQueryBuilders.apply_async(args=[template, bucket, keys[0]])

                # put created item into specified cc_queues (if specified)
                # and item is not set to expire
                if template["cc_queues"] is not None and template["ttl"] == 0:
                    for queue in template["cc_queues"]:
                        queue = str(queue)
                        rabbitHelper.declare(queue)
                        if keys is not None and len(keys) > 0:
                            rabbitHelper.putMsg(queue, json.dumps(keys))
        else:
            logger.error("Error during multi set")

task_postrun.connect(task_postrun_handler, weak=False)

"""
Generates list of tasks to run based on params passed in to workload
"""
@celery.task(base=PersistedMQ)
def queue_op_cycles(workload):

    # read doc template
    template = Template.from_cache(str(workload.template))
    if template is None:
        logger.error("no doc template imported")
        return

    rabbitHelper = queue_op_cycles.rabbitHelper
Code example #33
 def contribute_to_class(self, model, name):
     super(UserOptionManager, self).contribute_to_class(model, name)
     task_postrun.connect(self.clear_cache)
     request_finished.connect(self.clear_cache)
Code example #34
File: groupmeta.py Project: BlueMoebius/sentry
 def contribute_to_class(self, model, name):
     model.CacheNotPopulated = CacheNotPopulated
     super(GroupMetaManager, self).contribute_to_class(model, name)
     task_postrun.connect(self.clear_local_cache)
     request_finished.connect(self.clear_local_cache)
Code example #35
File: logger.py Project: jiaxiangkong/jumpserver
 def __init__(self, *args, **kwargs):
     super().__init__(*args, **kwargs)
     task_prerun.connect(self.on_task_start)
     task_postrun.connect(self.on_start_end)
Code example #36
File: manager.py Project: Tug/sentry
 def __init__(self, *args, **kwargs):
     super(MetaManager, self).__init__(*args, **kwargs)
     task_postrun.connect(self.clear_cache)
     request_finished.connect(self.clear_cache)
     self.__metadata = {}
Code example #37
 def contribute_to_class(self, model, name):
     super(OrganizationOptionManager, self).contribute_to_class(model, name)
     task_postrun.connect(self.clear_local_cache)
     request_finished.connect(self.clear_local_cache)
Code example #38
                # allow multi set keys to be consumed
                keys = retval

                # note template was converted to dict for mset
                template = args[1]
                if template["cc_queues"] is not None:
                    for queue in template["cc_queues"]:
                        queue = str(queue)
                        rabbitHelper.declare(queue)
                        if keys is not None and len(keys) > 0:
                            rabbitHelper.putMsg(queue, json.dumps(keys))
        else:
            logger.error("Error during multi set")
            logger.error(retval)

task_postrun.connect(task_postrun_handler)

"""Generates list of tasks to run based on params passed in to workload
until post conditions(if any) are hit
"""
@celery.task(base=PersistedMQ)
def run(workload, prevWorkload=None):
    rabbitHelper = run.rabbitHelper

    cache = WorkloadCacher()
    workload.active = True
    cache.store(workload)

    bucket = str(workload.bucket)
    task_queue = workload.task_queue
Code example #39
File: app.py Project: bufke/tenant-schemas-celery
    connection.set_tenant(tenant, include_public=True)


def restore_schema(task, **kwargs):
    """ Switches the schema back to the one from before running the task. """
    from tenant_schemas.utils import get_public_schema_name

    schema_name, include_public = getattr(task,
                                          '_old_schema',
                                          (get_public_schema_name(), True))

    # If the schema names match, don't do anything.
    if connection.schema_name == schema_name:
        return

    connection.set_schema(schema_name, include_public=include_public)


task_prerun.connect(switch_schema, sender=None,
                    dispatch_uid='tenant_schemas_switch_schema')

task_postrun.connect(restore_schema, sender=None,
                     dispatch_uid='tenant_schemas_restore_schema')


class CeleryApp(Celery):
    def create_task_cls(self):
        return self.subclass_with_self('tenant_schemas_celery.task:TenantTask',
                                       abstract=True, name='TenantTask',
                                       attribute='_app')
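The excerpt opens with the tail of switch_schema, the task_prerun counterpart of restore_schema: it stashes the current schema on the task (the _old_schema read back above) and then activates the tenant the task was queued for. A hedged reconstruction, with get_schema_name_from_task assumed as a helper name:

# Hedged reconstruction of the prerun handler whose tail appears above;
# `get_schema_name_from_task` is an assumed helper, not confirmed source.
def switch_schema(task, kwargs, **kw):
    """ Switches the schema to the one the task was queued under. """
    from tenant_schemas.utils import get_tenant_model

    old_schema = (connection.schema_name, connection.include_public_schema)
    setattr(task, '_old_schema', old_schema)  # read back by restore_schema

    schema_name = get_schema_name_from_task(task, kwargs)
    tenant = get_tenant_model().objects.get(schema_name=schema_name)
    connection.set_tenant(tenant, include_public=True)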
Code example #40
File: models.py Project: emory-libraries/eulcommon
            (icon, style))
    status_icon.short_description = 'Status'


# listeners to celery signals to store start and end time for tasks
# NOTE: these functions do not filter on the sender/task function

def taskresult_start(sender, task_id, **kwargs):
    try:
        tr = TaskResult.objects.get(task_id=task_id)
        tr.task_start = datetime.now()
        tr.save()
    except Exception as err:
        logger.error("Error saving task start time: %s", err)
        logger.debug("Stack trace for task start time error:\n" + traceback.format_exc())

task_prerun.connect(taskresult_start)


def taskresult_end(sender, task_id, **kwargs):
    try:
        tr = TaskResult.objects.get(task_id=task_id)
        tr.task_end = datetime.now()
        tr.save()
    except Exception as err:
        logger.error("Error saving task end time: %s", err)
        logger.debug("Stack trace for task end time error:\n" + traceback.format_exc())

task_postrun.connect(taskresult_end)
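
As the note above points out, taskresult_start and taskresult_end run for every task on the worker. When only particular tasks should be tracked, connect accepts a sender argument that restricts dispatch to a single task, exactly as the haystack examples earlier do. A hypothetical illustration:

from celery import Celery
from celery.signals import task_postrun

app = Celery('demo')

@app.task
def fetch_page(url):
    return url

# Hypothetical: receive postrun events only for fetch_page runs.
@task_postrun.connect(sender=fetch_page)
def fetch_page_done(sender=None, task_id=None, **kwargs):
    print('fetch_page %s finished' % task_id)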

Code example #41
File: tasks.py Project: fnp/wolnelektury
# -*- coding: utf-8 -*-
# This file is part of Wolnelektury, licensed under GNU Affero GPLv3 or later.
# Copyright © Fundacja Nowoczesna Polska. See NOTICE for more information.
#
from celery.signals import task_postrun
from waiter.models import WaitedFile


def task_delete_after(task_id=None, **kwargs):
    WaitedFile.objects.filter(task_id=task_id).delete()
task_postrun.connect(task_delete_after)
Code example #42
File: manager.py Project: turleyhari/sentry
 def __init__(self, *args, **kwargs):
     super(UserOptionManager, self).__init__(*args, **kwargs)
     task_postrun.connect(self.clear_cache)
     request_finished.connect(self.clear_cache)
Code example #43
File: task.py Project: JaredKerim-Mozilla/zamboni
    This simply wraps celery's `@task` decorator and stores the task calls
    until after the request is finished, then fires them off.

    """
    abstract = True

    def original_apply_async(self, *args, **kwargs):
        return super(PostRequestTask, self).apply_async(*args, **kwargs)

    def apply_async(self, *args, **kwargs):
        _append_task((self, args, kwargs))


# Replacement `@task` decorator.
task = partial(base_task, base=PostRequestTask)


# Hook the signal handlers up.
# Send the tasks to celery when the request is finished.
request_finished.connect(_send_tasks,
                         dispatch_uid='request_finished_tasks')
# Also send the tasks when a task is finished (outside the request-response
# cycle, when a task calls another task).
task_postrun.connect(_send_tasks, dispatch_uid='tasks_finished_tasks')

# And make sure to discard the task queue when we have an exception in the
# request-response cycle.
got_request_exception.connect(_discard_tasks,
                              dispatch_uid='request_exception_tasks')
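The helpers _append_task, _send_tasks and _discard_tasks are defined earlier in the same file; the docstring fragment at the top describes the intent. A hedged sketch of that queue-and-flush machinery (only the names come from the excerpt; the bodies are assumptions):

import threading

_locals = threading.local()

def _get_task_queue():
    if not hasattr(_locals, 'task_queue'):
        _locals.task_queue = []
    return _locals.task_queue

def _append_task(t):
    _get_task_queue().append(t)  # remember the call instead of sending it

def _send_tasks(**kwargs):
    queue = _get_task_queue()
    while queue:
        task, args, kw = queue.pop(0)
        task.original_apply_async(*args, **kw)  # actually send it now

def _discard_tasks(**kwargs):
    _get_task_queue()[:] = []  # request failed: drop everything queued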
Code example #44
File: allPythonContent.py Project: Mondego/pyreco
 def install():
     task_prerun.connect(task_prerun_handler)
     task_postrun.connect(task_postrun_handler)
Code example #45
File: store.py Project: daevaorn/sentry
 def connect_signals(self):
     from celery.signals import task_postrun
     from django.core.signals import request_finished
     task_postrun.connect(self.maybe_clean_local_cache)
     request_finished.connect(self.maybe_clean_local_cache)
Code example #46
File: manager.py Project: turleyhari/sentry
 def __init__(self, field_name, *args, **kwargs):
     super(InstanceMetaManager, self).__init__(*args, **kwargs)
     self.field_name = field_name
     task_postrun.connect(self.clear_cache)
     request_finished.connect(self.clear_cache)
Code example #47
File: decorators.py Project: vitorio/ocropodium
def register_handlers(taskclass):
    task_sent.connect(on_task_sent, tasks[taskclass.name])
    task_prerun.connect(on_task_prerun, tasks[taskclass.name])
    task_postrun.connect(on_task_postrun, tasks[taskclass.name])
    task_failure.connect(on_task_failure, tasks[taskclass.name])
    return taskclass
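Because register_handlers returns the class it was given, it works as a class decorator; the task has to be present in the tasks registry by the time the decorator runs. A hypothetical usage (class and task name assumed):

# Hypothetical usage; assumes BinarizePageTask has already been
# registered in `tasks` by the surrounding framework.
@register_handlers
class BinarizePageTask(Task):
    name = 'ocropodium.binarize_page'

    def run(self, *args, **kwargs):
        pass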
Code example #48
 def install(self):
     task_prerun.connect(self.handle_task_prerun, weak=False)
     task_postrun.connect(self.handle_task_postrun, weak=False)
     task_failure.connect(self.process_failure_signal, weak=False)
Code example #49
File: projectoption.py Project: PostPCEra/sentry
 def __init__(self, *args, **kwargs):
     super(ProjectOptionManager, self).__init__(*args, **kwargs)
     task_postrun.connect(self.clear_local_cache)
     request_finished.connect(self.clear_local_cache)
     self.__cache = {}