Code example #1
 def create(self, w):
     semaphore = None
     max_restarts = None
     if w.app.conf.worker_pool in GREEN_POOLS:  # pragma: no cover
         warnings.warn(UserWarning(W_POOL_SETTING))
     threaded = not w.use_eventloop or IS_WINDOWS
     procs = w.min_concurrency
     w.process_task = w._process_task
     if not threaded:
         semaphore = w.semaphore = LaxBoundedSemaphore(procs)
         w._quick_acquire = w.semaphore.acquire
         w._quick_release = w.semaphore.release
         max_restarts = 100
         if w.pool_putlocks and w.pool_cls.uses_semaphore:
             w.process_task = w._process_task_sem
     allow_restart = w.pool_restarts
     pool = w.pool = self.instantiate(
         w.pool_cls, w.min_concurrency,
         initargs=(w.app, w.hostname),
         maxtasksperchild=w.max_tasks_per_child,
         max_memory_per_child=w.max_memory_per_child,
         timeout=w.time_limit,
         soft_timeout=w.soft_time_limit,
         putlocks=w.pool_putlocks and threaded,
         lost_worker_timeout=w.worker_lost_wait,
         threads=threaded,
         max_restarts=max_restarts,
         allow_restart=allow_restart,
         forking_enable=True,
         semaphore=semaphore,
         sched_strategy=self.optimization,
         app=w.app,
     )
     _set_task_join_will_block(pool.task_join_will_block)
     return pool
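The create() method in code example #1 emits UserWarning(W_POOL_SETTING) when a green pool is selected through the worker_pool setting rather than the -P command-line option. A minimal sketch of a configuration that would hit that branch once the worker builds its pool (the app name and broker URL are assumptions for illustration):

from celery import Celery

app = Celery('proj', broker='memory://')
# Selecting eventlet/gevent here, instead of "celery -A proj worker -P eventlet",
# is what triggers the warning branch in create() above.
app.conf.worker_pool = 'eventlet'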
Code example #2
File: components.py Project: huyidao625/celery
 def create(self, w, semaphore=None, max_restarts=None):
     if w.app.conf.worker_pool in ('eventlet', 'gevent'):
         warnings.warn(UserWarning(W_POOL_SETTING))
     threaded = not w.use_eventloop or IS_WINDOWS
     procs = w.min_concurrency
     forking_enable = w.no_execv if w.force_execv else True
     w.process_task = w._process_task
     if not threaded:
         semaphore = w.semaphore = LaxBoundedSemaphore(procs)
         w._quick_acquire = w.semaphore.acquire
         w._quick_release = w.semaphore.release
         max_restarts = 100
         if w.pool_putlocks and w.pool_cls.uses_semaphore:
             w.process_task = w._process_task_sem
     allow_restart = self.autoreload_enabled or w.pool_restarts
     pool = w.pool = self.instantiate(
         w.pool_cls, w.min_concurrency,
         initargs=(w.app, w.hostname),
         maxtasksperchild=w.max_tasks_per_child,
         max_memory_per_child=w.max_memory_per_child,
         timeout=w.task_time_limit,
         soft_timeout=w.task_soft_time_limit,
         putlocks=w.pool_putlocks and threaded,
         lost_worker_timeout=w.worker_lost_wait,
         threads=threaded,
         max_restarts=max_restarts,
         allow_restart=allow_restart,
         forking_enable=forking_enable,
         semaphore=semaphore,
         sched_strategy=self.optimization,
     )
     _set_task_join_will_block(pool.task_join_will_block)
     return pool
Code example #3
    def setUp(self):
        self._threads_at_setup = list(threading.enumerate())
        from celery import _state
        from celery import result
        result.task_join_will_block = \
            _state.task_join_will_block = lambda: False
        self._current_app = current_app()
        self._default_app = _state.default_app
        trap = Trap()
        self._prev_tls = _state._tls
        _state.set_default_app(trap)

        class NonTLS(object):
            current_app = trap

        _state._tls = NonTLS()

        self.app = self.Celery(set_as_current=False)
        if not self.contained:
            self.app.set_current()
        root = logging.getLogger()
        self.__rootlevel = root.level
        self.__roothandlers = root.handlers
        _state._set_task_join_will_block(False)
        try:
            self.setup()
        except:
            self._teardown_app()
            raise
Code example #4
File: result.py Project: joehybird/celery
@contextmanager
def allow_join_result():
    reset_value = task_join_will_block()
    _set_task_join_will_block(False)
    try:
        yield
    finally:
        _set_task_join_will_block(reset_value)
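In celery/result.py this generator is defined with the @contextmanager decorator and uses task_join_will_block / _set_task_join_will_block from celery._state. A hedged usage sketch, assuming a task that needs to wait on another result (the task and the result id below are illustrative, not taken from the original page):

from celery import shared_task
from celery.result import AsyncResult, allow_join_result


@shared_task
def collect(result_id):
    # Calling .get() inside a task normally trips celery's join guard; the
    # context manager clears the task_join_will_block flag for the block.
    with allow_join_result():
        return AsyncResult(result_id).get(timeout=10)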
Code example #5
File: case.py Project: Amber-Creative/ambererpnext
    def _teardown_app(self):
        from celery.utils.log import LoggingProxy
        assert sys.stdout
        assert sys.stderr
        assert sys.__stdout__
        assert sys.__stderr__
        this = self._get_test_name()
        if isinstance(sys.stdout, (LoggingProxy, Mock)) or \
                isinstance(sys.__stdout__, (LoggingProxy, Mock)):
            raise RuntimeError(CASE_LOG_REDIRECT_EFFECT.format(this, 'stdout'))
        if isinstance(sys.stderr, (LoggingProxy, Mock)) or \
                isinstance(sys.__stderr__, (LoggingProxy, Mock)):
            raise RuntimeError(CASE_LOG_REDIRECT_EFFECT.format(this, 'stderr'))
        backend = self.app.__dict__.get('backend')
        if backend is not None:
            if isinstance(backend, CacheBackend):
                if isinstance(backend.client, DummyClient):
                    backend.client.cache.clear()
                backend._cache.clear()
        from celery import _state
        _state._set_task_join_will_block(False)

        _state.set_default_app(self._default_app)
        _state._tls = self._prev_tls
        _state._tls.current_app = self._current_app
        if self.app is not self._current_app:
            self.app.close()
        self.app = None
        self.assertEqual(
            self._threads_at_setup, list(threading.enumerate()),
        )
Code example #6
File: components.py Project: niteoweb/celery3
 def create(self, w, semaphore=None, max_restarts=None):
     if w.app.conf.CELERYD_POOL in ("eventlet", "gevent"):
         warnings.warn(UserWarning(W_POOL_SETTING))
     threaded = not w.use_eventloop
     procs = w.min_concurrency
     forking_enable = w.no_execv if w.force_execv else True
     if not threaded:
         semaphore = w.semaphore = LaxBoundedSemaphore(procs)
         w._quick_acquire = w.semaphore.acquire
         w._quick_release = w.semaphore.release
         max_restarts = 100
     allow_restart = self.autoreload_enabled or w.pool_restarts
     pool = w.pool = self.instantiate(
         w.pool_cls,
         w.min_concurrency,
         initargs=(w.app, w.hostname),
         maxtasksperchild=w.max_tasks_per_child,
         timeout=w.task_time_limit,
         soft_timeout=w.task_soft_time_limit,
         putlocks=w.pool_putlocks and threaded,
         lost_worker_timeout=w.worker_lost_wait,
         threads=threaded,
         max_restarts=max_restarts,
         allow_restart=allow_restart,
         forking_enable=forking_enable,
         semaphore=semaphore,
         sched_strategy=self.optimization,
     )
     _set_task_join_will_block(pool.task_join_will_block)
     return pool
Code example #7
File: case.py Project: Amber-Creative/ambererpnext
    def setUp(self):
        self._threads_at_setup = list(threading.enumerate())
        from celery import _state
        from celery import result
        result.task_join_will_block = \
            _state.task_join_will_block = lambda: False
        self._current_app = current_app()
        self._default_app = _state.default_app
        trap = Trap()
        self._prev_tls = _state._tls
        _state.set_default_app(trap)

        class NonTLS(object):
            current_app = trap
        _state._tls = NonTLS()

        self.app = self.Celery(set_as_current=False)
        if not self.contained:
            self.app.set_current()
        root = logging.getLogger()
        self.__rootlevel = root.level
        self.__roothandlers = root.handlers
        _state._set_task_join_will_block(False)
        try:
            self.setup()
        except:
            self._teardown_app()
            raise
Code example #8
File: prefork.py Project: anukat2015/AIR
def process_initializer(app, hostname):
    """Pool child process initializer.

    This will initialize a child pool process to ensure the correct
    app instance is used and things like logging work.

    """
    _set_task_join_will_block(True)
    platforms.signals.reset(*WORKER_SIGRESET)
    platforms.signals.ignore(*WORKER_SIGIGNORE)
    platforms.set_mp_process_title('celeryd', hostname=hostname)
    # This is for Windows and other platforms not supporting
    # fork(). Note that init_worker makes sure it's only
    # run once per process.
    app.loader.init_worker()
    app.loader.init_worker_process()
    app.log.setup(int(os.environ.get('CELERY_LOG_LEVEL', 0) or 0),
                  os.environ.get('CELERY_LOG_FILE') or None,
                  bool(os.environ.get('CELERY_LOG_REDIRECT', False)),
                  str(os.environ.get('CELERY_LOG_REDIRECT_LEVEL')))
    if os.environ.get('FORKED_BY_MULTIPROCESSING'):
        # pool did execv after fork
        trace.setup_worker_optimizations(app)
    else:
        app.set_current()
        set_default_app(app)
        app.finalize()
        trace._tasks = app._tasks  # enables fast_trace_task optimization.
    # rebuild execution handler for all tasks.
    from celery.app.trace import build_tracer
    for name, task in items(app.tasks):
        task.__trace__ = build_tracer(name, task, app.loader, hostname,
                                      app=app)
    signals.worker_process_init.send(sender=None)
Code example #9
    def _teardown_app(self):
        from celery.utils.log import LoggingProxy
        assert sys.stdout
        assert sys.stderr
        assert sys.__stdout__
        assert sys.__stderr__
        this = self._get_test_name()
        if isinstance(sys.stdout, (LoggingProxy, Mock)) or \
                isinstance(sys.__stdout__, (LoggingProxy, Mock)):
            raise RuntimeError(CASE_LOG_REDIRECT_EFFECT.format(this, 'stdout'))
        if isinstance(sys.stderr, (LoggingProxy, Mock)) or \
                isinstance(sys.__stderr__, (LoggingProxy, Mock)):
            raise RuntimeError(CASE_LOG_REDIRECT_EFFECT.format(this, 'stderr'))
        backend = self.app.__dict__.get('backend')
        if backend is not None:
            if isinstance(backend, CacheBackend):
                if isinstance(backend.client, DummyClient):
                    backend.client.cache.clear()
                backend._cache.clear()
        from celery import _state
        _state._set_task_join_will_block(False)

        _state.set_default_app(self._default_app)
        _state._tls = self._prev_tls
        _state._tls.current_app = self._current_app
        if self.app is not self._current_app:
            self.app.close()
        self.app = None
        self.assertEqual(
            self._threads_at_setup,
            list(threading.enumerate()),
        )
Code example #10
File: result.py Project: joehybird/celery
@contextmanager
def denied_join_result():
    reset_value = task_join_will_block()
    _set_task_join_will_block(True)
    try:
        yield
    finally:
        _set_task_join_will_block(reset_value)
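denied_join_result is the inverse helper: it forces the join guard on so that any .get() attempted inside the block is rejected. A minimal sketch of the flag behaviour, assuming a celery version where both names are importable as shown:

from celery._state import task_join_will_block
from celery.result import denied_join_result

with denied_join_result():
    # Inside the block the guard reports True, so joins would be refused.
    assert task_join_will_block() is True
# On exit, the finally clause above restores the previous value.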
Code example #11
File: prefork.py Project: guasek/celery
def process_initializer(app, hostname):
    """Pool child process initializer.

    This will initialize a child pool process to ensure the correct
    app instance is used and things like logging work.

    """
    _set_task_join_will_block(True)
    platforms.signals.reset(*WORKER_SIGRESET)
    platforms.signals.ignore(*WORKER_SIGIGNORE)
    platforms.set_mp_process_title('celeryd', hostname=hostname)
    # This is for Windows and other platforms not supporting
    # fork(). Note that init_worker makes sure it's only
    # run once per process.
    app.loader.init_worker()
    app.loader.init_worker_process()
    app.log.setup(int(os.environ.get('CELERY_LOG_LEVEL', 0) or 0),
                  os.environ.get('CELERY_LOG_FILE') or None,
                  bool(os.environ.get('CELERY_LOG_REDIRECT', False)),
                  str(os.environ.get('CELERY_LOG_REDIRECT_LEVEL')))
    if os.environ.get('FORKED_BY_MULTIPROCESSING'):
        # pool did execv after fork
        trace.setup_worker_optimizations(app)
    else:
        app.set_current()
        set_default_app(app)
        app.finalize()
        trace._tasks = app._tasks  # enables fast_trace_task optimization.
    # rebuild execution handler for all tasks.
    from celery.app.trace import build_tracer
    for name, task in items(app.tasks):
        task.__trace__ = build_tracer(name, task, app.loader, hostname,
                                      app=app)
    signals.worker_process_init.send(sender=None)
Code example #12
File: test_app.py Project: Scalr/celery
 def test_task_join_will_block(self, patching):
     patching('celery._state._task_join_will_block', 0)
     assert _state._task_join_will_block == 0
     _state._set_task_join_will_block(True)
     assert _state._task_join_will_block is True
     # fixture 'app' sets this, so need to use orig_ function
     # set there by that fixture.
     res = _state.orig_task_join_will_block()
     assert res is True
Code example #13
File: test_app.py Project: xmn1986/celery
 def test_task_join_will_block(self):
     prev, _state._task_join_will_block = _state._task_join_will_block, 0
     try:
         self.assertEqual(_state._task_join_will_block, 0)
         _state._set_task_join_will_block(True)
         print(_state.task_join_will_block)
         self.assertTrue(_state.task_join_will_block())
     finally:
         _state._task_join_will_block = prev
Code example #14
File: test_app.py Project: HideMode/celery
 def test_task_join_will_block(self):
     prev, _state._task_join_will_block = _state._task_join_will_block, 0
     try:
         self.assertEqual(_state._task_join_will_block, 0)
         _state._set_task_join_will_block(True)
         print(_state.task_join_will_block)
         self.assertTrue(_state.task_join_will_block())
     finally:
         _state._task_join_will_block = prev
Code example #15
File: test_app.py Project: zhangyitony/celery
 def test_task_join_will_block(self, patching):
     patching('celery._state._task_join_will_block', 0)
     assert _state._task_join_will_block == 0
     _state._set_task_join_will_block(True)
     assert _state._task_join_will_block is True
     # fixture 'app' sets this, so need to use orig_ function
     # set there by that fixture.
     res = _state.orig_task_join_will_block()
     assert res is True
Code example #16
File: conftest.py Project: udemy/celery
def task_join_will_not_block():
    from celery import _state, result
    prev_res_join_block = result.task_join_will_block
    _state.orig_task_join_will_block = _state.task_join_will_block
    prev_state_join_block = _state.task_join_will_block
    result.task_join_will_block = \
        _state.task_join_will_block = lambda: False
    _state._set_task_join_will_block(False)

    yield

    result.task_join_will_block = prev_res_join_block
    _state.task_join_will_block = prev_state_join_block
    _state._set_task_join_will_block(False)
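In celery's own test suite this generator is registered as a pytest fixture in conftest.py (the decorator is not shown in the snippet). A hedged sketch of a test depending on it, assuming the fixture is registered under the same name; the test itself is an assumption, not from the original page:

def test_get_does_not_trip_join_guard(task_join_will_not_block):
    from celery import _state, result
    # While the fixture is active, both predicates are monkeypatched to
    # report that joining will not block.
    assert _state.task_join_will_block() is False
    assert result.task_join_will_block() is False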
Code example #17
File: conftest.py Project: tayfun/celery
def task_join_will_not_block():
    from celery import _state
    from celery import result
    prev_res_join_block = result.task_join_will_block
    _state.orig_task_join_will_block = _state.task_join_will_block
    prev_state_join_block = _state.task_join_will_block
    result.task_join_will_block = \
        _state.task_join_will_block = lambda: False
    _state._set_task_join_will_block(False)

    yield

    result.task_join_will_block = prev_res_join_block
    _state.task_join_will_block = prev_state_join_block
    _state._set_task_join_will_block(False)
Code example #18
File: conftest.py Project: yingzong/celery
def task_join_will_not_block(request):
    from celery import _state
    from celery import result
    prev_res_join_block = result.task_join_will_block
    _state.orig_task_join_will_block = _state.task_join_will_block
    prev_state_join_block = _state.task_join_will_block
    result.task_join_will_block = \
        _state.task_join_will_block = lambda: False
    _state._set_task_join_will_block(False)

    def fin():
        result.task_join_will_block = prev_res_join_block
        _state.task_join_will_block = prev_state_join_block
        _state._set_task_join_will_block(False)
    request.addfinalizer(fin)
Code example #19
File: conftest.py Project: sadhik-shaik/celery
def task_join_will_not_block(request):
    from celery import _state
    from celery import result
    prev_res_join_block = result.task_join_will_block
    _state.orig_task_join_will_block = _state.task_join_will_block
    prev_state_join_block = _state.task_join_will_block
    result.task_join_will_block = \
        _state.task_join_will_block = lambda: False
    _state._set_task_join_will_block(False)

    def fin():
        result.task_join_will_block = prev_res_join_block
        _state.task_join_will_block = prev_state_join_block
        _state._set_task_join_will_block(False)

    request.addfinalizer(fin)
Code example #20
def process_initializer(app, hostname):
    """Pool child process initializer.

    Initialize the child pool process to ensure the correct
    app instance is used and things like logging work.
    """
    _set_task_join_will_block(True)
    platforms.signals.reset(*WORKER_SIGRESET)
    platforms.signals.ignore(*WORKER_SIGIGNORE)
    platforms.set_mp_process_title("celeryd", hostname=hostname)
    # This is for Windows and other platforms not supporting
    # fork().  Note that init_worker makes sure it's only
    # run once per process.
    app.loader.init_worker()
    app.loader.init_worker_process()
    logfile = os.environ.get("CELERY_LOG_FILE") or None
    if logfile and "%i" in logfile.lower():
        # logfile path will differ so need to set up logging again.
        app.log.already_setup = False
    app.log.setup(
        int(os.environ.get("CELERY_LOG_LEVEL", 0) or 0),
        logfile,
        bool(os.environ.get("CELERY_LOG_REDIRECT", False)),
        str(os.environ.get("CELERY_LOG_REDIRECT_LEVEL")),
        hostname=hostname,
    )
    if os.environ.get("FORKED_BY_MULTIPROCESSING"):
        # pool did execv after fork
        trace.setup_worker_optimizations(app, hostname)
    else:
        app.set_current()
        set_default_app(app)
        app.finalize()
        trace._tasks = app._tasks  # enables fast_trace_task optimization.
    # rebuild execution handler for all tasks.
    from celery.app.trace import build_tracer

    for name, task in items(app.tasks):
        task.__trace__ = build_tracer(name,
                                      task,
                                      app.loader,
                                      hostname,
                                      app=app)
    from celery.worker import state as worker_state

    worker_state.reset_state()
    signals.worker_process_init.send(sender=None)
Code example #21
File: case.py Project: eliziario/celery
    def _teardown_app(self):
        from celery import _state
        from celery import result
        from celery.utils.log import LoggingProxy

        assert sys.stdout
        assert sys.stderr
        assert sys.__stdout__
        assert sys.__stderr__
        this = self._get_test_name()
        result.task_join_will_block = self._prev_res_join_block
        _state.task_join_will_block = self._prev_state_join_block
        if isinstance(sys.stdout, (LoggingProxy, Mock)) or isinstance(sys.__stdout__, (LoggingProxy, Mock)):
            raise RuntimeError(CASE_LOG_REDIRECT_EFFECT.format(this, "stdout"))
        if isinstance(sys.stderr, (LoggingProxy, Mock)) or isinstance(sys.__stderr__, (LoggingProxy, Mock)):
            raise RuntimeError(CASE_LOG_REDIRECT_EFFECT.format(this, "stderr"))
        backend = self.app.__dict__.get("backend")
        if backend is not None:
            if isinstance(backend, CacheBackend):
                if isinstance(backend.client, DummyClient):
                    backend.client.cache.clear()
                backend._cache.clear()
        from celery import _state

        _state._set_task_join_will_block(False)

        _state.set_default_app(self._default_app)
        _state._tls = self._prev_tls
        _state._tls.current_app = self._current_app
        if self.app is not self._current_app:
            self.app.close()
        self.app = None
        self.assertEqual(self._threads_at_setup, alive_threads())

        # Make sure no test left the shutdown flags enabled.
        from celery.worker import state as worker_state

        # check for EX_OK
        self.assertIsNot(worker_state.should_stop, False)
        self.assertIsNot(worker_state.should_terminate, False)
        # check for other true values
        self.assertFalse(worker_state.should_stop)
        self.assertFalse(worker_state.should_terminate)
Code example #22
File: case.py Project: gjames2467/celery
    def _teardown_app(self):
        from celery import _state
        from celery import result
        from celery.utils.log import LoggingProxy
        assert sys.stdout
        assert sys.stderr
        assert sys.__stdout__
        assert sys.__stderr__
        this = self._get_test_name()
        result.task_join_will_block = self._prev_res_join_block
        _state.task_join_will_block = self._prev_state_join_block
        if isinstance(sys.stdout, (LoggingProxy, Mock)) or \
                isinstance(sys.__stdout__, (LoggingProxy, Mock)):
            raise RuntimeError(CASE_LOG_REDIRECT_EFFECT.format(this, 'stdout'))
        if isinstance(sys.stderr, (LoggingProxy, Mock)) or \
                isinstance(sys.__stderr__, (LoggingProxy, Mock)):
            raise RuntimeError(CASE_LOG_REDIRECT_EFFECT.format(this, 'stderr'))
        backend = self.app.__dict__.get('backend')
        if backend is not None:
            if isinstance(backend, CacheBackend):
                if isinstance(backend.client, DummyClient):
                    backend.client.cache.clear()
                backend._cache.clear()
        from celery import _state
        _state._set_task_join_will_block(False)

        _state.set_default_app(self._default_app)
        _state._tls = self._prev_tls
        _state._tls.current_app = self._current_app
        if self.app is not self._current_app:
            self.app.close()
        self.app = None
        self.assertEqual(self._threads_at_setup, alive_threads())

        # Make sure no test left the shutdown flags enabled.
        from celery.worker import state as worker_state
        # check for EX_OK
        self.assertIsNot(worker_state.should_stop, False)
        self.assertIsNot(worker_state.should_terminate, False)
        # check for other true values
        self.assertFalse(worker_state.should_stop)
        self.assertFalse(worker_state.should_terminate)
Code example #23
File: conftest.py Project: yingzong/celery
 def fin():
     result.task_join_will_block = prev_res_join_block
     _state.task_join_will_block = prev_state_join_block
     _state._set_task_join_will_block(False)
Code example #24
File: conftest.py Project: sadhik-shaik/celery
 def fin():
     result.task_join_will_block = prev_res_join_block
     _state.task_join_will_block = prev_state_join_block
     _state._set_task_join_will_block(False)