def app(request):
    # Pytest fixture: build an isolated Celery TestApp while trapping any
    # accidental use of the "current" or default app during the test.
    from celery import _state
    # Snapshot the global app state so the finalizer can restore it exactly.
    prev_current_app = current_app()
    prev_default_app = _state.default_app
    prev_finalizers = set(_state._on_app_finalizers)
    prev_apps = weakref.WeakSet(_state._apps)
    trap = Trap()
    prev_tls = _state._tls
    # Install the trap as default app so unintended app access raises.
    _state.set_default_app(trap)

    class NonTLS(object):
        # Stand-in for the thread-local holder; every thread sees the trap.
        current_app = trap
    _state._tls = NonTLS()
    app = TestApp(set_as_current=False)
    # A test opts out of containment by setting ``app_contained = False`` on
    # its module, class or function; the app then becomes the current app.
    is_not_contained = any([
        not getattr(request.module, 'app_contained', True),
        not getattr(request.cls, 'app_contained', True),
        not getattr(request.function, 'app_contained', True)
    ])
    if is_not_contained:
        app.set_current()

    def fin():
        # Finalizer: restore the snapshotted global state after the test.
        _state.set_default_app(prev_default_app)
        _state._tls = prev_tls
        _state._tls.current_app = prev_current_app
        if app is not prev_current_app:
            app.close()
        _state._on_app_finalizers = prev_finalizers
        _state._apps = prev_apps
    request.addfinalizer(fin)
    return app
def setUp(self):
    """Install a trap app and a fresh test app before each test.

    Snapshots global Celery state (current app, default app, TLS holder)
    so ``_teardown_app`` can restore it afterwards.
    """
    self._threads_at_setup = list(threading.enumerate())
    from celery import _state
    from celery import result
    # Tests must never block waiting on a task result.
    result.task_join_will_block = \
        _state.task_join_will_block = lambda: False
    self._current_app = current_app()
    self._default_app = _state.default_app
    trap = Trap()
    self._prev_tls = _state._tls
    _state.set_default_app(trap)

    class NonTLS(object):
        # Non-thread-local stand-in so every thread sees the trap.
        current_app = trap
    _state._tls = NonTLS()
    self.app = self.Celery(set_as_current=False)
    if not self.contained:
        self.app.set_current()
    root = logging.getLogger()
    self.__rootlevel = root.level
    self.__roothandlers = root.handlers
    _state._set_task_join_will_block(False)
    try:
        self.setup()
    except BaseException:
        # Explicit ``BaseException`` instead of a bare ``except:`` —
        # identical semantics (re-raised below), clearer intent.
        self._teardown_app()
        raise
def process_initializer(app, hostname):
    """Initializes the process so it can be used to process tasks."""
    platforms.signals.reset(*WORKER_SIGRESET)
    platforms.signals.ignore(*WORKER_SIGIGNORE)
    platforms.maybe_patch_process_group()
    platforms.set_mp_process_title('celeryd', hostname=hostname)
    # This is for Windows and other platforms not supporting
    # fork().  Note that init_worker makes sure it's only
    # run once per process.
    # ``or 0`` guards against CELERY_LOG_LEVEL being set to an empty
    # string, which would make int() raise ValueError.
    app.log.setup(int(os.environ.get('CELERY_LOG_LEVEL', 0) or 0),
                  os.environ.get('CELERY_LOG_FILE') or None,
                  bool(os.environ.get('CELERY_LOG_REDIRECT', False)),
                  str(os.environ.get('CELERY_LOG_REDIRECT_LEVEL')))
    app.loader.init_worker()
    app.loader.init_worker_process()
    if os.environ.get('FORKED_BY_MULTIPROCESSING'):
        # pool did execv after fork
        trace.setup_worker_optimizations(app)
    else:
        app.set_current()
        set_default_app(app)
        app.finalize()
        trace._tasks = app._tasks  # enables fast_trace_task optimization.
    # rebuild the execution handler for every registered task.
    from celery.task.trace import build_tracer
    for name, task in items(app.tasks):
        task.__trace__ = build_tracer(name, task, app.loader, hostname)
    signals.worker_process_init.send(sender=None)
def app(request):
    """Fixture creating a Celery application instance."""
    from celery import _state
    # Snapshot the global app state so teardown can restore it exactly.
    prev_current_app = current_app()
    prev_default_app = _state.default_app
    prev_finalizers = set(_state._on_app_finalizers)
    prev_apps = weakref.WeakSet(_state._apps)
    trap = Trap()
    prev_tls = _state._tls
    # Install the trap as default app so unintended app access raises.
    _state.set_default_app(trap)

    class NonTLS(object):
        # Stand-in for the thread-local holder; always yields the trap.
        current_app = trap
    _state._tls = NonTLS()
    test_app = TestApp(set_as_current=False)
    # Tests opt out of containment with ``app_contained = False`` on the
    # module, class or function; the app then becomes the current app.
    is_not_contained = any([
        not getattr(request.module, 'app_contained', True),
        not getattr(request.cls, 'app_contained', True),
        not getattr(request.function, 'app_contained', True)
    ])
    if is_not_contained:
        test_app.set_current()
    yield test_app
    # -- teardown: restore the snapshotted global state --
    _state.set_default_app(prev_default_app)
    _state._tls = prev_tls
    _state._tls.current_app = prev_current_app
    if test_app is not prev_current_app:
        test_app.close()
    _state._on_app_finalizers = prev_finalizers
    _state._apps = prev_apps
def _teardown_app(self):
    # Verify no test left stdout/stderr redirected to Celery's logging
    # proxy (or a Mock), then restore the global app state saved in setUp.
    from celery.utils.log import LoggingProxy
    assert sys.stdout
    assert sys.stderr
    assert sys.__stdout__
    assert sys.__stderr__
    this = self._get_test_name()
    if isinstance(sys.stdout, (LoggingProxy, Mock)) or \
            isinstance(sys.__stdout__, (LoggingProxy, Mock)):
        raise RuntimeError(CASE_LOG_REDIRECT_EFFECT.format(this, 'stdout'))
    if isinstance(sys.stderr, (LoggingProxy, Mock)) or \
            isinstance(sys.__stderr__, (LoggingProxy, Mock)):
        raise RuntimeError(CASE_LOG_REDIRECT_EFFECT.format(this, 'stderr'))
    # __dict__ lookup avoids triggering the lazy ``backend`` property;
    # only clear caches if the test actually created a backend.
    backend = self.app.__dict__.get('backend')
    if backend is not None:
        if isinstance(backend, CacheBackend):
            if isinstance(backend.client, DummyClient):
                backend.client.cache.clear()
            backend._cache.clear()
    from celery import _state
    _state._set_task_join_will_block(False)
    # Restore default app, TLS holder and current app saved in setUp.
    _state.set_default_app(self._default_app)
    _state._tls = self._prev_tls
    _state._tls.current_app = self._current_app
    if self.app is not self._current_app:
        self.app.close()
    self.app = None
    # Fail if the test leaked threads.
    self.assertEqual(
        self._threads_at_setup, list(threading.enumerate()),
    )
def process_initializer(app, hostname):
    """Pool child process initializer.

    This will initialize a child pool process to ensure the correct
    app instance is used and things like logging works.
    """
    platforms.signals.reset(*WORKER_SIGRESET)
    platforms.signals.ignore(*WORKER_SIGIGNORE)
    platforms.set_mp_process_title('celeryd', hostname=hostname)
    # This is for Windows and other platforms not supporting
    # fork().  Note that init_worker makes sure it's only
    # run once per process.
    app.loader.init_worker()
    app.loader.init_worker_process()
    # ``or 0`` guards against CELERY_LOG_LEVEL being an empty string.
    app.log.setup(int(os.environ.get('CELERY_LOG_LEVEL', 0) or 0),
                  os.environ.get('CELERY_LOG_FILE') or None,
                  bool(os.environ.get('CELERY_LOG_REDIRECT', False)),
                  str(os.environ.get('CELERY_LOG_REDIRECT_LEVEL')))
    if os.environ.get('FORKED_BY_MULTIPROCESSING'):
        # pool did execv after fork
        trace.setup_worker_optimizations(app)
    else:
        app.set_current()
        set_default_app(app)
        app.finalize()
        trace._tasks = app._tasks  # enables fast_trace_task optimization.
    # rebuild execution handler for all tasks.
    from celery.app.trace import build_tracer
    for name, task in items(app.tasks):
        task.__trace__ = build_tracer(name, task, app.loader,
                                      hostname, app=app)
    signals.worker_process_init.send(sender=None)
def process_initializer(app, hostname):
    """Pool child process initializer."""
    platforms.signals.reset(*WORKER_SIGRESET)
    platforms.signals.ignore(*WORKER_SIGIGNORE)
    platforms.set_mp_process_title("celeryd", hostname=hostname)
    # This is for Windows and other platforms not supporting
    # fork().  Note that init_worker makes sure it's only
    # run once per process.
    app.loader.init_worker()
    app.loader.init_worker_process()
    app.log.setup(
        # ``or 0`` guards against CELERY_LOG_LEVEL being set to an empty
        # string, which would make int() raise ValueError.
        int(os.environ.get("CELERY_LOG_LEVEL", 0) or 0),
        os.environ.get("CELERY_LOG_FILE") or None,
        bool(os.environ.get("CELERY_LOG_REDIRECT", False)),
        str(os.environ.get("CELERY_LOG_REDIRECT_LEVEL")),
    )
    if os.environ.get("FORKED_BY_MULTIPROCESSING"):
        # pool did execv after fork
        trace.setup_worker_optimizations(app)
    else:
        app.set_current()
        set_default_app(app)
        app.finalize()
        trace._tasks = app._tasks  # enables fast_trace_task optimization.
    # rebuild execution handler for all registered tasks.
    from celery.task.trace import build_tracer
    for name, task in items(app.tasks):
        task.__trace__ = build_tracer(name, task, app.loader, hostname)
    signals.worker_process_init.send(sender=None)
def _teardown_app(self):
    # Verify no test left stdout/stderr redirected to Celery's logging
    # proxy, then restore the global app state saved in setUp.
    from celery.utils.log import LoggingProxy
    assert sys.stdout
    assert sys.stderr
    assert sys.__stdout__
    assert sys.__stderr__
    this = self._get_test_name()
    if isinstance(sys.stdout, LoggingProxy) or \
            isinstance(sys.__stdout__, LoggingProxy):
        raise RuntimeError(CASE_LOG_REDIRECT_EFFECT.format(this, 'stdout'))
    if isinstance(sys.stderr, LoggingProxy) or \
            isinstance(sys.__stderr__, LoggingProxy):
        raise RuntimeError(CASE_LOG_REDIRECT_EFFECT.format(this, 'stderr'))
    # __dict__ lookup avoids triggering the lazy ``backend`` property.
    backend = self.app.__dict__.get('backend')
    if backend is not None:
        if isinstance(backend, CacheBackend):
            if isinstance(backend.client, DummyClient):
                backend.client.cache.clear()
            backend._cache.clear()
    from celery._state import _tls, set_default_app
    # Restore the default and current app saved in setUp.
    set_default_app(self._default_app)
    _tls.current_app = self._current_app
    if self.app is not self._current_app:
        self.app.close()
    self.app = None
    # Fail if the test leaked threads.
    self.assertEqual(
        self._threads_at_setup, list(threading.enumerate()),
    )
def fin():
    # Test finalizer: restore the Celery global state captured earlier.
    # NOTE(review): ``prev_*`` and ``app`` are free variables from an
    # enclosing fixture scope -- this function is not standalone.
    _state.set_default_app(prev_default_app)
    _state._tls = prev_tls
    _state._tls.current_app = prev_current_app
    if app is not prev_current_app:
        app.close()
    _state._on_app_finalizers = prev_finalizers
    _state._apps = prev_apps
def setup():
    """Suite setup: optionally import every module (for coverage runs),
    then install a Trap as the default app so stray app usage raises."""
    wants_coverage = os.environ.get('COVER_ALL_MODULES')
    if wants_coverage or '--with-coverage3' in sys.argv:
        from warnings import catch_warnings
        with catch_warnings(record=True):
            import_all_modules()
        warnings.resetwarnings()
    from celery.tests.case import Trap
    from celery._state import set_default_app
    set_default_app(Trap())
def _teardown_app(self):
    # Restore the global app state saved in setUp and close the test app.
    # __dict__ lookup avoids triggering the lazy ``backend`` property.
    backend = self.app.__dict__.get('backend')
    if backend is not None:
        if isinstance(backend, CacheBackend):
            if isinstance(backend.client, DummyClient):
                backend.client.cache.clear()
            backend._cache.clear()
    from celery._state import _tls, set_default_app
    set_default_app(self._default_app)
    _tls.current_app = self._current_app
    if self.app is not self._current_app:
        self.app.close()
    self.app = None
def setup():
    """Suite setup: set env flags, optionally import every module for
    coverage, and install a Trap as the default app."""
    os.environ.update(
        # warn if config module not found
        C_WNOCONF='yes',
        KOMBU_DISABLE_LIMIT_PROTECTION='yes',
    )
    covering = bool(os.environ.get('COVER_ALL_MODULES'))
    if covering or '--with-coverage' in sys.argv:
        from warnings import catch_warnings
        with catch_warnings(record=True):
            import_all_modules()
        warnings.resetwarnings()
    from celery.tests.case import Trap
    from celery._state import set_default_app
    set_default_app(Trap())
def setup_session(scope='session'):
    """Session setup: env flags, optional coverage import sweep (skipped
    on PyPy3), and a Trap installed as the default app."""
    os.environ.update(
        # warn if config module not found
        C_WNOCONF='yes',
        KOMBU_DISABLE_LIMIT_PROTECTION='yes',
    )
    covering = (os.environ.get('COVER_ALL_MODULES')
                or '--with-coverage' in sys.argv)
    if covering and not PYPY3:
        from warnings import catch_warnings
        with catch_warnings(record=True):
            import_all_modules()
        warnings.resetwarnings()
    from celery._state import set_default_app
    set_default_app(Trap())
def set_trap(app):
    """Contextmanager that installs the trap app.

    The trap means that anything trying to use the current or default app
    will raise an exception.
    """
    trap = Trap()
    prev_tls = _state._tls
    _state.set_default_app(trap)

    class NonTLS(object):
        # Non-thread-local stand-in; every thread sees the trap.
        current_app = trap
    _state._tls = NonTLS()
    yield
    # Only the TLS holder is restored here; the previous default app is
    # restored by the caller (see setup_default_app).
    _state._tls = prev_tls
def process_initializer(app, hostname):
    """Pool child process initializer.

    Initialize the child pool process to ensure the correct
    app instance is used and things like logging works.
    """
    # Child processes must never block joining on a task result.
    _set_task_join_will_block(True)
    platforms.signals.reset(*WORKER_SIGRESET)
    platforms.signals.ignore(*WORKER_SIGIGNORE)
    platforms.set_mp_process_title("celeryd", hostname=hostname)
    # This is for Windows and other platforms not supporting
    # fork().  Note that init_worker makes sure it's only
    # run once per process.
    app.loader.init_worker()
    app.loader.init_worker_process()
    logfile = os.environ.get("CELERY_LOG_FILE") or None
    if logfile and "%i" in logfile.lower():
        # logfile path will differ so need to set up logging again.
        app.log.already_setup = False
    app.log.setup(
        # ``or 0`` guards against an empty CELERY_LOG_LEVEL string.
        int(os.environ.get("CELERY_LOG_LEVEL", 0) or 0),
        logfile,
        bool(os.environ.get("CELERY_LOG_REDIRECT", False)),
        str(os.environ.get("CELERY_LOG_REDIRECT_LEVEL")),
        hostname=hostname,
    )
    if os.environ.get("FORKED_BY_MULTIPROCESSING"):
        # pool did execv after fork
        trace.setup_worker_optimizations(app, hostname)
    else:
        app.set_current()
        set_default_app(app)
        app.finalize()
        trace._tasks = app._tasks  # enables fast_trace_task optimization.
    # rebuild execution handler for all tasks.
    from celery.app.trace import build_tracer
    for name, task in items(app.tasks):
        task.__trace__ = build_tracer(name, task, app.loader,
                                      hostname, app=app)
    from celery.worker import state as worker_state
    # Clear any worker state inherited from the parent process.
    worker_state.reset_state()
    signals.worker_process_init.send(sender=None)
def setup_session(scope='session'):
    """Session-wide setup.

    Sets warning env flags, optionally imports every module for
    coverage (not on PyPy3), and traps default-app usage.
    """
    env = os.environ
    using_coverage = bool(
        env.get('COVER_ALL_MODULES') or '--with-coverage' in sys.argv
    )
    env.update(
        # warn if config module not found
        C_WNOCONF='yes',
        KOMBU_DISABLE_LIMIT_PROTECTION='yes',
    )
    if using_coverage and not PYPY3:
        from warnings import catch_warnings
        with catch_warnings(record=True):
            import_all_modules()
        warnings.resetwarnings()
    from celery._state import set_default_app
    set_default_app(Trap())
def setUp(self):
    """Install a trap app and a fresh test app before each test.

    Snapshots the current and default app so ``_teardown_app``
    can restore them afterwards.
    """
    from celery import _state
    self._current_app = current_app()
    self._default_app = _state.default_app
    trap = Trap()
    # Trap any accidental app access during the test.
    _state.set_default_app(trap)
    _state._tls.current_app = trap
    self.app = self.Celery(set_as_current=False)
    if not self.contained:
        self.app.set_current()
    root = logging.getLogger()
    self.__rootlevel = root.level
    self.__roothandlers = root.handlers
    try:
        self.setup()
    except BaseException:
        # Explicit ``BaseException`` instead of a bare ``except:`` —
        # identical semantics (re-raised below), clearer intent.
        self._teardown_app()
        raise
def _teardown_app(self):
    # Verify stdio was not left redirected, restore global app state,
    # and assert the test leaked neither threads nor shutdown flags.
    from celery import _state
    from celery import result
    from celery.utils.log import LoggingProxy
    assert sys.stdout
    assert sys.stderr
    assert sys.__stdout__
    assert sys.__stderr__
    this = self._get_test_name()
    # Restore the join-blocking hooks patched in setUp.
    result.task_join_will_block = self._prev_res_join_block
    _state.task_join_will_block = self._prev_state_join_block
    if isinstance(sys.stdout, (LoggingProxy, Mock)) or isinstance(sys.__stdout__, (LoggingProxy, Mock)):
        raise RuntimeError(CASE_LOG_REDIRECT_EFFECT.format(this, "stdout"))
    if isinstance(sys.stderr, (LoggingProxy, Mock)) or isinstance(sys.__stderr__, (LoggingProxy, Mock)):
        raise RuntimeError(CASE_LOG_REDIRECT_EFFECT.format(this, "stderr"))
    # __dict__ lookup avoids triggering the lazy ``backend`` property.
    backend = self.app.__dict__.get("backend")
    if backend is not None:
        if isinstance(backend, CacheBackend):
            if isinstance(backend.client, DummyClient):
                backend.client.cache.clear()
            backend._cache.clear()
    from celery import _state
    _state._set_task_join_will_block(False)
    _state.set_default_app(self._default_app)
    _state._tls = self._prev_tls
    _state._tls.current_app = self._current_app
    if self.app is not self._current_app:
        self.app.close()
    self.app = None
    self.assertEqual(self._threads_at_setup, alive_threads())
    # Make sure no test left the shutdown flags enabled.
    from celery.worker import state as worker_state
    # check for EX_OK
    self.assertIsNot(worker_state.should_stop, False)
    self.assertIsNot(worker_state.should_terminate, False)
    # check for other true values
    self.assertFalse(worker_state.should_stop)
    self.assertFalse(worker_state.should_terminate)
def _teardown_app(self):
    # Verify stdio was not left redirected, restore global app state,
    # and assert the test leaked neither threads nor shutdown flags.
    from celery import _state
    from celery import result
    from celery.utils.log import LoggingProxy
    assert sys.stdout
    assert sys.stderr
    assert sys.__stdout__
    assert sys.__stderr__
    this = self._get_test_name()
    # Restore the join-blocking hooks patched in setUp.
    result.task_join_will_block = self._prev_res_join_block
    _state.task_join_will_block = self._prev_state_join_block
    if isinstance(sys.stdout, (LoggingProxy, Mock)) or \
            isinstance(sys.__stdout__, (LoggingProxy, Mock)):
        raise RuntimeError(CASE_LOG_REDIRECT_EFFECT.format(this, 'stdout'))
    if isinstance(sys.stderr, (LoggingProxy, Mock)) or \
            isinstance(sys.__stderr__, (LoggingProxy, Mock)):
        raise RuntimeError(CASE_LOG_REDIRECT_EFFECT.format(this, 'stderr'))
    # __dict__ lookup avoids triggering the lazy ``backend`` property.
    backend = self.app.__dict__.get('backend')
    if backend is not None:
        if isinstance(backend, CacheBackend):
            if isinstance(backend.client, DummyClient):
                backend.client.cache.clear()
            backend._cache.clear()
    from celery import _state
    _state._set_task_join_will_block(False)
    _state.set_default_app(self._default_app)
    _state._tls = self._prev_tls
    _state._tls.current_app = self._current_app
    if self.app is not self._current_app:
        self.app.close()
    self.app = None
    self.assertEqual(self._threads_at_setup, alive_threads())
    # Make sure no test left the shutdown flags enabled.
    from celery.worker import state as worker_state
    # check for EX_OK
    self.assertIsNot(worker_state.should_stop, False)
    self.assertIsNot(worker_state.should_terminate, False)
    # check for other true values
    self.assertFalse(worker_state.should_stop)
    self.assertFalse(worker_state.should_terminate)
def setUp(self):
    """Install a trap app and a fresh test app before each test.

    Also records the live threads so teardown can detect leaks.
    """
    self._threads_at_setup = list(threading.enumerate())
    from celery import _state
    # Snapshot current/default app so teardown can restore them.
    self._current_app = current_app()
    self._default_app = _state.default_app
    trap = Trap()
    _state.set_default_app(trap)
    _state._tls.current_app = trap
    self.app = self.Celery(set_as_current=False)
    if not self.contained:
        self.app.set_current()
    root = logging.getLogger()
    self.__rootlevel = root.level
    self.__roothandlers = root.handlers
    try:
        self.setup()
    except BaseException:
        # Explicit ``BaseException`` instead of a bare ``except:`` —
        # identical semantics (re-raised below), clearer intent.
        self._teardown_app()
        raise
def setup_default_app(app, use_trap=False):
    """Setup default app for testing.

    Ensures state is clean after the test returns.
    """
    # Snapshot the global app registry so it can be restored on exit.
    prev_current_app = _state.get_current_app()
    prev_default_app = _state.default_app
    prev_finalizers = set(_state._on_app_finalizers)
    prev_apps = weakref.WeakSet(_state._apps)
    if use_trap:
        # set_trap installs a Trap app for the duration of the test.
        with set_trap(app):
            yield
    else:
        yield
    # Restore everything captured above.
    _state.set_default_app(prev_default_app)
    _state._tls.current_app = prev_current_app
    if app is not prev_current_app:
        app.close()
    _state._on_app_finalizers = prev_finalizers
    _state._apps = prev_apps
def process_initializer(app, hostname):
    """Initializes the process so it can be used to process tasks."""
    app.set_current()
    set_default_app(app)
    trace._tasks = app._tasks  # make sure this optimization is set.
    platforms.signals.reset(*WORKER_SIGRESET)
    platforms.signals.ignore(*WORKER_SIGIGNORE)
    platforms.set_mp_process_title('celeryd', hostname=hostname)
    # This is for Windows and other platforms not supporting
    # fork().  Note that init_worker makes sure it's only
    # run once per process.
    # ``or 0`` guards against CELERY_LOG_LEVEL being set to an empty
    # string, which would make int() raise ValueError.
    app.log.setup(int(os.environ.get('CELERY_LOG_LEVEL', 0) or 0),
                  os.environ.get('CELERY_LOG_FILE') or None,
                  bool(os.environ.get('CELERY_LOG_REDIRECT', False)),
                  str(os.environ.get('CELERY_LOG_REDIRECT_LEVEL')))
    app.loader.init_worker()
    app.loader.init_worker_process()
    app.finalize()
    # rebuild the tracer for every task (iteritems: Python 2 codebase).
    from celery.task.trace import build_tracer
    for name, task in app.tasks.iteritems():
        task.__trace__ = build_tracer(name, task, app.loader, hostname)
    signals.worker_process_init.send(sender=None)
def set_default(self):
    """Make this app the default app for all threads."""
    set_default_app(self)
def set_default(self):
    """Makes this the default app for all threads.

    Delegates to ``set_default_app``.
    """
    set_default_app(self)
    # NOTE(review): the enclosing ``def`` starts before this chunk
    # (apparently ``_get_current_app``, which is assigned below) --
    # body shown indented; confirm against the full file.
    app = _state.default_app
    if app is None:
        raise RuntimeError("""Celery is not initialized. If you are seeing this error it probably means you have imported something that depends on the Celery app before initializing it. Adjust imports and/or INSTALLED_APPS (see corehq.apps.celery). """)
    return app


# Monkey patch Celery.  The app will be initialized in
# corehq.apps.celery._init_celery_app during Django setup.
os.environ.setdefault("C_STRICT_APP", "1")

from celery import _state  # noqa: E402

if _state.default_app is not None:
    # Reset default app, which can be initialized by gevent monkey
    # patching, which traverses gc.get_objects() and calls
    # isinstance(...) on each object.
    _state.set_default_app(None)
assert _state._tls.current_app is None, "Current app already created"
assert hasattr(_state.current_app, "_Proxy__local")
# Re-point the current_app proxy at the strict getter above.
object.__setattr__(_state.current_app, "_Proxy__local", _get_current_app)
_state.get_current_app = _get_current_app

if os.environ.get('DOCS_BUILD'):
    # Before building docs, django.setup() needs to be run, specifically
    # to register apps so that imports succeed.  Adding this in
    # docs/conf.py would be too early because autodoc_mock_imports have
    # not yet been applied.
    import django
    django.setup()
def set_default(self):
    """Make this the default app for all threads.

    Delegates to ``set_default_app``.
    """
    set_default_app(self)
def setup_default_app_trap():
    """Install a Trap as the default app so stray app usage raises."""
    from celery._state import set_default_app
    trap = Trap()
    set_default_app(trap)
# Add the root to the python path root = os.path.abspath(os.path.join(settings.PROJECT_ROOT, '../')) sys.path.append(root) celery = Celery(__name__) celery.config_from_object(settings) # Celery should set this app as the default, however the 'celery.current_app' # api uses threadlocals, so code running in different threads/greenlets uses # the fallback default instead of this app when no app is specified. This # causes confusing connection errors when celery tries to connect to a # non-existent rabbitmq server. It seems to happen mostly when using the # 'celery.canvas' api. To get around this, we use the internal 'celery._state' # api to force our app to be the default. set_default_app(celery) logger = logging.getLogger(__name__) @task(ignore_result=True) def run_status_check(check_or_id): from .models import StatusCheck if not isinstance(check_or_id, StatusCheck): check = StatusCheck.objects.get(id=check_or_id) else: check = check_or_id # This will call the subclass method check.run() @task(ignore_result=True)
#: Proxy always returning the app set as default. default_app = Proxy(lambda: _state.default_app) #: Function returning the app provided or the default app if none. #: #: The environment variable :envvar:`CELERY_TRACE_APP` is used to #: trace app leaks. When enabled an exception is raised if there #: is no active app. app_or_default = None #: The 'default' loader is the default loader used by old applications. default_loader = os.environ.get('CELERY_LOADER') or 'default' #: Global fallback app instance. set_default_app(Celery('default', loader=default_loader, set_as_current=False, accept_magic_kwargs=True)) def bugreport(): return current_app().bugreport() def _app_or_default(app=None): if app is None: return _state.get_current_app() return app def _app_or_default_trace(app=None): # pragma: no cover from traceback import print_stack
from __future__ import absolute_import

import os

from django.conf import settings

from celery import Celery
from celery._state import set_default_app

# Ensure Django settings are configured before the app reads them.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'main.settings')

app = Celery('main', set_as_current=True)
# Also register this app as the process-wide default (not just the
# thread-local current app).
set_default_app(app)
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(settings.INSTALLED_APPS, related_name='tasks')
default_app = Proxy(lambda: _state.default_app) #: Function returning the app provided or the default app if none. #: #: The environment variable :envvar:`CELERY_TRACE_APP` is used to #: trace app leaks. When enabled an exception is raised if there #: is no active app. app_or_default = None #: The 'default' loader is the default loader used by old applications. default_loader = os.environ.get('CELERY_LOADER') or 'default' #: Global fallback app instance. set_default_app( Celery('default', loader=default_loader, set_as_current=False, accept_magic_kwargs=True)) def bugreport(): return current_app().bugreport() def _app_or_default(app=None): if app is None: return _state.get_current_app() return app def _app_or_default_trace(app=None): # pragma: no cover