def process_initializer():
    """Prepare a freshly forked pool worker process.

    Runs once in each child process before it starts consuming tasks.
    """
    # There seems to be a bug in multiprocessing (backport?)
    # when detached, where the worker gets EOFErrors from time to time
    # and the logger is left from the parent process causing a crash.
    platform.reset_signal("SIGTERM")
    _hijack_multiprocessing_logger()
    # Rename the process so it is identifiable in e.g. ``ps`` output.
    platform.set_mp_process_title("celeryd")
def execute_and_trace(task_name, *args, **kwargs):
    """Pickleable pool target that safely executes a single task.

    Equivalent to::

        >>> WorkerTaskTrace(task_name, *args, **kwargs).execute_safe()

    The process title is updated to show the task currently being
    executed, and restored afterwards.
    """
    platform.set_mp_process_title("celeryd", info=task_name)
    try:
        tracer = WorkerTaskTrace(task_name, *args, **kwargs)
        return tracer.execute_safe()
    finally:
        platform.set_mp_process_title("celeryd")
def process_initializer():
    """Initialize a newly spawned pool worker process.

    Runs once per child process before it starts accepting work.
    """
    # There seems to be a bug in multiprocessing (backport?)
    # when detached, where the worker gets EOFErrors from time to time
    # and the logger is left from the parent process causing a crash.
    _hijack_multiprocessing_logger()
    platform.reset_signal("SIGTERM")
    platform.set_mp_process_title("celeryd")
    # This is for windows and other platforms not supporting
    # fork(). Note that init_worker makes sure it's only
    # run once per process.
    from celery.loaders import current_loader
    current_loader().init_worker()
def process_initializer():
    """Initializes the process so it can be used to process tasks.

    Used for multiprocessing environments.

    """
    # Explicit loops instead of map(): map() is lazy on Python 3,
    # so its side effects would silently never run. The loops behave
    # identically on Python 2.
    for sig in WORKER_SIGRESET:
        platform.reset_signal(sig)
    for sig in WORKER_SIGIGNORE:
        platform.ignore_signal(sig)
    platform.set_mp_process_title("celeryd")
    # This is for windows and other platforms not supporting
    # fork(). Note that init_worker makes sure it's only
    # run once per process.
    from celery.loaders import current_loader
    current_loader().init_worker()
    signals.worker_process_init.send(sender=None)
def process_initializer(): """Initializes the process so it can be used to process tasks. Used for multiprocessing environments. """ # There seems to a bug in multiprocessing (backport?) # when detached, where the worker gets EOFErrors from time to time # and the logger is left from the parent process causing a crash. _hijack_multiprocessing_logger() platform.reset_signal("SIGTERM") platform.ignore_signal("SIGINT") platform.set_mp_process_title("celeryd") # This is for windows and other platforms not supporting # fork(). Note that init_worker makes sure it's only # run once per process. from celery.loaders import current_loader current_loader().init_worker() signals.worker_process_init.send(sender=None)
def process_initializer():
    """Initializes the process so it can be used to process tasks.

    Used for multiprocessing environments.

    """
    # There seems to be a bug in multiprocessing (backport?)
    # when detached, where the worker gets EOFErrors from time to time
    # and the logger is left from the parent process causing a crash.
    _hijack_multiprocessing_logger()
    # Explicit loops instead of map(): map() is lazy on Python 3,
    # so its side effects would silently never run. The loops behave
    # identically on Python 2.
    for sig in WORKER_SIGRESET:
        platform.reset_signal(sig)
    for sig in WORKER_SIGIGNORE:
        platform.ignore_signal(sig)
    platform.set_mp_process_title("celeryd")
    # This is for windows and other platforms not supporting
    # fork(). Note that init_worker makes sure it's only
    # run once per process.
    from celery.loaders import current_loader
    current_loader().init_worker()
    signals.worker_process_init.send(sender=None)
def process_initializer():
    """Initialize a newly spawned pool worker process.

    Handles logger cleanup, signal setup, and (on platforms without
    ``fork()``) Django/loader re-initialization.
    """
    # There seems to be a bug in multiprocessing (backport?)
    # when detached, where the worker gets EOFErrors from time to time
    # and the logger is left from the parent process causing a crash.
    _hijack_multiprocessing_logger()
    platform.reset_signal("SIGTERM")
    platform.set_mp_process_title("celeryd")
    # On Windows we need to run a dummy command 'celeryinit'
    # for django to fully initialize after fork()
    if not callable(getattr(os, "fork", None)):
        from django.core.management import execute_manager
        settings_mod = os.environ.get("DJANGO_SETTINGS_MODULE", "settings")
        project_settings = __import__(settings_mod, {}, {}, [""])
        execute_manager(project_settings, argv=["manage.py", "celeryinit"])
    # This is for windows and other platforms not supporting
    # fork(). Note that init_worker makes sure it's only
    # run once per process.
    from celery.loaders import current_loader
    current_loader().init_worker()
def process_initializer():
    """Initialize a pool worker process.

    Only renames the process so it is identifiable in e.g. ``ps``
    listings.
    """
    platform.set_mp_process_title("celeryd")
def set_process_status(info):
    """Set the worker process title (as shown in e.g. ``ps`` listings).

    :param info: Status text to show in the title.

    Any extra command-line arguments are appended to the status so
    concurrently running workers can be told apart.
    """
    # When started via "manage.py celeryd" the subcommand occupies
    # argv[1], so the interesting arguments start at index 2.
    # A conditional expression replaces the old "cond and 2 or 1" idiom.
    arg_start = 2 if "manage" in sys.argv[0] else 1
    if sys.argv[arg_start:]:
        info = "%s (%s)" % (info, " ".join(sys.argv[arg_start:]))
    platform.set_mp_process_title("celeryd", info=info)
def execute_and_trace(task_name, *args, **kwargs):
    """Run a task through ``WorkerTaskTrace`` and return the result.

    While the task runs, the process title shows the task name; the
    title is restored afterwards even if execution fails.
    """
    platform.set_mp_process_title("celeryd", info=task_name)
    try:
        tracer = WorkerTaskTrace(task_name, *args, **kwargs)
        return tracer.execute_safe()
    finally:
        platform.set_mp_process_title("celeryd")
def set_process_status(info):
    """Set the worker process title (as shown in e.g. ``ps`` listings).

    :param info: Status text to show in the title.
    :returns: Whatever ``platform.set_mp_process_title`` returns.

    Any extra command-line arguments are appended to the status so
    concurrently running workers can be told apart.
    """
    # When started via "manage.py celeryd" the subcommand occupies
    # argv[1], so the interesting arguments start at index 2.
    # A conditional expression replaces the old "cond and 2 or 1" idiom.
    arg_start = 2 if "manage" in sys.argv[0] else 1
    if sys.argv[arg_start:]:
        info = "%s (%s)" % (info, " ".join(sys.argv[arg_start:]))
    return platform.set_mp_process_title("celeryd", info=info)