Example #1
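Both examples are versions of process_initializer from Celery's prefork pool module (celery/concurrency/prefork.py): the callback every pool child process runs exactly once at startup, before it accepts any tasks. The module-level imports and signal-set constants prefixed to Example #1 below are reconstructed from the Celery 3.x/4.x source so the snippet reads standalone.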
# Module-level imports and constants, reconstructed from
# celery/concurrency/prefork.py (Celery 3.x/4.x):
import os

from celery import platforms, signals
from celery._state import _set_task_join_will_block, set_default_app
from celery.app import trace
from celery.five import items  # Python 2/3 compat helper (removed in Celery 5)

# Signals restored to default handlers / ignored in every pool child.
WORKER_SIGRESET = {"SIGTERM", "SIGHUP", "SIGTTIN", "SIGTTOU", "SIGUSR1"}
WORKER_SIGIGNORE = {"SIGINT"}


def process_initializer(app, hostname):
    """Pool child process initializer.

    Initialize the child pool process to ensure the correct
    app instance is used and things like logging works.
    """
    _set_task_join_will_block(True)
    platforms.signals.reset(*WORKER_SIGRESET)
    platforms.signals.ignore(*WORKER_SIGIGNORE)
    platforms.set_mp_process_title("celeryd", hostname=hostname)
    # This is for Windows and other platforms not supporting
    # fork().  Note that init_worker makes sure it's only
    # run once per process.
    app.loader.init_worker()
    app.loader.init_worker_process()
    logfile = os.environ.get("CELERY_LOG_FILE") or None
    if logfile and "%i" in logfile.lower():
        # The logfile path differs per child, so logging must be set up again.
        app.log.already_setup = False
    app.log.setup(
        int(os.environ.get("CELERY_LOG_LEVEL", 0) or 0),
        logfile,
        bool(os.environ.get("CELERY_LOG_REDIRECT", False)),
        str(os.environ.get("CELERY_LOG_REDIRECT_LEVEL")),
        hostname=hostname,
    )
    if os.environ.get("FORKED_BY_MULTIPROCESSING"):
        # Child was re-executed (execv) after fork, so set up
        # the worker optimizations from scratch.
        trace.setup_worker_optimizations(app, hostname)
    else:
        app.set_current()
        set_default_app(app)
        app.finalize()
        trace._tasks = app._tasks  # enables fast_trace_task optimization.
    # rebuild execution handler for all tasks.
    from celery.app.trace import build_tracer

    for name, task in items(app.tasks):
        task.__trace__ = build_tracer(name,
                                      task,
                                      app.loader,
                                      hostname,
                                      app=app)
    from celery.worker import state as worker_state

    worker_state.reset_state()
    signals.worker_process_init.send(sender=None)
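
For context, here is a minimal sketch of how such an initializer is handed to a pool. Celery itself wires process_initializer into billiard's Pool, but the initializer/initargs contract matches the stdlib multiprocessing API used below; the make_app() factory is hypothetical.

# Minimal sketch, not Celery's actual startup code: Celery passes
# process_initializer to billiard's Pool, but the initializer/initargs
# contract matches the stdlib API shown here.  make_app() is hypothetical.
import socket
from multiprocessing import Pool

from celery import Celery


def make_app():
    # Hypothetical app factory; a real worker receives its app from the CLI.
    return Celery("proj", broker="memory://")


if __name__ == "__main__":
    app = make_app()
    pool = Pool(
        processes=4,
        initializer=process_initializer,   # runs once in every child
        initargs=(app, socket.gethostname()),
    )
    pool.close()
    pool.join()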
Example #2
def process_initializer(app, hostname):
    """Pool child process initializer.

    This will initialize a child pool process to ensure the correct
    app instance is used and things like
    logging works.

    """
    _set_task_join_will_block(True)
    platforms.signals.reset(*WORKER_SIGRESET)
    platforms.signals.ignore(*WORKER_SIGIGNORE)
    platforms.set_mp_process_title('celeryd', hostname=hostname)
    # This is for Windows and other platforms not supporting
    # fork(). Note that init_worker makes sure it's only
    # run once per process.
    app.loader.init_worker()
    app.loader.init_worker_process()
    logfile = os.environ.get('CELERY_LOG_FILE') or None
    if logfile and '%i' in logfile.lower():
        # The logfile path differs per child, so logging must be set up again.
        app.log.already_setup = False
    app.log.setup(int(os.environ.get('CELERY_LOG_LEVEL', 0) or 0),
                  logfile,
                  bool(os.environ.get('CELERY_LOG_REDIRECT', False)),
                  str(os.environ.get('CELERY_LOG_REDIRECT_LEVEL')),
                  hostname=hostname)
    if os.environ.get('FORKED_BY_MULTIPROCESSING'):
        # Child was re-executed (execv) after fork, so set up
        # the worker optimizations from scratch.
        trace.setup_worker_optimizations(app, hostname)
    else:
        app.set_current()
        set_default_app(app)
        app.finalize()
        trace._tasks = app._tasks  # enables fast_trace_task optimization.
    # rebuild execution handler for all tasks.
    from celery.app.trace import build_tracer
    for name, task in items(app.tasks):
        task.__trace__ = build_tracer(name, task, app.loader, hostname,
                                      app=app)
    from celery.worker import state as worker_state
    worker_state.reset_state()
    signals.worker_process_init.send(sender=None)
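
Note that both variants take their logging configuration from the environment rather than from function arguments: on platforms without fork() the child starts from a fresh interpreter and inherits only the environment. Below is a minimal sketch of the parent-side contract, with the variable names taken from the reads above; where and how Celery itself exports them varies by version, so treat this purely as an illustration.

# Minimal sketch of the parent-side contract: export the variables that
# process_initializer reads before the pool spawns its children.
import os

# %i in the path expands to the pool process index, so each child
# re-runs log setup against its own file (the "%i" check above).
os.environ["CELERY_LOG_FILE"] = "/var/log/celery/worker-%i.log"
os.environ["CELERY_LOG_LEVEL"] = "10"        # numeric level: 10 == DEBUG
os.environ["CELERY_LOG_REDIRECT"] = "1"      # any non-empty string is truthy
os.environ["CELERY_LOG_REDIRECT_LEVEL"] = "WARNING"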