Code example #1
def detach(
    path,
    argv,
    logfile=None,
    pidfile=None,
    uid=None,
    gid=None,
    umask=None,
    working_directory=None,
    fake=False,
    app=None,
    executable=None,
    hostname=None,
):
    hostname = default_nodename(hostname)
    logfile = node_format(logfile, hostname)
    pidfile = node_format(pidfile, hostname)
    fake = 1 if C_FAKEFORK else fake
    with detached(logfile, pidfile, uid, gid, umask, working_directory, fake, after_forkers=False):
        try:
            if executable is not None:
                path = executable
            os.execv(path, [path] + argv)
        except Exception:
            if app is None:
                from celery import current_app

                app = current_app
            app.log.setup_logging_subsystem("ERROR", logfile, hostname=hostname)
            logger.critical("Can't exec %r", " ".join([path] + argv), exc_info=True)
        return EX_FAILURE
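The detach() helpers in this listing rely on node_format() to expand node-name placeholders in the logfile and pidfile paths before the process detaches. A minimal sketch of that expansion, assuming Celery's documented %n/%h node-name replacements; the paths and hostname below are hypothetical:

from celery.utils.nodenames import default_nodename, node_format

hostname = default_nodename('worker1@example.com')   # -> 'worker1@example.com'
# %n expands to the name before '@', %h to the host part (per the Celery docs
# on node-name replacements).
print(node_format('/var/log/celery/%n.log', hostname))   # /var/log/celery/worker1.log
print(node_format('/var/run/celery/%h.pid', hostname))   # /var/run/celery/example.com.pid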
Code example #2
File: worker.py Project: uzbekdev1/celery
def worker(ctx, hostname=None, pool_cls=None, app=None, uid=None, gid=None,
           loglevel=None, logfile=None, pidfile=None, statedb=None,
           **kwargs):
    """Start worker instance.

    Examples
    --------
    $ celery --app=proj worker -l INFO
    $ celery -A proj worker -l INFO -Q hipri,lopri
    $ celery -A proj worker --concurrency=4
    $ celery -A proj worker --concurrency=1000 -P eventlet
    $ celery worker --autoscale=10,0

    """
    app = ctx.obj.app
    if ctx.args:
        try:
            app.config_from_cmdline(ctx.args, namespace='worker')
        except (KeyError, ValueError) as e:
            # TODO: Improve the error messages
            raise click.UsageError(
                "Unable to parse extra configuration from command line.\n"
                f"Reason: {e}", ctx=ctx)
    if kwargs.get('detach', False):
        argv = ['-m', 'celery'] + sys.argv[1:]
        if '--detach' in argv:
            argv.remove('--detach')
        if '-D' in argv:
            argv.remove('-D')

        return detach(sys.executable,
                      argv,
                      logfile=logfile,
                      pidfile=pidfile,
                      uid=uid, gid=gid,
                      umask=kwargs.get('umask', None),
                      workdir=kwargs.get('workdir', None),
                      app=app,
                      executable=kwargs.get('executable', None),
                      hostname=hostname)

    maybe_drop_privileges(uid=uid, gid=gid)
    worker = app.Worker(
        hostname=hostname, pool_cls=pool_cls, loglevel=loglevel,
        logfile=logfile,  # node format handled by celery.app.log.setup
        pidfile=node_format(pidfile, hostname),
        statedb=node_format(statedb, hostname),
        no_color=ctx.obj.no_color,
        **kwargs)
    worker.start()
    return worker.exitcode
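For illustration, a hedged sketch of the argv rewrite performed by the detach branch above; the sys_argv value is a made-up example, not the output of a real run:

import sys

# Pretend the worker was started as: celery -A proj worker --detach --loglevel=INFO
sys_argv = ['/usr/local/bin/celery', '-A', 'proj', 'worker', '--detach', '--loglevel=INFO']

argv = ['-m', 'celery'] + sys_argv[1:]
if '--detach' in argv:
    argv.remove('--detach')
if '-D' in argv:
    argv.remove('-D')

# The detached child then re-execs roughly:
#   <sys.executable> -m celery -A proj worker --loglevel=INFO
print([sys.executable] + argv)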
Code example #3
File: log.py Project: AlerzDev/Brazo-Proyecto-Final
    def setup_logging_subsystem(self, loglevel=None, logfile=None, format=None,
                                colorize=None, hostname=None, **kwargs):
        if self.already_setup:
            return
        if logfile and hostname:
            logfile = node_format(logfile, hostname)
        Logging._setup = True
        loglevel = mlevel(loglevel or self.loglevel)
        format = format or self.format
        colorize = self.supports_color(colorize, logfile)
        reset_multiprocessing_logger()
        receivers = signals.setup_logging.send(
            sender=None, loglevel=loglevel, logfile=logfile,
            format=format, colorize=colorize,
        )

        if not receivers:
            root = logging.getLogger()

            if self.app.conf.worker_hijack_root_logger:
                root.handlers = []
                get_logger('celery').handlers = []
                get_logger('celery.task').handlers = []
                get_logger('celery.redirected').handlers = []

            # Configure root logger
            self._configure_logger(
                root, logfile, loglevel, format, colorize, **kwargs
            )

            # Configure the multiprocessing logger
            self._configure_logger(
                get_multiprocessing_logger(),
                logfile, loglevel if MP_LOG else logging.ERROR,
                format, colorize, **kwargs
            )

            signals.after_setup_logger.send(
                sender=None, logger=root,
                loglevel=loglevel, logfile=logfile,
                format=format, colorize=colorize,
            )

            # then setup the root task logger.
            self.setup_task_loggers(loglevel, logfile, colorize=colorize)

        try:
            stream = logging.getLogger().handlers[0].stream
        except (AttributeError, IndexError):
            pass
        else:
            set_default_encoding_file(stream)

        # This is a hack for multiprocessing's fork+exec, so that
        # logging before Process.run works.
        logfile_name = logfile if isinstance(logfile, string_t) else ''
        os.environ.update(_MP_FORK_LOGLEVEL_=str(loglevel),
                          _MP_FORK_LOGFILE_=logfile_name,
                          _MP_FORK_LOGFORMAT_=format)
        return receivers
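Both setup_logging_subsystem() variants skip their own handler configuration when the setup_logging signal has receivers (the "if not receivers:" branch). A minimal sketch of taking over logging that way, based on Celery's documented setup_logging signal; the dictConfig content is only an example:

import logging.config

from celery import signals


@signals.setup_logging.connect
def configure_logging(loglevel=None, logfile=None, format=None,
                      colorize=None, **kwargs):
    # Any connected receiver makes signals.setup_logging.send() return a
    # non-empty list, so Celery leaves the logging tree alone.
    logging.config.dictConfig({
        'version': 1,
        'disable_existing_loggers': False,
        'handlers': {'console': {'class': 'logging.StreamHandler'}},
        'root': {'handlers': ['console'], 'level': 'INFO'},
    })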
Code example #4
File: log.py Project: xpxu/celery
    def setup_logging_subsystem(self, loglevel=None, logfile=None, format=None,
                                colorize=None, hostname=None, **kwargs):
        if self.already_setup:
            return
        if logfile and hostname:
            logfile = node_format(logfile, hostname)
        self.already_setup = True
        loglevel = mlevel(loglevel or self.loglevel)
        format = format or self.format
        colorize = self.supports_color(colorize, logfile)
        reset_multiprocessing_logger()
        receivers = signals.setup_logging.send(
            sender=None, loglevel=loglevel, logfile=logfile,
            format=format, colorize=colorize,
        )

        if not receivers:
            root = logging.getLogger()

            if self.app.conf.worker_hijack_root_logger:
                root.handlers = []
                get_logger('celery').handlers = []
                get_logger('celery.task').handlers = []
                get_logger('celery.redirected').handlers = []

            # Configure root logger
            self._configure_logger(
                root, logfile, loglevel, format, colorize, **kwargs
            )

            # Configure the multiprocessing logger
            self._configure_logger(
                get_multiprocessing_logger(),
                logfile, loglevel if MP_LOG else logging.ERROR,
                format, colorize, **kwargs
            )

            signals.after_setup_logger.send(
                sender=None, logger=root,
                loglevel=loglevel, logfile=logfile,
                format=format, colorize=colorize,
            )

            # then setup the root task logger.
            self.setup_task_loggers(loglevel, logfile, colorize=colorize)

        try:
            stream = logging.getLogger().handlers[0].stream
        except (AttributeError, IndexError):
            pass
        else:
            set_default_encoding_file(stream)

        # This is a hack for multiprocessing's fork+exec, so that
        # logging before Process.run works.
        logfile_name = logfile if isinstance(logfile, string_t) else ''
        os.environ.update(_MP_FORK_LOGLEVEL_=str(loglevel),
                          _MP_FORK_LOGFILE_=logfile_name,
                          _MP_FORK_LOGFORMAT_=format)
        return receivers
Code example #5
def detach(
    path,
    argv,
    logfile=None,
    pidfile=None,
    uid=None,
    gid=None,
    umask=None,
    workdir=None,
    fake=False,
    app=None,
    executable=None,
    hostname=None,
):
    """Detach program by argv'."""
    hostname = default_nodename(hostname)
    logfile = node_format(logfile, hostname)
    pidfile = node_format(pidfile, hostname)
    fake = 1 if C_FAKEFORK else fake
    with detached(logfile,
                  pidfile,
                  uid,
                  gid,
                  umask,
                  workdir,
                  fake,
                  after_forkers=False):
        try:
            if executable is not None:
                path = executable
            os.execv(path, [path] + argv)
        except Exception:  # pylint: disable=broad-except
            if app is None:
                from celery import current_app

                app = current_app
            app.log.setup_logging_subsystem("ERROR",
                                            logfile,
                                            hostname=hostname)
            logger.critical("Can't exec %r",
                            " ".join([path] + argv),
                            exc_info=True)
        return EX_FAILURE
Code example #6
File: celeryd_detach.py Project: csunny/celery
def detach(path,
           argv,
           logfile=None,
           pidfile=None,
           uid=None,
           gid=None,
           umask=None,
           working_directory=None,
           fake=False,
           app=None,
           executable=None,
           hostname=None):
    hostname = default_nodename(hostname)
    logfile = node_format(logfile, hostname)
    pidfile = node_format(pidfile, hostname)
    fake = 1 if C_FAKEFORK else fake
    with detached(logfile,
                  pidfile,
                  uid,
                  gid,
                  umask,
                  working_directory,
                  fake,
                  after_forkers=False):
        try:
            if executable is not None:
                path = executable
            os.execv(path, [path] + argv)
        except Exception:
            if app is None:
                from celery import current_app
                app = current_app
            app.log.setup_logging_subsystem('ERROR',
                                            logfile,
                                            hostname=hostname)
            logger.critical("Can't exec %r",
                            ' '.join([path] + argv),
                            exc_info=True)
        return EX_FAILURE
Code example #7
def detach(path, argv, logfile=None, pidfile=None, uid=None,
           gid=None, umask=None, workdir=None, fake=False, app=None,
           executable=None, hostname=None):
    """Detach program by argv'."""
    hostname = default_nodename(hostname)
    logfile = node_format(logfile, hostname)
    pidfile = node_format(pidfile, hostname)
    fake = 1 if C_FAKEFORK else fake
    with detached(logfile, pidfile, uid, gid, umask, workdir, fake,
                  after_forkers=False):
        try:
            if executable is not None:
                path = executable
            os.execv(path, [path] + argv)
        except Exception:  # pylint: disable=broad-except
            if app is None:
                from celery import current_app
                app = current_app
            app.log.setup_logging_subsystem(
                'ERROR', logfile, hostname=hostname)
            logger.critical("Can't exec %r", ' '.join([path] + argv),
                            exc_info=True)
        return EX_FAILURE
Code example #8
File: worker.py Project: cliffnyendwe/e-commerce
def detach(path,
           argv,
           logfile=None,
           pidfile=None,
           uid=None,
           gid=None,
           umask=None,
           workdir=None,
           fake=False,
           app=None,
           executable=None,
           hostname=None):
    """Detach program by argv."""
    fake = 1 if C_FAKEFORK else fake
    # `detached()` will attempt to touch the logfile to confirm that error
    # messages won't be lost after detaching stdout/err, but this means we need
    # to pre-format it rather than relying on `setup_logging_subsystem()` like
    # we can elsewhere.
    logfile = node_format(logfile, hostname)
    with detached(logfile,
                  pidfile,
                  uid,
                  gid,
                  umask,
                  workdir,
                  fake,
                  after_forkers=False):
        try:
            if executable is not None:
                path = executable
            os.execv(path, [path] + argv)
            return EX_OK
        except Exception:  # pylint: disable=broad-except
            if app is None:
                from celery import current_app
                app = current_app
            app.log.setup_logging_subsystem('ERROR',
                                            logfile,
                                            hostname=hostname)
            logger.critical("Can't exec %r",
                            ' '.join([path] + argv),
                            exc_info=True)
            return EX_FAILURE
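Every detach() variant here computes "fake = 1 if C_FAKEFORK else fake", which is what makes daemonization debuggable: with the C_FAKEFORK environment variable set, detached() skips the real fork so tracebacks from a failing worker stay on the console (as described in Celery's daemonization guide). A small sketch of how that flag is derived:

import os

# Any non-empty value enables "fake" mode; unset or empty means a real detach.
C_FAKEFORK = os.environ.get('C_FAKEFORK')

fake = 1 if C_FAKEFORK else False
print('fake fork enabled' if fake else 'real daemonization')

From the shell this typically looks like "C_FAKEFORK=1 celery -A proj worker --detach" (the project name is hypothetical).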
Code example #9
    def node_format(self, s, nodename, **extra):
        return node_format(s, nodename, **extra)
Code example #10
def worker(ctx, hostname=None, pool_cls=None, app=None, uid=None, gid=None,
           loglevel=None, logfile=None, pidfile=None, statedb=None,
           **kwargs):
    """Start worker instance.

    Examples
    --------
    $ celery --app=proj worker -l INFO
    $ celery -A proj worker -l INFO -Q hipri,lopri
    $ celery -A proj worker --concurrency=4
    $ celery -A proj worker --concurrency=1000 -P eventlet
    $ celery worker --autoscale=10,0

    """
    app = ctx.obj.app
    if ctx.args:
        try:
            app.config_from_cmdline(ctx.args, namespace='worker')
        except (KeyError, ValueError) as e:
            # TODO: Improve the error messages
            raise click.UsageError(
                "Unable to parse extra configuration from command line.\n"
                f"Reason: {e}", ctx=ctx)
    if kwargs.get('detach', False):
        params = ctx.params.copy()
        params.pop('detach')
        params.pop('logfile')
        params.pop('pidfile')
        params.pop('uid')
        params.pop('gid')
        umask = params.pop('umask')
        workdir = ctx.obj.workdir
        params.pop('hostname')
        executable = params.pop('executable')
        argv = ['-m', 'celery', 'worker']
        for arg, value in params.items():
            if isinstance(value, bool) and value:
                argv.append(f'--{arg}')
            else:
                if value is not None:
                    argv.append(f'--{arg}')
                    argv.append(str(value))
        # Re-exec the full command line once every parameter has been
        # translated back into CLI arguments.
        return detach(sys.executable,
                      argv,
                      logfile=logfile,
                      pidfile=pidfile,
                      uid=uid, gid=gid,
                      umask=umask,
                      workdir=workdir,
                      app=app,
                      executable=executable,
                      hostname=hostname)
    maybe_drop_privileges(uid=uid, gid=gid)
    worker = app.Worker(
        hostname=hostname, pool_cls=pool_cls, loglevel=loglevel,
        logfile=logfile,  # node format handled by celery.app.log.setup
        pidfile=node_format(pidfile, hostname),
        statedb=node_format(statedb, hostname),
        no_color=ctx.obj.no_color,
        **kwargs)
    worker.start()
    return worker.exitcode
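For reference, a self-contained sketch of what the parameter loop in the detach branch above produces; the params dict is a made-up example rather than values captured from click:

params = {'concurrency': 4, 'loglevel': 'INFO', 'task_events': True, 'statedb': None}

argv = ['-m', 'celery', 'worker']
for arg, value in params.items():
    if isinstance(value, bool) and value:
        argv.append(f'--{arg}')
    else:
        if value is not None:
            argv.append(f'--{arg}')
            argv.append(str(value))

# argv == ['-m', 'celery', 'worker', '--concurrency', '4',
#          '--loglevel', 'INFO', '--task_events']
print(argv)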
Code example #11
File: base.py Project: clverpanda/celery
    def node_format(self, s, nodename, **extra):
        return node_format(s, nodename, **extra)