Example #1
class Worker(WorkController):
    redirect_stdouts = from_config()
    redirect_stdouts_level = from_config()

    def on_before_init(self, purge=False, no_color=None, **kwargs):
        # apply task execution optimizations
        trace.setup_worker_optimizations(self.app)

        # this signal can be used to set up configuration for
        # workers by name.
        conf = self.app.conf
        signals.celeryd_init.send(
            sender=self.hostname,
            instance=self,
            conf=conf,
        )
        self.purge = purge
        self.no_color = no_color
        self._isatty = isatty(sys.stdout)
        self.colored = self.app.log.colored(
            self.logfile,
            enabled=not no_color if no_color is not None else no_color)

    def on_init_namespace(self):
        self.setup_logging()
        # apply task execution optimizations
        trace.setup_worker_optimizations(self.app)

    def on_start(self):
        WorkController.on_start(self)

        # this signal can be used to e.g. change queues after
        # the -Q option has been applied.
        signals.celeryd_after_setup.send(
            sender=self.hostname,
            instance=self,
            conf=self.app.conf,
        )

        if getattr(os, 'getuid', None) and os.getuid() == 0:
            warnings.warn(
                RuntimeWarning(
                    'Running the worker with superuser privileges is discouraged!',
                ))

        if self.purge:
            self.purge_messages()

        # Dump configuration to screen so we have some basic information
        # for when users send bug reports.
        sys.__stdout__.write(
            str(self.colored.cyan(' \n', self.startup_info())) +
            str(self.colored.reset(self.extra_info() or '')) + '\n')
        self.set_process_status('-active-')
        self.install_platform_tweaks(self)

    def on_consumer_ready(self, consumer):
        signals.worker_ready.send(sender=consumer)
        print('{0.hostname} ready.'.format(self))

    def setup_logging(self, colorize=None):
        if colorize is None and self.no_color is not None:
            colorize = not self.no_color
        self.app.log.setup(self.loglevel,
                           self.logfile,
                           self.redirect_stdouts,
                           self.redirect_stdouts_level,
                           colorize=colorize)

    def purge_messages(self):
        count = self.app.control.purge()
        print('purge: Erased {0} {1} from the queue.\n'.format(
            count, pluralize(count, 'message')))

    def tasklist(self, include_builtins=True):
        tasks = self.app.tasks
        if not include_builtins:
            tasks = (t for t in tasks if not t.startswith('celery.'))
        return '\n'.join('  . {0}'.format(task) for task in sorted(tasks))

    def extra_info(self):
        if self.loglevel <= logging.INFO:
            include_builtins = self.loglevel <= logging.DEBUG
            tasklist = self.tasklist(include_builtins=include_builtins)
            return EXTRA_INFO_FMT.format(tasks=tasklist)

    def startup_info(self):
        app = self.app
        concurrency = string(self.concurrency)
        appr = '{0}:0x{1:x}'.format(app.main or '__main__', id(app))
        if not isinstance(app.loader, AppLoader):
            loader = qualname(app.loader)
            if loader.startswith('celery.loaders'):
                loader = loader[14:]
            appr += ' ({0})'.format(loader)
        if self.autoscale:
            max, min = self.autoscale
            concurrency = '{{min={0}, max={1}}}'.format(min, max)
        pool = self.pool_cls
        if not isinstance(pool, string_t):
            pool = pool.__module__
        concurrency += ' ({0})'.format(pool.split('.')[-1])
        events = 'ON'
        if not self.send_events:
            events = 'OFF (enable -E to monitor this worker)'

        banner = BANNER.format(
            app=appr,
            hostname=self.hostname,
            version=VERSION_BANNER,
            conninfo=self.app.connection().as_uri(),
            concurrency=concurrency,
            events=events,
            queues=app.amqp.queues.format(indent=0, indent_first=False),
        ).splitlines()

        # integrate the ASCII art.
        for i, x in enumerate(banner):
            try:
                banner[i] = ' '.join([ARTLINES[i], banner[i]])
            except IndexError:
                banner[i] = ' ' * 16 + banner[i]
        return '\n'.join(banner) + '\n'

    def install_platform_tweaks(self, worker):
        """Install platform specific tweaks and workarounds."""
        if self.app.IS_OSX:
            self.osx_proxy_detection_workaround()

        # Install signal handler so SIGHUP restarts the worker.
        if not self._isatty:
            # only install HUP handler if detached from terminal,
            # so closing the terminal window doesn't restart the worker
            # into the background.
            if self.app.IS_OSX:
                # OS X can't exec from a process using threads.
                # See http://github.com/celery/celery/issues#issue/152
                install_HUP_not_supported_handler(worker)
            else:
                install_worker_restart_handler(worker)
        install_worker_term_handler(worker)
        install_worker_term_hard_handler(worker)
        install_worker_int_handler(worker)
        install_cry_handler()
        install_rdb_handler()

    def osx_proxy_detection_workaround(self):
        """See http://github.com/celery/celery/issues#issue/161"""
        os.environ.setdefault('celery_dummy_proxy', 'set_by_celeryd')

    def set_process_status(self, info):
        return platforms.set_mp_process_title('celeryd',
                                              info='{0} ({1})'.format(
                                                  info,
                                                  platforms.strargv(sys.argv)),
                                              hostname=self.hostname)
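
The `celeryd_init` and `celeryd_after_setup` signals sent above are the usual hook points for adjusting configuration per node. A minimal receiver for the first one, following the documented usage of this era (the node name and setting below are illustrative):

from celery.signals import celeryd_init

@celeryd_init.connect
def configure_worker(sender=None, conf=None, **kwargs):
    # ``sender`` is the node name, so configuration can differ per worker.
    if sender == 'worker1@example.com':           # hypothetical node name
        conf.CELERYD_PREFETCH_MULTIPLIER = 1      # illustrative old-style setting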
Example #2
class WorkController(configurated):
    """Unmanaged worker instance."""
    RUN = RUN
    CLOSE = CLOSE
    TERMINATE = TERMINATE

    app = None
    concurrency = from_config()
    loglevel = logging.ERROR
    logfile = from_config('log_file')
    send_events = from_config()
    pool_cls = from_config('pool')
    consumer_cls = from_config('consumer')
    mediator_cls = from_config('mediator')
    timer_cls = from_config('timer')
    timer_precision = from_config('timer_precision')
    autoscaler_cls = from_config('autoscaler')
    autoreloader_cls = from_config('autoreloader')
    schedule_filename = from_config()
    scheduler_cls = from_config('celerybeat_scheduler')
    task_time_limit = from_config()
    task_soft_time_limit = from_config()
    max_tasks_per_child = from_config()
    pool_putlocks = from_config()
    pool_restarts = from_config()
    force_execv = from_config()
    prefetch_multiplier = from_config()
    state_db = from_config()
    disable_rate_limits = from_config()
    worker_lost_wait = from_config()

    _state = None
    _running = 0

    def __init__(self,
                 loglevel=None,
                 hostname=None,
                 ready_callback=noop,
                 queues=None,
                 app=None,
                 pidfile=None,
                 **kwargs):
        self.app = app_or_default(app or self.app)

        self._shutdown_complete = Event()
        self.setup_defaults(kwargs, namespace='celeryd')
        self.app.select_queues(queues)  # select queues subset.

        # Options
        self.loglevel = loglevel or self.loglevel
        self.hostname = hostname or socket.gethostname()
        self.ready_callback = ready_callback
        self._finalize = Finalize(self, self.stop, exitpriority=1)
        self.pidfile = pidfile
        self.pidlock = None
        self.use_eventloop = self.should_use_eventloop()

        # Update CELERY_INCLUDE to contain all known task modules, so that
        # all task modules are imported in case an execv happens.
        task_modules = set(task.__class__.__module__
                           for task in self.app.tasks.itervalues())
        self.app.conf.CELERY_INCLUDE = tuple(
            set(self.app.conf.CELERY_INCLUDE) | task_modules, )

        # Initialize boot steps
        self.pool_cls = _concurrency.get_implementation(self.pool_cls)
        self.components = []
        self.namespace = Namespace(app=self.app).apply(self, **kwargs)

    def start(self):
        """Starts the workers main loop."""
        self._state = self.RUN
        if self.pidfile:
            self.pidlock = platforms.create_pidlock(self.pidfile)
        try:
            for i, component in enumerate(self.components):
                logger.debug('Starting %s...', qualname(component))
                self._running = i + 1
                if component:
                    component.start()
                logger.debug('%s OK!', qualname(component))
        except SystemTerminate:
            self.terminate()
        except Exception, exc:
            logger.error('Unrecoverable error: %r', exc, exc_info=True)
            self.stop()
        except (KeyboardInterrupt, SystemExit):
            self.stop()
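
Most of the class attributes above are `from_config()` descriptors that `setup_defaults(kwargs, namespace='celeryd')` resolves against the app configuration, e.g. `pool_cls = from_config('pool')` falls back to the `CELERYD_POOL` setting. A simplified sketch of that pattern, assuming a dict-like `app.conf`; this illustrates the idea only and is not the actual `celery.utils` implementation:

class from_config(object):
    """Declarative marker: default this attribute from the app configuration."""
    def __init__(self, key=None):
        self.key = key


class configurated(object):
    app = None  # subclasses set this before calling setup_defaults()

    def setup_defaults(self, kwargs, namespace='celeryd'):
        for name in dir(type(self)):
            marker = getattr(type(self), name, None)
            if isinstance(marker, from_config):
                # e.g. logfile = from_config('log_file') with namespace
                # 'celeryd' resolves to the CELERYD_LOG_FILE setting.
                setting = '{0}_{1}'.format(namespace, marker.key or name).upper()
                value = kwargs.pop(name, None)
                if value is None:
                    value = self.app.conf.get(setting)
                setattr(self, name, value)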
Example #3
class Worker(configurated):
    WorkController = WorkController

    app = None
    inherit_confopts = (WorkController, )
    loglevel = from_config('log_level')
    redirect_stdouts = from_config()
    redirect_stdouts_level = from_config()

    def __init__(self, hostname=None, purge=False, beat=False,
                 queues=None, include=None, app=None, pidfile=None,
                 autoscale=None, autoreload=False, no_execv=False,
                 no_color=None, **kwargs):
        self.app = app = app_or_default(app or self.app)
        self.hostname = hostname or socket.gethostname()

        # this signal can be used to set up configuration for
        # workers by name.
        signals.celeryd_init.send(sender=self.hostname, instance=self,
                                  conf=self.app.conf)

        self.setup_defaults(kwargs, namespace='celeryd')
        if not self.concurrency:
            try:
                self.concurrency = cpu_count()
            except NotImplementedError:
                self.concurrency = 2
        self.purge = purge
        self.beat = beat
        self.use_queues = [] if queues is None else queues
        self.queues = None
        self.include = include
        self.pidfile = pidfile
        self.autoscale = None
        self.autoreload = autoreload
        self.no_color = no_color
        self.no_execv = no_execv
        if autoscale:
            max_c, _, min_c = autoscale.partition(',')
            self.autoscale = [int(max_c), min_c and int(min_c) or 0]
        self._isatty = isatty(sys.stdout)

        self.colored = app.log.colored(
            self.logfile,
            enabled=not no_color if no_color is not None else no_color
        )

        if isinstance(self.use_queues, basestring):
            self.use_queues = self.use_queues.split(',')
        if self.include:
            if isinstance(self.include, basestring):
                self.include = self.include.split(',')
            app.conf.CELERY_INCLUDE = (
                tuple(app.conf.CELERY_INCLUDE) + tuple(self.include))
        self.loglevel = mlevel(self.loglevel)

    def run(self):
        self.init_queues()
        self.app.loader.init_worker()

        # this signal can be used to e.g. change queues after
        # the -Q option has been applied.
        signals.celeryd_after_setup.send(sender=self.hostname, instance=self,
                                         conf=self.app.conf)

        if getattr(os, 'getuid', None) and os.getuid() == 0:
            warnings.warn(RuntimeWarning(
                'Running celeryd with superuser privileges is discouraged!'))

        if self.purge:
            self.purge_messages()

        # Dump configuration to screen so we have some basic information
        # for when users send bug reports.
        print(str(self.colored.cyan(' \n', self.startup_info())) +
              str(self.colored.reset(self.extra_info() or '')))
        self.set_process_status('-active-')

        self.setup_logging()

        # apply task execution optimizations
        trace.setup_worker_optimizations(self.app)

        try:
            self.run_worker()
        except IGNORE_ERRORS:
            pass

    def on_consumer_ready(self, consumer):
        signals.worker_ready.send(sender=consumer)
        print('celery@%s ready.' % safe_str(self.hostname))

    def init_queues(self):
        try:
            self.app.select_queues(self.use_queues)
        except KeyError, exc:
            raise ImproperlyConfigured(UNKNOWN_QUEUE % (self.use_queues, exc))
        if self.app.conf.CELERY_WORKER_DIRECT:
            self.app.amqp.queues.select_add(worker_direct(self.hostname))
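
The `autoscale` option arrives as a plain string and is split with `str.partition`, so both `'max,min'` and a bare `'max'` work; the parsing above behaves like this:

max_c, _, min_c = '10,3'.partition(',')
print([int(max_c), min_c and int(min_c) or 0])   # [10, 3]

max_c, _, min_c = '10'.partition(',')
print([int(max_c), min_c and int(min_c) or 0])   # [10, 0]  (empty min part defaults to 0)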
Example #4
class WorkController(configurated):
    """Unmanaged worker instance."""
    app = None
    concurrency = from_config()
    loglevel = from_config('log_level')
    logfile = from_config('log_file')
    send_events = from_config()
    pool_cls = from_config('pool')
    consumer_cls = from_config('consumer')
    mediator_cls = from_config('mediator')
    timer_cls = from_config('timer')
    timer_precision = from_config('timer_precision')
    autoscaler_cls = from_config('autoscaler')
    autoreloader_cls = from_config('autoreloader')
    schedule_filename = from_config()
    scheduler_cls = from_config('celerybeat_scheduler')
    task_time_limit = from_config()
    task_soft_time_limit = from_config()
    max_tasks_per_child = from_config()
    pool_putlocks = from_config()
    pool_restarts = from_config()
    force_execv = from_config()
    prefetch_multiplier = from_config()
    state_db = from_config()
    disable_rate_limits = from_config()
    worker_lost_wait = from_config()

    pidlock = None

    def __init__(self, app=None, hostname=None, **kwargs):
        self.app = app_or_default(app or self.app)
        self.hostname = hostname or socket.gethostname()
        self.app.loader.init_worker()
        self.on_before_init(**kwargs)

        self._finalize = Finalize(self, self.stop, exitpriority=1)
        self.setup_instance(**self.prepare_args(**kwargs))

    def setup_instance(self,
                       queues=None,
                       ready_callback=None,
                       pidfile=None,
                       include=None,
                       **kwargs):
        self.pidfile = pidfile
        self.setup_defaults(kwargs, namespace='celeryd')
        self.setup_queues(queues)
        self.setup_includes(include)

        # Set default concurrency
        if not self.concurrency:
            try:
                self.concurrency = cpu_count()
            except NotImplementedError:
                self.concurrency = 2

        # Options
        self.loglevel = mlevel(self.loglevel)
        self.ready_callback = ready_callback or self.on_consumer_ready
        self.use_eventloop = self.should_use_eventloop()

        signals.worker_init.send(sender=self)

        # Initialize boot steps
        self.pool_cls = _concurrency.get_implementation(self.pool_cls)
        self.components = []
        self.namespace = Namespace(app=self.app,
                                   on_start=self.on_start,
                                   on_close=self.on_close,
                                   on_stopped=self.on_stopped)
        self.namespace.apply(self, **kwargs)

    def on_before_init(self, **kwargs):
        pass

    def on_start(self):
        if self.pidfile:
            self.pidlock = platforms.create_pidlock(self.pidfile)

    def on_consumer_ready(self, consumer):
        pass

    def on_close(self):
        self.app.loader.shutdown_worker()

    def on_stopped(self):
        self.timer.stop()
        self.consumer.close_connection()

        if self.pidlock:
            self.pidlock.release()

    def setup_queues(self, queues):
        if isinstance(queues, basestring):
            queues = queues.split(',')
        self.queues = queues
        try:
            self.app.select_queues(queues)
        except KeyError as exc:
            raise ImproperlyConfigured(UNKNOWN_QUEUE.format(queues, exc))
        if self.app.conf.CELERY_WORKER_DIRECT:
            self.app.amqp.queues.select_add(worker_direct(self.hostname))

    def setup_includes(self, includes):
        # Update CELERY_INCLUDE to contain all known task modules, so that
        # all task modules are imported in case an execv happens.
        inc = self.app.conf.CELERY_INCLUDE
        if includes:
            if isinstance(includes, basestring):
                includes = includes.split(',')
            inc = self.app.conf.CELERY_INCLUDE = tuple(inc) + tuple(includes)
        self.include = includes
        task_modules = set(task.__class__.__module__
                           for task in self.app.tasks.itervalues())
        self.app.conf.CELERY_INCLUDE = tuple(set(inc) | task_modules)

    def prepare_args(self, **kwargs):
        return kwargs

    def start(self):
        """Starts the workers main loop."""
        try:
            self.namespace.start(self)
        except SystemTerminate:
            self.terminate()
        except Exception as exc:
            logger.error('Unrecoverable error: %r', exc, exc_info=True)
            self.stop()
        except (KeyboardInterrupt, SystemExit):
            self.stop()

    def process_task_sem(self, req):
        return self._quick_acquire(self.process_task, req)

    def process_task(self, req):
        """Process task by sending it to the pool of workers."""
        try:
            req.execute_using_pool(self.pool)
        except TaskRevokedError:
            try:
                self._quick_release()  # Issue 877
            except AttributeError:
                pass
        except Exception as exc:
            logger.critical('Internal error: %r\n%s',
                            exc,
                            traceback.format_exc(),
                            exc_info=True)
        except SystemTerminate:
            self.terminate()
            raise
        except BaseException as exc:
            self.stop()
            raise exc

    def signal_consumer_close(self):
        try:
            self.consumer.close()
        except AttributeError:
            pass

    def should_use_eventloop(self):
        return (detect_environment() == 'default'
                and self.app.connection().is_evented
                and not self.app.IS_WINDOWS)

    def stop(self, in_sighandler=False):
        """Graceful shutdown of the worker server."""
        self.signal_consumer_close()
        if not in_sighandler or self.pool.signal_safe:
            self._shutdown(warm=True)

    def terminate(self, in_sighandler=False):
        """Not so graceful shutdown of the worker server."""
        self.signal_consumer_close()
        if not in_sighandler or self.pool.signal_safe:
            self._shutdown(warm=False)

    def _shutdown(self, warm=True):
        self.namespace.stop(self, terminate=not warm)
        self.namespace.join()

    def reload(self, modules=None, reload=False, reloader=None):
        modules = self.app.loader.task_modules if modules is None else modules
        imp = self.app.loader.import_from_cwd

        for module in set(modules or ()):
            if module not in sys.modules:
                logger.debug('importing module %s', module)
                imp(module)
            elif reload:
                logger.debug('reloading module %s', module)
                reload_from_cwd(sys.modules[module], reloader)
        self.pool.restart()

    @property
    def _state(self):
        return self.namespace.state

    @property
    def state(self):
        return state
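
`setup_includes()` first appends any explicitly requested modules to `CELERY_INCLUDE`, then unions in the module of every registered task so that a child started via execv imports the same modules. The same merge with plain values (module names are made up):

conf_include = ('proj.tasks',)                             # existing CELERY_INCLUDE
requested = 'proj.extra,proj.periodic'                     # --include option, if given
task_modules = set(['proj.tasks', 'celery.app.builtins'])  # modules of registered tasks

inc = tuple(conf_include) + tuple(requested.split(','))
merged = tuple(set(inc) | task_modules)
print(sorted(merged))  # order is unspecified because the merge goes through a set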
Example #5
class Beat(configurated):
    Service = beat.Service

    app = None
    loglevel = from_config('log_level')
    logfile = from_config('log_file')
    schedule = from_config('schedule_filename')
    scheduler_cls = from_config('scheduler')
    redirect_stdouts = from_config()
    redirect_stdouts_level = from_config()

    def __init__(self, max_interval=None, app=None,
            socket_timeout=30, pidfile=None, no_color=None, **kwargs):
        """Starts the celerybeat task scheduler."""
        self.app = app = app_or_default(app or self.app)
        self.setup_defaults(kwargs, namespace='celerybeat')

        self.max_interval = max_interval
        self.socket_timeout = socket_timeout
        self.no_color = no_color
        self.colored = app.log.colored(self.logfile,
            enabled=not no_color if no_color is not None else no_color,
        )
        self.pidfile = pidfile

        if not isinstance(self.loglevel, int):
            self.loglevel = LOG_LEVELS[self.loglevel.upper()]

    def run(self):
        print(str(self.colored.cyan(
                    'celerybeat v{0} is starting.'.format(VERSION_BANNER))))
        self.init_loader()
        self.set_process_title()
        self.start_scheduler()

    def setup_logging(self, colorize=None):
        if colorize is None and self.no_color is not None:
            colorize = not self.no_color
        self.app.log.setup(self.loglevel, self.logfile,
                           self.redirect_stdouts, self.redirect_stdouts_level,
                           colorize=colorize)

    def start_scheduler(self):
        c = self.colored
        if self.pidfile:
            platforms.create_pidlock(self.pidfile)
        beat = self.Service(app=self.app,
                            max_interval=self.max_interval,
                            scheduler_cls=self.scheduler_cls,
                            schedule_filename=self.schedule)

        print(str(c.blue('__    ', c.magenta('-'),
                  c.blue('    ... __   '), c.magenta('-'),
                  c.blue('        _\n'),
                  c.reset(self.startup_info(beat)))))
        self.setup_logging()
        if self.socket_timeout:
            logger.debug('Setting default socket timeout to %r',
                         self.socket_timeout)
            socket.setdefaulttimeout(self.socket_timeout)
        try:
            self.install_sync_handler(beat)
            beat.start()
        except Exception as exc:
            logger.critical('celerybeat raised exception %s: %r',
                            exc.__class__, exc,
                            exc_info=True)

    def init_loader(self):
        # Run the worker init handler.
        # (Usually imports task modules and such.)
        self.app.loader.init_worker()
        self.app.finalize()

    def startup_info(self, beat):
        scheduler = beat.get_scheduler(lazy=True)
        return STARTUP_INFO_FMT.format(
            conninfo=self.app.connection().as_uri(),
            logfile=self.logfile or '[stderr]',
            loglevel=LOG_LEVELS[self.loglevel],
            loader=qualname(self.app.loader),
            scheduler=qualname(scheduler),
            scheduler_info=scheduler.info,
            hmax_interval=humanize_seconds(beat.max_interval),
            max_interval=beat.max_interval,
            )

    def set_process_title(self):
        arg_start = 'manage' in sys.argv[0] and 2 or 1
        platforms.set_process_title('celerybeat',
                               info=' '.join(sys.argv[arg_start:]))

    def install_sync_handler(self, beat):
        """Install a `SIGTERM` + `SIGINT` handler that saves
        the celerybeat schedule."""

        def _sync(signum, frame):
            beat.sync()
            raise SystemExit()

        platforms.signals.update(SIGTERM=_sync, SIGINT=_sync)
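
`platforms.signals.update(SIGTERM=_sync, SIGINT=_sync)` is a thin convenience wrapper over the standard `signal` module; with the stdlib alone the same installation looks roughly like this (`service` stands in for the `beat.Service` instance):

import signal

def make_sync_handler(service):
    def _sync(signum, frame):
        service.sync()       # persist the schedule before exiting
        raise SystemExit()
    return _sync

# handler = make_sync_handler(service)
# signal.signal(signal.SIGTERM, handler)
# signal.signal(signal.SIGINT, handler)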
Example #6
File: worker.py  Project: unixomg/celery
class Worker(configurated):
    WorkController = WorkController

    app = None
    inherit_confopts = (WorkController, )
    loglevel = from_config("log_level")
    redirect_stdouts = from_config()
    redirect_stdouts_level = from_config()

    def __init__(self, hostname=None, purge=False, beat=False,
            queues=None, include=None, app=None, pidfile=None,
            autoscale=None, autoreload=False, no_execv=False, **kwargs):
        self.app = app = app_or_default(app or self.app)
        self.hostname = hostname or socket.gethostname()

        # this signal can be used to set up configuration for
        # workers by name.
        signals.celeryd_init.send(sender=self.hostname, instance=self,
                                  conf=self.app.conf)

        self.setup_defaults(kwargs, namespace="celeryd")
        if not self.concurrency:
            try:
                self.concurrency = cpu_count()
            except NotImplementedError:
                self.concurrency = 2
        self.purge = purge
        self.beat = beat
        self.use_queues = [] if queues is None else queues
        self.queues = None
        self.include = include
        self.pidfile = pidfile
        self.autoscale = None
        self.autoreload = autoreload
        self.no_execv = no_execv
        if autoscale:
            max_c, _, min_c = autoscale.partition(",")
            self.autoscale = [int(max_c), min_c and int(min_c) or 0]
        self._isatty = isatty(sys.stdout)

        self.colored = app.log.colored(self.logfile)

        if isinstance(self.use_queues, basestring):
            self.use_queues = self.use_queues.split(",")
        if self.include:
            if isinstance(self.include, basestring):
                self.include = self.include.split(",")
            app.conf.CELERY_INCLUDE = (
                tuple(app.conf.CELERY_INCLUDE) + tuple(self.include))
        self.loglevel = mlevel(self.loglevel)

    def run(self):
        self.init_queues()
        self.app.loader.init_worker()

        if getattr(os, "getuid", None) and os.getuid() == 0:
            warnings.warn(RuntimeWarning(
                "Running celeryd with superuser privileges is discouraged!"))

        if self.purge:
            self.purge_messages()

        # Dump configuration to screen so we have some basic information
        # for when users send bug reports.
        print(str(self.colored.cyan(" \n", self.startup_info())) +
              str(self.colored.reset(self.extra_info() or "")))
        self.set_process_status("-active-")

        self.redirect_stdouts_to_logger()
        try:
            self.run_worker()
        except IGNORE_ERRORS:
            pass

    def on_consumer_ready(self, consumer):
        signals.worker_ready.send(sender=consumer)
        print("celery@%s has started." % self.hostname)

    def init_queues(self):
        try:
            self.app.select_queues(self.use_queues)
        except KeyError, exc:
            raise ImproperlyConfigured(UNKNOWN_QUEUE % (self.use_queues, exc))
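
The `worker_ready` signal sent from `on_consumer_ready()` can also be consumed from application code; a minimal receiver (the message is illustrative):

from celery.signals import worker_ready

@worker_ready.connect
def announce_ready(sender=None, **kwargs):
    # ``sender`` is the consumer instance that just finished starting.
    print('consumer is up, worker is accepting tasks')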
Example #7
class Beat(configurated):
    Service = beat.Service

    app = None
    loglevel = from_config('log_level')
    logfile = from_config('log_file')
    schedule = from_config('schedule_filename')
    scheduler_cls = from_config('scheduler')
    redirect_stdouts = from_config()
    redirect_stdouts_level = from_config()

    def __init__(self,
                 max_interval=None,
                 app=None,
                 socket_timeout=30,
                 pidfile=None,
                 no_color=None,
                 **kwargs):
        """Starts the celerybeat task scheduler."""
        self.app = app = app_or_default(app or self.app)
        self.setup_defaults(kwargs, namespace='celerybeat')

        self.max_interval = max_interval
        self.socket_timeout = socket_timeout
        self.no_color = no_color
        self.colored = app.log.colored(
            self.logfile,
            enabled=not no_color if no_color is not None else no_color,
        )
        self.pidfile = pidfile

        if not isinstance(self.loglevel, int):
            self.loglevel = LOG_LEVELS[self.loglevel.upper()]

    def run(self):
        print(
            str(
                self.colored.cyan('celerybeat v%s is starting.' %
                                  VERSION_BANNER)))
        self.init_loader()
        self.set_process_title()
        self.start_scheduler()

    def setup_logging(self, colorize=None):
        if colorize is None and self.no_color is not None:
            colorize = not self.no_color
        self.app.log.setup(self.loglevel,
                           self.logfile,
                           self.redirect_stdouts,
                           self.redirect_stdouts_level,
                           colorize=colorize)

    def start_scheduler(self):
        c = self.colored
        if self.pidfile:
            platforms.create_pidlock(self.pidfile)
        beat = self.Service(app=self.app,
                            max_interval=self.max_interval,
                            scheduler_cls=self.scheduler_cls,
                            schedule_filename=self.schedule)

        print(
            str(
                c.blue('__    ', c.magenta('-'), c.blue('    ... __   '),
                       c.magenta('-'), c.blue('        _\n'),
                       c.reset(self.startup_info(beat)))))
        self.setup_logging()
        if self.socket_timeout:
            logger.debug('Setting default socket timeout to %r',
                         self.socket_timeout)
            socket.setdefaulttimeout(self.socket_timeout)
        try:
            self.install_sync_handler(beat)
            beat.start()
        except Exception, exc:
            logger.critical('celerybeat raised exception %s: %r',
                            exc.__class__,
                            exc,
                            exc_info=True)
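
In these versions the `celerybeat` command essentially built one of these objects and called `run()`. A minimal standalone sketch, assuming the app exposes the class as `app.Beat` the way the CLI of that era did (the app name, broker URL and option values are illustrative):

from celery import Celery

app = Celery('proj', broker='amqp://')   # illustrative app and broker URL
beat = app.Beat(loglevel='INFO',         # resolved through setup_defaults(kwargs)
                max_interval=5)          # seconds between schedule checks
beat.run()  # prints the starting banner, sets the process title, starts the scheduler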
Example #8
class Beat(configurated):
    Service = beat.Service

    app = None
    loglevel = from_config("log_level")
    logfile = from_config("log_file")
    schedule = from_config("schedule_filename")
    scheduler_cls = from_config("scheduler")
    redirect_stdouts = from_config()
    redirect_stdouts_level = from_config()

    def __init__(self,
                 max_interval=None,
                 app=None,
                 socket_timeout=30,
                 pidfile=None,
                 **kwargs):
        """Starts the celerybeat task scheduler."""
        self.app = app = app_or_default(app or self.app)
        self.setup_defaults(kwargs, namespace="celerybeat")

        self.max_interval = max_interval
        self.socket_timeout = socket_timeout
        self.colored = app.log.colored(self.logfile)
        self.pidfile = pidfile

        if not isinstance(self.loglevel, int):
            self.loglevel = LOG_LEVELS[self.loglevel.upper()]

    def run(self):
        self.setup_logging()
        print(
            str(self.colored.cyan("celerybeat v%s is starting." %
                                  __version__)))
        self.init_loader()
        self.set_process_title()
        self.start_scheduler()

    def setup_logging(self):
        handled = self.app.log.setup_logging_subsystem(loglevel=self.loglevel,
                                                       logfile=self.logfile)
        if self.redirect_stdouts and not handled:
            self.app.log.redirect_stdouts_to_logger(
                logger, loglevel=self.redirect_stdouts_level)

    def start_scheduler(self):
        c = self.colored
        if self.pidfile:
            platforms.create_pidlock(self.pidfile)
        beat = self.Service(app=self.app,
                            max_interval=self.max_interval,
                            scheduler_cls=self.scheduler_cls,
                            schedule_filename=self.schedule)

        print(
            str(
                c.blue("__    ", c.magenta("-"), c.blue("    ... __   "),
                       c.magenta("-"), c.blue("        _\n"),
                       c.reset(self.startup_info(beat)))))
        if self.socket_timeout:
            logger.debug("Setting default socket timeout to %r",
                         self.socket_timeout)
            socket.setdefaulttimeout(self.socket_timeout)
        try:
            self.install_sync_handler(beat)
            beat.start()
        except Exception, exc:
            logger.critical("celerybeat raised exception %s: %r",
                            exc.__class__,
                            exc,
                            exc_info=True)
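
The `LOG_LEVELS[self.loglevel.upper()]` conversion simply maps standard level names to their numeric values, the same mapping the stdlib `logging` module exposes:

import logging

assert logging.getLevelName('INFO') == logging.INFO == 20   # name -> number
assert logging.getLevelName(20) == 'INFO'                   # number -> name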
Example #9
class WorkController(configurated):
    """Unmanaged worker instance."""
    RUN = RUN
    CLOSE = CLOSE
    TERMINATE = TERMINATE

    app = None
    concurrency = from_config()
    loglevel = logging.ERROR
    logfile = from_config("log_file")
    send_events = from_config()
    pool_cls = from_config("pool")
    consumer_cls = from_config("consumer")
    mediator_cls = from_config("mediator")
    timer_cls = from_config("timer")
    timer_precision = from_config("timer_precision")
    autoscaler_cls = from_config("autoscaler")
    autoreloader_cls = from_config("autoreloader")
    schedule_filename = from_config()
    scheduler_cls = from_config("celerybeat_scheduler")
    task_time_limit = from_config()
    task_soft_time_limit = from_config()
    max_tasks_per_child = from_config()
    pool_putlocks = from_config()
    force_execv = from_config()
    prefetch_multiplier = from_config()
    state_db = from_config()
    disable_rate_limits = from_config()
    worker_lost_wait = from_config()

    _state = None
    _running = 0

    def __init__(self,
                 loglevel=None,
                 hostname=None,
                 ready_callback=noop,
                 queues=None,
                 app=None,
                 pidfile=None,
                 **kwargs):
        self.app = app_or_default(app or self.app)
        # all new threads start without a current app, so if an app is not
        # passed on to the thread it will fall back to the "default app",
        # which then could be the wrong app.  So for the worker
        # we set this to always return our app.  This is a hack,
        # and means that only a single app can be used for workers
        # running in the same process.
        set_default_app(self.app)
        self.app.finalize()
        trace._tasks = self.app._tasks

        self._shutdown_complete = Event()
        self.setup_defaults(kwargs, namespace="celeryd")
        self.app.select_queues(queues)  # select queues subset.

        # Options
        self.loglevel = loglevel or self.loglevel
        self.hostname = hostname or socket.gethostname()
        self.ready_callback = ready_callback
        self._finalize = Finalize(self, self.stop, exitpriority=1)
        self.pidfile = pidfile
        self.pidlock = None
        self.use_eventloop = (detect_environment() == "default"
                              and self.app.broker_connection().is_evented)

        # Initialize boot steps
        self.pool_cls = _concurrency.get_implementation(self.pool_cls)
        self.components = []
        self.namespace = Namespace(app=self.app).apply(self, **kwargs)

    def start(self):
        """Starts the workers main loop."""
        self._state = self.RUN
        if self.pidfile:
            self.pidlock = platforms.create_pidlock(self.pidfile)
        try:
            for i, component in enumerate(self.components):
                logger.debug("Starting %s...", qualname(component))
                self._running = i + 1
                if component:
                    component.start()
                logger.debug("%s OK!", qualname(component))
        except SystemTerminate:
            self.terminate()
        except Exception, exc:
            logger.error("Unrecoverable error: %r", exc, exc_info=True)
            self.stop()
        except (KeyboardInterrupt, SystemExit):
            self.stop()
Example #10
class WorkController(configurated):
    """Unmanaged worker instance."""
    RUN = RUN
    CLOSE = CLOSE
    TERMINATE = TERMINATE

    app = None
    concurrency = from_config()
    loglevel = from_config('log_level')
    logfile = from_config('log_file')
    send_events = from_config()
    pool_cls = from_config('pool')
    consumer_cls = from_config('consumer')
    mediator_cls = from_config('mediator')
    timer_cls = from_config('timer')
    timer_precision = from_config('timer_precision')
    autoscaler_cls = from_config('autoscaler')
    autoreloader_cls = from_config('autoreloader')
    schedule_filename = from_config()
    scheduler_cls = from_config('celerybeat_scheduler')
    task_time_limit = from_config()
    task_soft_time_limit = from_config()
    max_tasks_per_child = from_config()
    pool_putlocks = from_config()
    pool_restarts = from_config()
    force_execv = from_config()
    prefetch_multiplier = from_config()
    state_db = from_config()
    disable_rate_limits = from_config()
    worker_lost_wait = from_config()

    _state = None
    _running = 0
    pidlock = None

    def __init__(self, app=None, hostname=None, **kwargs):
        self.app = app_or_default(app or self.app)
        # all new threads start without a current app, so if an app is not
        # passed on to the thread it will fall back to the "default app",
        # which then could be the wrong app.  So for the worker
        # we set this to always return our app.  This is a hack,
        # and means that only a single app can be used for workers
        # running in the same process.
        set_default_app(self.app)
        self.app.finalize()
        trace._tasks = self.app._tasks  # optimization
        self.hostname = hostname or socket.gethostname()
        self.on_before_init(**kwargs)

        self._finalize = Finalize(self, self.stop, exitpriority=1)
        self._shutdown_complete = Event()
        self.setup_instance(**self.prepare_args(**kwargs))

    def on_before_init(self, **kwargs):
        pass

    def on_start(self):
        pass

    def on_consumer_ready(self, consumer):
        pass

    def setup_instance(self,
                       queues=None,
                       ready_callback=None,
                       pidfile=None,
                       include=None,
                       **kwargs):
        self.pidfile = pidfile
        self.app.loader.init_worker()
        self.setup_defaults(kwargs, namespace='celeryd')
        self.setup_queues(queues)
        self.setup_includes(include)

        # Set default concurrency
        if not self.concurrency:
            try:
                self.concurrency = cpu_count()
            except NotImplementedError:
                self.concurrency = 2

        # Options
        self.loglevel = mlevel(self.loglevel)
        self.ready_callback = ready_callback or self.on_consumer_ready
        self.use_eventloop = self.should_use_eventloop()

        signals.worker_init.send(sender=self)

        # Initialize boot steps
        self.pool_cls = _concurrency.get_implementation(self.pool_cls)
        self.components = []
        self.namespace = Namespace(app=self.app).apply(self, **kwargs)

    def setup_queues(self, queues):
        if isinstance(queues, basestring):
            queues = queues.split(',')
        self.queues = queues
        try:
            self.app.select_queues(queues)
        except KeyError as exc:
            raise ImproperlyConfigured(UNKNOWN_QUEUE.format(queues, exc))
        if self.app.conf.CELERY_WORKER_DIRECT:
            self.app.amqp.queues.select_add(worker_direct(self.hostname))

    def setup_includes(self, includes):
        # Update CELERY_INCLUDE to contain all known task modules, so that
        # all task modules are imported in case an execv happens.
        inc = self.app.conf.CELERY_INCLUDE
        if includes:
            if isinstance(includes, basestring):
                includes = includes.split(',')
            inc = self.app.conf.CELERY_INCLUDE = tuple(inc) + tuple(includes)
        self.include = includes
        task_modules = set(task.__class__.__module__
                           for task in self.app.tasks.itervalues())
        self.app.conf.CELERY_INCLUDE = tuple(set(inc) | task_modules)

    def prepare_args(self, **kwargs):
        return kwargs

    def start(self):
        """Starts the workers main loop."""
        self.on_start()
        self._state = self.RUN
        if self.pidfile:
            self.pidlock = platforms.create_pidlock(self.pidfile)
        try:
            for i, component in enumerate(self.components):
                logger.debug('Starting %s...', qualname(component))
                self._running = i + 1
                if component:
                    component.start()
                logger.debug('%s OK!', qualname(component))
        except SystemTerminate:
            self.terminate()
        except Exception as exc:
            logger.error('Unrecoverable error: %r', exc, exc_info=True)
            self.stop()
        except (KeyboardInterrupt, SystemExit):
            self.stop()

        try:
            # We only get here when running green; this makes sure
            # all greenthreads have exited.
            self._shutdown_complete.wait()
        except IGNORE_ERRORS:
            pass

    run = start  # XXX Compat

    def process_task_sem(self, req):
        return self._quick_acquire(self.process_task, req)

    def process_task(self, req):
        """Process task by sending it to the pool of workers."""
        try:
            req.execute_using_pool(self.pool)
        except TaskRevokedError:
            try:
                self._quick_release()  # Issue 877
            except AttributeError:
                pass
        except Exception as exc:
            logger.critical('Internal error: %r\n%s',
                            exc,
                            traceback.format_exc(),
                            exc_info=True)
        except SystemTerminate:
            self.terminate()
            raise
        except BaseException as exc:
            self.stop()
            raise exc

    def signal_consumer_close(self):
        try:
            self.consumer.close()
        except AttributeError:
            pass

    def should_use_eventloop(self):
        return (detect_environment() == 'default'
                and self.app.connection().is_evented
                and not self.app.IS_WINDOWS)

    def stop(self, in_sighandler=False):
        """Graceful shutdown of the worker server."""
        self.signal_consumer_close()
        if not in_sighandler or self.pool.signal_safe:
            self._shutdown(warm=True)

    def terminate(self, in_sighandler=False):
        """Not so graceful shutdown of the worker server."""
        self.signal_consumer_close()
        if not in_sighandler or self.pool.signal_safe:
            self._shutdown(warm=False)

    def _shutdown(self, warm=True):
        what = 'Stopping' if warm else 'Terminating'
        socket_timeout = socket.getdefaulttimeout()
        socket.setdefaulttimeout(SHUTDOWN_SOCKET_TIMEOUT)  # Issue 975

        if self._state in (self.CLOSE, self.TERMINATE):
            return

        self.app.loader.shutdown_worker()

        if self.pool:
            self.pool.close()

        if self._state != self.RUN or self._running != len(self.components):
            # Not fully started, can safely exit.
            self._state = self.TERMINATE
            self._shutdown_complete.set()
            return
        self._state = self.CLOSE

        for component in reversed(self.components):
            logger.debug('%s %s...', what, qualname(component))
            if component:
                stop = component.stop
                if not warm:
                    stop = getattr(component, 'terminate', None) or stop
                stop()

        self.timer.stop()
        self.consumer.close_connection()

        if self.pidlock:
            self.pidlock.release()
        self._state = self.TERMINATE
        socket.setdefaulttimeout(socket_timeout)
        self._shutdown_complete.set()

    def reload(self, modules=None, reload=False, reloader=None):
        modules = self.app.loader.task_modules if modules is None else modules
        imp = self.app.loader.import_from_cwd

        for module in set(modules or ()):
            if module not in sys.modules:
                logger.debug('importing module %s', module)
                imp(module)
            elif reload:
                logger.debug('reloading module %s', module)
                reload_from_cwd(sys.modules[module], reloader)
        self.pool.restart()

    @property
    def state(self):
        return state
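
`_shutdown()` temporarily overrides the process-wide default socket timeout (the Issue 975 workaround noted above) and restores the previous value once teardown finishes. The save/override/restore idea in isolation, stdlib only:

import socket

previous = socket.getdefaulttimeout()   # usually None, i.e. block forever
socket.setdefaulttimeout(10.0)          # cap blocking socket calls during teardown
try:
    pass  # ... close connections, stop components ...
finally:
    socket.setdefaulttimeout(previous)  # restore the original default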
Example #11
class WorkController(configurated):
    """Unmanaged worker instance."""
    app = None
    concurrency = from_config()
    loglevel = from_config('log_level')
    logfile = from_config('log_file')
    send_events = from_config()
    pool_cls = from_config('pool')
    consumer_cls = from_config('consumer')
    timer_cls = from_config('timer')
    timer_precision = from_config('timer_precision')
    autoscaler_cls = from_config('autoscaler')
    autoreloader_cls = from_config('autoreloader')
    schedule_filename = from_config()
    scheduler_cls = from_config('celerybeat_scheduler')
    task_time_limit = from_config()
    task_soft_time_limit = from_config()
    max_tasks_per_child = from_config()
    pool_putlocks = from_config()
    pool_restarts = from_config()
    force_execv = from_config()
    prefetch_multiplier = from_config()
    state_db = from_config()
    disable_rate_limits = from_config()
    worker_lost_wait = from_config()

    pidlock = None
    namespace = None
    pool = None
    semaphore = None

    class Namespace(bootsteps.Namespace):
        """Worker bootstep namespace."""
        name = 'Worker'
        default_steps = set([
            'celery.worker.components:Hub',
            'celery.worker.components:Queues',
            'celery.worker.components:Pool',
            'celery.worker.components:Beat',
            'celery.worker.components:Timer',
            'celery.worker.components:StateDB',
            'celery.worker.components:Consumer',
            'celery.worker.autoscale:WorkerComponent',
            'celery.worker.autoreload:WorkerComponent',
        ])

    def __init__(self, app=None, hostname=None, **kwargs):
        self.app = app_or_default(app or self.app)
        self.hostname = default_nodename(hostname)
        self.app.loader.init_worker()
        self.on_before_init(**kwargs)

        self._finalize = [
            Finalize(self, self.stop, exitpriority=1),
            Finalize(self, self._send_worker_shutdown, exitpriority=10),
        ]
        self.setup_instance(**self.prepare_args(**kwargs))

    def setup_instance(self,
                       queues=None,
                       ready_callback=None,
                       pidfile=None,
                       include=None,
                       use_eventloop=None,
                       **kwargs):
        self.pidfile = pidfile
        self.setup_defaults(kwargs, namespace='celeryd')
        self.setup_queues(queues)
        self.setup_includes(include)

        # Set default concurrency
        if not self.concurrency:
            try:
                self.concurrency = cpu_count()
            except NotImplementedError:
                self.concurrency = 2

        # Options
        self.loglevel = mlevel(self.loglevel)
        self.ready_callback = ready_callback or self.on_consumer_ready

        # this connection is not established, only used for params
        self._conninfo = self.app.connection()
        self.use_eventloop = (self.should_use_eventloop()
                              if use_eventloop is None else use_eventloop)
        self.options = kwargs

        signals.worker_init.send(sender=self)

        # Initialize bootsteps
        self.pool_cls = _concurrency.get_implementation(self.pool_cls)
        self.steps = []
        self.on_init_namespace()
        self.namespace = self.Namespace(app=self.app,
                                        on_start=self.on_start,
                                        on_close=self.on_close,
                                        on_stopped=self.on_stopped)
        self.namespace.apply(self, **kwargs)

    def on_init_namespace(self):
        pass

    def on_before_init(self, **kwargs):
        pass

    def on_start(self):
        if self.pidfile:
            self.pidlock = platforms.create_pidlock(self.pidfile)

    def on_consumer_ready(self, consumer):
        pass

    def on_close(self):
        self.app.loader.shutdown_worker()

    def on_stopped(self):
        self.timer.stop()
        self.consumer.shutdown()

        if self.pidlock:
            self.pidlock.release()

    def setup_queues(self, queues):
        if isinstance(queues, string_t):
            queues = queues.split(',')
        self.queues = queues
        try:
            self.app.select_queues(queues)
        except KeyError as exc:
            raise ImproperlyConfigured(UNKNOWN_QUEUE.format(queues, exc))
        if self.app.conf.CELERY_WORKER_DIRECT:
            self.app.amqp.queues.select_add(worker_direct(self.hostname))

    def setup_includes(self, includes):
        # Update CELERY_INCLUDE to contain all known task modules, so that
        # all task modules are imported in case an execv happens.
        inc = self.app.conf.CELERY_INCLUDE
        if includes:
            if isinstance(includes, string_t):
                includes = includes.split(',')
            inc = self.app.conf.CELERY_INCLUDE = tuple(inc) + tuple(includes)
        self.include = includes
        task_modules = set(task.__class__.__module__
                           for task in values(self.app.tasks))
        self.app.conf.CELERY_INCLUDE = tuple(set(inc) | task_modules)

    def prepare_args(self, **kwargs):
        return kwargs

    def _send_worker_shutdown(self):
        signals.worker_shutdown.send(sender=self)

    def start(self):
        """Starts the workers main loop."""
        try:
            self.namespace.start(self)
        except SystemTerminate:
            self.terminate()
        except Exception as exc:
            logger.error('Unrecoverable error: %r', exc, exc_info=True)
            self.stop()
        except (KeyboardInterrupt, SystemExit):
            self.stop()

    def _process_task_sem(self, req):
        return self._quick_acquire(self._process_task, req)

    def _process_task(self, req):
        """Process task by sending it to the pool of workers."""
        try:
            req.execute_using_pool(self.pool)
        except TaskRevokedError:
            try:
                self._quick_release()  # Issue 877
            except AttributeError:
                pass
        except Exception as exc:
            logger.critical('Internal error: %r\n%s',
                            exc,
                            traceback.format_exc(),
                            exc_info=True)
        except SystemTerminate:
            self.terminate()
            raise
        except BaseException as exc:
            self.stop()
            raise exc

    def signal_consumer_close(self):
        try:
            self.consumer.close()
        except AttributeError:
            pass

    def should_use_eventloop(self):
        return (detect_environment() == 'default' and self._conninfo.is_evented
                and not self.app.IS_WINDOWS)

    def stop(self, in_sighandler=False):
        """Graceful shutdown of the worker server."""
        self.signal_consumer_close()
        if not in_sighandler or self.pool.signal_safe:
            self._shutdown(warm=True)

    def terminate(self, in_sighandler=False):
        """Not so graceful shutdown of the worker server."""
        self.signal_consumer_close()
        if not in_sighandler or self.pool.signal_safe:
            self._shutdown(warm=False)

    def _shutdown(self, warm=True):
        # if namespace does not exist it means that we had an
        # error before the bootsteps could be initialized.
        if self.namespace is not None:
            self.namespace.stop(self, terminate=not warm)
            self.namespace.join()

    def reload(self, modules=None, reload=False, reloader=None):
        modules = self.app.loader.task_modules if modules is None else modules
        imp = self.app.loader.import_from_cwd

        for module in set(modules or ()):
            if module not in sys.modules:
                logger.debug('importing module %s', module)
                imp(module)
            elif reload:
                logger.debug('reloading module %s', module)
                reload_from_cwd(sys.modules[module], reloader)
        self.pool.restart()

    def info(self):
        return {
            'total': self.state.total_count,
            'pid': os.getpid(),
            'clock': str(self.app.clock)
        }

    def rusage(self):
        s = resource.getrusage(resource.RUSAGE_SELF)
        return {
            'utime': s.ru_utime,
            'stime': s.ru_stime,
            'maxrss': s.ru_maxrss,
            'ixrss': s.ru_ixrss,
            'idrss': s.ru_idrss,
            'isrss': s.ru_isrss,
            'minflt': s.ru_minflt,
            'majflt': s.ru_majflt,
            'nswap': s.ru_nswap,
            'inblock': s.ru_inblock,
            'oublock': s.ru_oublock,
            'msgsnd': s.ru_msgsnd,
            'msgrcv': s.ru_msgrcv,
            'nsignals': s.ru_nsignals,
            'nvcsw': s.ru_nvcsw,
            'nivcsw': s.ru_nivcsw,
        }

    def stats(self):
        info = self.info()
        info.update(self.namespace.info(self))
        info.update(self.consumer.namespace.info(self.consumer))
        info.update(rusage=self.rusage())
        return info

    @property
    def _state(self):
        return self.namespace.state

    @property
    def state(self):
        return state
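
`rusage()` is a direct mapping of the stdlib `resource.getrusage()` result; the underlying call works like this on Unix (the `resource` module is unavailable on Windows):

import resource

usage = resource.getrusage(resource.RUSAGE_SELF)
print(usage.ru_utime)   # CPU seconds spent in user mode
print(usage.ru_stime)   # CPU seconds spent in kernel mode
print(usage.ru_maxrss)  # peak RSS: kilobytes on Linux, bytes on OS X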
Example #12
class Worker(configurated):
    WorkController = WorkController

    app = None
    inherit_confopts = (WorkController, )
    loglevel = from_config("log_level")
    redirect_stdouts = from_config()
    redirect_stdouts_level = from_config()

    def __init__(self,
                 hostname=None,
                 discard=False,
                 embed_clockservice=False,
                 queues=None,
                 include=None,
                 app=None,
                 pidfile=None,
                 autoscale=None,
                 autoreload=False,
                 **kwargs):
        self.app = app = app_or_default(app or self.app)
        self.hostname = hostname or socket.gethostname()

        # this signal can be used to set up configuration for
        # workers by name.
        signals.celeryd_init.send(sender=self.hostname,
                                  instance=self,
                                  conf=self.app.conf)

        self.setup_defaults(kwargs, namespace="celeryd")
        if not self.concurrency:
            try:
                self.concurrency = cpu_count()
            except NotImplementedError:
                self.concurrency = 2
        self.discard = discard
        self.embed_clockservice = embed_clockservice
        if self.app.IS_WINDOWS and self.embed_clockservice:
            self.die("-B option does not work on Windows.  "
                     "Please run celerybeat as a separate service.")
        self.use_queues = [] if queues is None else queues
        self.queues = None
        self.include = [] if include is None else include
        self.pidfile = pidfile
        self.autoscale = None
        self.autoreload = autoreload
        if autoscale:
            max_c, _, min_c = autoscale.partition(",")
            self.autoscale = [int(max_c), min_c and int(min_c) or 0]
        self._isatty = isatty(sys.stdout)

        self.colored = app.log.colored(self.logfile)

        if isinstance(self.use_queues, basestring):
            self.use_queues = self.use_queues.split(",")
        if isinstance(self.include, basestring):
            self.include = self.include.split(",")

        try:
            self.loglevel = mlevel(self.loglevel)
        except KeyError:
            self.die("Unknown level %r. Please use one of %s." %
                     (self.loglevel, "|".join(l for l in LOG_LEVELS.keys()
                                              if isinstance(l, basestring))))

    def run(self):
        self.init_loader()
        self.init_queues()
        self.worker_init()
        self.redirect_stdouts_to_logger()

        if getattr(os, "getuid", None) and os.getuid() == 0:
            warnings.warn(
                RuntimeWarning(
                    "Running celeryd with superuser privileges is discouraged!"
                ))

        if self.discard:
            self.purge_messages()

        # Dump configuration to screen so we have some basic information
        # for when users send bug reports.
        print(
            str(self.colored.cyan(" \n", self.startup_info())) +
            str(self.colored.reset(self.extra_info())))
        self.set_process_status("-active-")

        try:
            self.run_worker()
        except IGNORE_ERRORS:
            pass

    def on_consumer_ready(self, consumer):
        signals.worker_ready.send(sender=consumer)
        print("celery@%s has started." % self.hostname)

    def init_queues(self):
        try:
            self.app.select_queues(self.use_queues)
        except KeyError, exc:
            raise ImproperlyConfigured(UNKNOWN_QUEUE_ERROR %
                                       (self.use_queues, exc))
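
For context, the `celeryd` command of this era essentially instantiated this class and called `run()`. A minimal standalone sketch, assuming the app exposes the class as `app.Worker` the way the CLI did (the app name, broker URL and option values are illustrative):

from celery import Celery

app = Celery('proj', broker='amqp://')      # illustrative app and broker URL
worker = app.Worker(loglevel='INFO',        # resolved through setup_defaults(kwargs)
                    queues='celery',        # split on ',' into use_queues
                    autoscale='10,3')       # parsed into [10, 3]
worker.run()  # prints the banner, then run_worker() starts a WorkController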