Example #1
 def test_setup_queues_worker_direct(self):
     self.app.conf.CELERY_WORKER_DIRECT = True
     self.app.amqp.__dict__['queues'] = Mock()
     self.worker.setup_queues({})
     self.app.amqp.queues.select_add.assert_called_with(
         worker_direct(self.worker.hostname),
     )
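With `CELERY_WORKER_DIRECT` enabled, every worker also consumes from its own per-worker queue, so a task can be pinned to a specific node; the test above swaps `app.amqp.queues` for a `Mock` and asserts that `setup_queues` registers that queue. A minimal usage sketch of the feature (the broker URL, task, and hostname are illustrative, and the import path of `worker_direct` has moved between Celery versions):

    from celery import Celery
    from celery.utils import worker_direct  # celery.utils.nodenames in newer releases

    app = Celery('proj', broker='amqp://')
    app.conf.CELERY_WORKER_DIRECT = True  # workers gain a '<hostname>.dq' queue

    @app.task
    def add(x, y):
        return x + y

    # Route the task straight to one worker's direct queue.
    add.apply_async((2, 2), queue=worker_direct('worker1@example.com'))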
Example #2
 def init_queues(self):
     try:
         self.app.select_queues(self.use_queues)
     except KeyError as exc:
         raise ImproperlyConfigured(
                 UNKNOWN_QUEUE.format(self.use_queues, exc))
     if self.app.conf.CELERY_WORKER_DIRECT:
         self.app.amqp.queues.select_add(worker_direct(self.hostname))
Example #3
 def test_setup_queues_worker_direct(self):
     self.app.conf.CELERY_WORKER_DIRECT = True
     _qs, self.app.amqp.__dict__["queues"] = self.app.amqp.queues, Mock()
     try:
         self.worker.setup_queues({})
         self.app.amqp.queues.select_add.assert_called_with(worker_direct(self.worker.hostname))
     finally:
         self.app.amqp.queues = _qs
         self.app.conf.CELERY_WORKER_DIRECT = False
Example #4
 def setup_queues(self, queues):
     if isinstance(queues, string_t):
         queues = queues.split(',')
     self.queues = queues
     try:
         self.app.select_queues(queues)
     except KeyError as exc:
         raise ImproperlyConfigured(UNKNOWN_QUEUE.format(queues, exc))
     if self.app.conf.CELERY_WORKER_DIRECT:
         self.app.amqp.queues.select_add(worker_direct(self.hostname))
Example #5
 def setup_queues(self, include, exclude=None):
     include = str_to_list(include)
     exclude = str_to_list(exclude)
     try:
         self.app.amqp.queues.select(include)
     except KeyError as exc:
         raise ImproperlyConfigured(SELECT_UNKNOWN_QUEUE.format(include, exc))
     try:
         self.app.amqp.queues.deselect(exclude)
     except KeyError as exc:
         raise ImproperlyConfigured(DESELECT_UNKNOWN_QUEUE.format(exclude, exc))
     if self.app.conf.CELERY_WORKER_DIRECT:
         self.app.amqp.queues.select_add(worker_direct(self.hostname))
Example #6
 def setup_queues(self, include, exclude=None):
     include = str_to_list(include)
     exclude = str_to_list(exclude)
     try:
         self.app.amqp.queues.select(include)
     except KeyError as exc:
         raise ImproperlyConfigured(
             SELECT_UNKNOWN_QUEUE.format(include, exc))
     try:
         self.app.amqp.queues.deselect(exclude)
     except KeyError as exc:
         raise ImproperlyConfigured(
             DESELECT_UNKNOWN_QUEUE.format(exclude, exc))
     if self.app.conf.worker_direct:
         self.app.amqp.queues.select_add(worker_direct(self.hostname))
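Examples #5 and #6 are the same method from two Celery eras: #5 reads the old uppercase setting `CELERY_WORKER_DIRECT`, while #6 reads the lowercase `worker_direct` key from Celery 4's renamed settings. Both rely on a small `str_to_list` helper to accept either a list or a comma-separated string; a sketch of what it is assumed to do (matching `celery.utils.text` in recent releases):

    def str_to_list(s):
        # Accept 'foo,bar' as well as ['foo', 'bar']; None passes through.
        if isinstance(s, str):
            return s.split(',')
        return s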
Example #7
class Worker(configurated):
    WorkController = WorkController

    app = None
    inherit_confopts = (WorkController, )
    loglevel = from_config('log_level')
    redirect_stdouts = from_config()
    redirect_stdouts_level = from_config()

    def __init__(self, hostname=None, purge=False, beat=False,
                 queues=None, include=None, app=None, pidfile=None,
                 autoscale=None, autoreload=False, no_execv=False,
                 no_color=None, **kwargs):
        self.app = app = app_or_default(app or self.app)
        self.hostname = hostname or socket.gethostname()

        # this signal can be used to set up configuration for
        # workers by name.
        signals.celeryd_init.send(sender=self.hostname, instance=self,
                                  conf=self.app.conf)

        self.setup_defaults(kwargs, namespace='celeryd')
        if not self.concurrency:
            try:
                self.concurrency = cpu_count()
            except NotImplementedError:
                self.concurrency = 2
        self.purge = purge
        self.beat = beat
        self.use_queues = [] if queues is None else queues
        self.queues = None
        self.include = include
        self.pidfile = pidfile
        self.autoscale = None
        self.autoreload = autoreload
        self.no_color = no_color
        self.no_execv = no_execv
        if autoscale:
            max_c, _, min_c = autoscale.partition(',')
            self.autoscale = [int(max_c), min_c and int(min_c) or 0]
        self._isatty = isatty(sys.stdout)

        self.colored = app.log.colored(
            self.logfile,
            enabled=not no_color if no_color is not None else no_color
        )

        if isinstance(self.use_queues, basestring):
            self.use_queues = self.use_queues.split(',')
        if self.include:
            if isinstance(self.include, basestring):
                self.include = self.include.split(',')
            app.conf.CELERY_INCLUDE = (
                tuple(app.conf.CELERY_INCLUDE) + tuple(self.include))
        self.loglevel = mlevel(self.loglevel)

    def run(self):
        self.init_queues()
        self.app.loader.init_worker()

        # this signal can be used to e.g. change queues after
        # the -Q option has been applied.
        signals.celeryd_after_setup.send(sender=self.hostname, instance=self,
                                         conf=self.app.conf)

        if getattr(os, 'getuid', None) and os.getuid() == 0:
            warnings.warn(RuntimeWarning(
                'Running celeryd with superuser privileges is discouraged!'))

        if self.purge:
            self.purge_messages()

        # Dump configuration to screen so we have some basic information
        # for when users sends bug reports.
        print(str(self.colored.cyan(' \n', self.startup_info())) +
              str(self.colored.reset(self.extra_info() or '')))
        self.set_process_status('-active-')

        self.setup_logging()

        # apply task execution optimizations
        trace.setup_worker_optimizations(self.app)

        try:
            self.run_worker()
        except IGNORE_ERRORS:
            pass

    def on_consumer_ready(self, consumer):
        signals.worker_ready.send(sender=consumer)
        print('celery@%s ready.' % safe_str(self.hostname))

    def init_queues(self):
        try:
            self.app.select_queues(self.use_queues)
        except KeyError as exc:
            raise ImproperlyConfigured(UNKNOWN_QUEUE % (self.use_queues, exc))
        if self.app.conf.CELERY_WORKER_DIRECT:
            self.app.amqp.queues.select_add(worker_direct(self.hostname))
Example #8
 def test_returns_if_queue(self):
     q = Queue('foo')
     self.assertIs(worker_direct(q), q)
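The test pins down an important property: when `worker_direct` is handed an existing `kombu.Queue`, it must return that very object rather than wrap it. A sketch of the function consistent with this test (the exchange name and queue-name format follow the Celery 3.x convention and should be treated as assumptions):

    from kombu import Exchange, Queue

    WORKER_DIRECT_EXCHANGE = Exchange('C.dq')
    WORKER_DIRECT_QUEUE_FORMAT = '{hostname}.dq'

    def worker_direct(hostname):
        # A Queue passes through untouched, which is what the test asserts.
        if isinstance(hostname, Queue):
            return hostname
        # Otherwise build the per-worker queue bound to the direct exchange.
        return Queue(
            WORKER_DIRECT_QUEUE_FORMAT.format(hostname=hostname),
            WORKER_DIRECT_EXCHANGE,
            hostname,  # routing key
        )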
Example #9
    def handle(self, *args, **options):

        from btw.settings._env import env

        workers = get_defined_workers()

        workers_by_name = {w.name: w for w in workers}

        def get_worker_names(force_all=False):
            get_all = force_all or options["all"]
            worker_names = options["worker_names"]
            if len(worker_names):
                for name in worker_names:
                    if name not in workers_by_name:
                        raise CommandError("{0} is not a worker name"
                                           .format(name))
            elif get_all:
                worker_names = [w.name for w in workers]
            else:
                raise CommandError("must specify a worker name or use --all")
            return worker_names

        cmd = options["command"]

        if cmd == "start":
            worker_names = get_worker_names()

            running_workers = get_running_workers()
            full_names = get_full_names(worker_names)
            requests = []
            for name in worker_names:
                worker = workers_by_name[name]

                if worker in running_workers:
                    self.error("{0} is already running.".format(name))
                    continue

                full_name = full_names[name]
                retcode = MultiTool().execute_from_commandline(
                    worker.start_cmd)
                if retcode:
                    self.error("there was an error starting {0}"
                               .format(name))
                # What we are doing here has more to do with waiting
                # for the worker to start rather than actually
                # checking the return value. It would be quite
                # difficult to get into a situation where the
                # environments do not coincide.

                # We send the task directly to the worker so that we
                # are sure *that* worker handles the request.
                requests.append((worker,
                                 get_btw_env.apply_async(
                                     (),
                                     queue=worker_direct(full_name))))

            for worker, request in requests:
                name = worker.name
                result = request.get()
                if result != env:
                    self.error(
                        ("{0}: not using environment {1} "
                         "(uses environment {2})")
                        .format(name, env, result))

                # We have to do this to unschedule the tasks that may
                # have been scheduled by a previous worker. If we do
                # not do this, we can end up with having gobs of tasks
                # scheduled for future execution. However, we cannot
                # do this when we stop the tasks. Why? Because the
                # list of revoked tasks is held only in memory and
                # will vanish when the workers are stopped.
                self.revoke_scheduled(full_names[name])

                if worker.start_task:
                    worker.start_task()

                self.stdout.write("{0} has started.".format(name))

        elif cmd == "names":
            check_no_names(options, cmd)
            check_no_all(options, cmd)

            for name in get_worker_names(force_all=True):
                self.stdout.write(name)

        elif cmd == "lognames":
            check_no_names(options, cmd)
            check_no_all(options, cmd)

            for w in workers:
                self.stdout.write(w.logfile)

        elif cmd == "stop":
            worker_names = get_worker_names()
            running_workers = get_running_workers()

            for name in worker_names:
                worker = workers_by_name[name]
                if worker in running_workers:
                    retcode = MultiTool().execute_from_commandline(
                        worker.stop_cmd)
                    if retcode:
                        self.error("there was an error stopping {0}"
                                   .format(name))
                    self.stdout.write("{0} has stopped.".format(name))
                else:
                    self.stdout.write("{0} was not running.".format(name))

        elif cmd == "ping":
            check_no_names(options, cmd)
            check_no_all(options, cmd)

            full_names = get_full_names([w.name for w in workers])

            for worker in workers:
                self.stdout.write("Pinging worker %s... " %
                                  worker.name, ending='')

                status = worker_does_not_exist(worker)
                if status:
                    self.stdout.write("failed: " + status)
                    continue

                full_name = full_names[worker.name]
                result = app.control.ping([full_name], timeout=0.5)
                if result[0][full_name] == {u'ok': u'pong'}:
                    self.stdout.write("passed")
                else:
                    self.stdout.write("failed with response: " +
                                      (repr(result[0].get(full_name)) or ""))

        elif cmd == "check":
            check_no_names(options, cmd)
            check_no_all(options, cmd)

            if not settings.CELERY_WORKER_DIRECT:
                # We need CELERY_WORKER_DIRECT so that the next test will work.
                raise CommandError("CELERY_WORKER_DIRECT must be True")

            full_names = get_full_names([w.name for w in workers])

            for worker in workers:
                self.stdout.write("Checking worker %s... " %
                                  worker.name, ending='')

                status = worker_does_not_exist(worker)
                if status:
                    self.stdout.write("failed: " + status)
                    continue

                full_name = full_names[worker.name]

                # We send the task directly to the worker so that we
                # are sure *that* worker handles the request.
                try:
                    result = get_btw_env.apply_async(
                        (),
                        queue=worker_direct(full_name)).get(timeout=60)
                except TimeoutError:
                    self.stdout.write("failed: timed out")
                    continue

                if result != env:
                    self.stdout.write(
                        ("failed: not using environment {0} "
                         "(uses environment {1})")
                        .format(env, result))
                    continue

                self.stdout.write("passed")
        elif cmd == "revoke-scheduled":
            self.revoke_scheduled()
        else:
            raise CommandError("bad command: " + cmd)
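`revoke_scheduled` itself is not shown in this example. A plausible reconstruction with Celery's public control API (the method name and body are assumptions; `inspect().scheduled()` and `control.revoke` are standard Celery calls):

    def revoke_scheduled(self, full_name=None):
        # Query one worker (or all workers) for tasks waiting on an ETA/countdown...
        destination = [full_name] if full_name else None
        scheduled = app.control.inspect(destination).scheduled() or {}
        # ...and revoke each one so it never runs.
        for tasks in scheduled.values():
            for task in tasks:
                app.control.revoke(task['request']['id'])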