예제 #1
0
 def run_from_argv(self, prog_name, argv, command=None):
     """Forward this invocation to a fresh MultiTool, detaching a worker."""
     from celery.bin.multi import MultiTool
     # Tack the detach instruction onto the caller-supplied argv.
     detach_opt = "--cmd=%s worker --detach" % prog_name
     argv.append(detach_opt)
     tool = MultiTool()
     return tool.execute_from_commandline([command] + argv, prog_name)
예제 #2
0
파일: celery.py 프로젝트: zzkristy/celery
 def run_from_argv(self, prog_name, argv, command=None):
     """Delegate to a MultiTool that inherits this command's output flags."""
     from celery.bin.multi import MultiTool
     tool = MultiTool(quiet=self.quiet, no_color=self.no_color)
     full_argv = [command] + argv
     return tool.execute_from_commandline(full_argv, prog_name)
예제 #3
0
 def test_range_prefix_not_used_in_named_range(self):
     """--range-prefix must not rename nodes that were given explicitly."""
     m = MultiTool()
     range_prefix = 'worker'
     _opt_parser, nodes = m._nodes_from_argv(
         ['a b c', '--range-prefix={}'.format(range_prefix)])
     # The original looped with enumerate() but never used the index;
     # iterate the nodes directly instead.
     for node in nodes:
         assert not node.name.startswith(range_prefix)
예제 #4
0
 def test_range_prefix_not_set(self):
     """Without --range-prefix, generated node names use the default prefix."""
     m = MultiTool()
     default_prefix = 'celery'
     workers_count = 2
     _opt_parser, nodes = m._nodes_from_argv(['{}'.format(workers_count)])
     # Node names are numbered starting at 1: celery1, celery2, ...
     for i, node in enumerate(nodes, start=1):
         expected = default_prefix + str(i)
         assert node.name.startswith(expected)
예제 #5
0
파일: test_multi.py 프로젝트: csunny/celery
 def test_splash(self):
     """splash() stays silent when nosplash is set and speaks otherwise."""
     tool = MultiTool()
     tool.note = Mock()
     # With the banner suppressed, note() must never fire.
     tool.nosplash = True
     tool.splash()
     tool.note.assert_not_called()
     # With the banner enabled, note() must fire at least once.
     tool.nosplash = False
     tool.splash()
     tool.note.assert_called()
예제 #6
0
 def test_range_prefix(self):
     """--range-prefix names generated nodes <prefix><n>, numbered from 1."""
     m = MultiTool()
     range_prefix = 'worker'
     workers_count = 2
     argv = [
         '{}'.format(workers_count),
         '--range-prefix={}'.format(range_prefix),
     ]
     _opt_parser, nodes = m._nodes_from_argv(argv)
     for i, node in enumerate(nodes, start=1):
         assert node.name.startswith(range_prefix + str(i))
예제 #7
0
파일: test_multi.py 프로젝트: csunny/celery
 def setup(self):
     """Build a MultiTool on an in-memory stream with mocked collaborators."""
     self.fh = WhateverIO()
     self.env = {}
     self.t = MultiTool(env=self.env, fh=self.fh)
     self.t.Cluster = Mock(name='Cluster')
     # Stub out every side-effecting helper so tests only observe calls.
     for attr in ('carp', 'usage', 'splash', 'say', 'ok'):
         setattr(self.t, attr, Mock(name='.' + attr))
     self.cluster = self.t.Cluster.return_value
예제 #8
0
 def test_Cluster(self):
     """cluster_from_argv wires every MultiTool callback onto the cluster."""
     m = MultiTool()
     c = m.cluster_from_argv(['A', 'B', 'C'])
     assert c.env is m.env
     assert c.cmd == 'celery worker'
     # Every event callback on the cluster must be the tool's own handler.
     callbacks = (
         'on_stopping_preamble',
         'on_send_signal',
         'on_still_waiting_for',
         'on_still_waiting_progress',
         'on_still_waiting_end',
         'on_node_start',
         'on_node_restart',
         'on_node_shutdown_ok',
         'on_node_status',
         'on_node_signal_dead',
         'on_node_signal',
         'on_node_down',
         'on_child_spawn',
         'on_child_signalled',
         'on_child_failure',
     )
     for cb in callbacks:
         assert getattr(c, cb) == getattr(m, cb)
예제 #9
0
    def setup(self):
        """Create a MultiTool on fake streams with parsing/cluster mocks."""
        self.fh = WhateverIO()
        self.env = {}
        self.t = MultiTool(env=self.env, fh=self.fh)
        self.t.cluster_from_argv = Mock(name='cluster_from_argv')
        self.t._cluster_from_argv = Mock(name='cluster_from_argv')
        self.t.Cluster = Mock(name='Cluster')
        # Stub all user-facing helpers so only the call pattern is observed.
        for attr in ('carp', 'usage', 'splash', 'say', 'ok'):
            setattr(self.t, attr, Mock(name='.' + attr))
        self.cluster = self.t.Cluster.return_value

        def fake_cluster_from_argv(argv):
            # Still run real option parsing before handing back the mock
            # cluster, so argv errors surface in tests.
            parser = self.t.OptionParser(argv)
            parser.parse()
            return parser, self.cluster

        self.t.cluster_from_argv.return_value = self.cluster
        self.t._cluster_from_argv.side_effect = fake_cluster_from_argv
예제 #10
0
파일: test_multi.py 프로젝트: csunny/celery
 def test_Cluster(self):
     """m.Cluster() copies argv, env, cmd and every callback from the tool."""
     m = MultiTool()
     c = m.Cluster(['A', 'B', 'C'])
     self.assertListEqual(c.argv, ['A', 'B', 'C'])
     self.assertIs(c.env, m.env)
     self.assertEqual(c.cmd, 'celery worker')
     # Each event callback on the cluster must equal the tool's handler.
     for cb in (
             'on_stopping_preamble',
             'on_send_signal',
             'on_still_waiting_for',
             'on_still_waiting_progress',
             'on_still_waiting_end',
             'on_node_start',
             'on_node_restart',
             'on_node_shutdown_ok',
             'on_node_status',
             'on_node_signal_dead',
             'on_node_signal',
             'on_node_down',
             'on_child_spawn',
             'on_child_signalled',
             'on_child_failure',
     ):
         self.assertEqual(getattr(c, cb), getattr(m, cb))
예제 #11
0
파일: celery.py 프로젝트: llonchj/celery
 def run_from_argv(self, prog_name, argv):
     """Hand the raw argv straight to a fresh MultiTool instance."""
     from celery.bin.multi import MultiTool
     tool = MultiTool()
     return tool.execute_from_commandline(argv, prog_name)
예제 #12
0
파일: test_multi.py 프로젝트: csunny/celery
 def setup(self):
     """Prepare a MultiTool writing to an in-memory stream, empty env."""
     self.env = {}
     self.fh = WhateverIO()
     self.t = MultiTool(env=self.env, fh=self.fh)
예제 #13
0
 def setup(self):
     """Prepare a MultiTool; mkdir is patched so no directories are made."""
     self.fh = WhateverIO()
     self.env = {}
     mkdir_patch = patch('celery.apps.multi.os.mkdir')
     with mkdir_patch:
         self.t = MultiTool(env=self.env, fh=self.fh)
예제 #14
0
    def handle(self, *args, **options):
        """Dispatch the worker-management subcommand in ``options["command"]``.

        Recognized commands: ``start``, ``names``, ``lognames``, ``stop``,
        ``ping``, ``check`` and ``revoke-scheduled``.  Unknown commands and
        invalid worker names raise ``CommandError``.
        """

        # Imported inside the method — presumably so Django settings are
        # fully initialized before the environment value is read; confirm.
        from btw.settings._env import env

        workers = get_defined_workers()

        workers_by_name = {w.name: w for w in workers}

        def get_worker_names(force_all=False):
            # Resolve which workers to act on: explicit names (validated
            # against the defined workers), or every worker when --all (or
            # force_all) is given; otherwise it is a usage error.
            get_all = force_all or options["all"]
            worker_names = options["worker_names"]
            if len(worker_names):
                for name in worker_names:
                    if name not in workers_by_name:
                        raise CommandError("{0} is not a worker name"
                                           .format(name))
            elif get_all:
                worker_names = [w.name for w in workers]
            else:
                raise CommandError("must specify a worker name or use --all")
            return worker_names

        cmd = options["command"]

        if cmd == "start":
            worker_names = get_worker_names()

            running_workers = get_running_workers()
            full_names = get_full_names(worker_names)
            requests = []
            for name in worker_names:
                worker = workers_by_name[name]

                # An already-running worker is reported but not fatal;
                # we move on to the next one.
                if worker in running_workers:
                    self.error("{0} is already running.".format(name))
                    continue

                full_name = full_names[name]
                retcode = MultiTool().execute_from_commandline(
                    worker.start_cmd)
                if retcode:
                    self.error("there was an error starting {0}"
                               .format(name))
                # What we are doing here has more to do with waiting
                # for the worker to start rather than actually
                # checking the return value. It would be quite
                # difficult to get into a situation where the
                # environments do not coincide.

                # We send the task directly to the worker so that we
                # are sure *that* worker handles the request.
                requests.append((worker,
                                 get_btw_env.apply_async(
                                     (),
                                     queue=worker_direct(full_name))))

            # Second pass: wait for each worker's reply and verify it runs
            # in the same environment as this command.
            for worker, request in requests:
                name = worker.name
                result = request.get()
                if result != env:
                    self.error(
                        ("{0}: not using environment {1} "
                         "(uses environment {2})")
                        .format(name, env, result))

                # We have to do this to unschedule the tasks that may
                # have been scheduled by a previous worker. If we do
                # not do this, we can end up with having gobs of tasks
                # scheduled for future execution. However, we cannot
                # do this when we stop the tasks. Why? Because the
                # list of revoked tasks is held only in memory and
                # will vanish when the workers are stopped.
                self.revoke_scheduled(full_names[name])

                if worker.start_task:
                    worker.start_task()

                self.stdout.write("{0} has started.".format(name))

        elif cmd == "names":
            # Listing commands accept neither worker names nor --all.
            check_no_names(options, cmd)
            check_no_all(options, cmd)

            for name in get_worker_names(force_all=True):
                self.stdout.write(name)

        elif cmd == "lognames":
            check_no_names(options, cmd)
            check_no_all(options, cmd)

            for w in workers:
                self.stdout.write(w.logfile)

        elif cmd == "stop":
            worker_names = get_worker_names()
            running_workers = get_running_workers()

            for name in worker_names:
                worker = workers_by_name[name]
                if worker in running_workers:
                    retcode = MultiTool().execute_from_commandline(
                        worker.stop_cmd)
                    if retcode:
                        self.error("there was an error stopping {0}"
                                   .format(name))
                    self.stdout.write("{0} has stopped.".format(name))
                else:
                    self.stdout.write("{0} was not running.".format(name))

        elif cmd == "ping":
            check_no_names(options, cmd)
            check_no_all(options, cmd)

            full_names = get_full_names([w.name for w in workers])

            for worker in workers:
                self.stdout.write("Pinging worker %s... " %
                                  worker.name, ending='')

                status = worker_does_not_exist(worker)
                if status:
                    self.stdout.write("failed: " + status)
                    continue

                full_name = full_names[worker.name]
                # Short timeout: a live worker should answer a ping quickly.
                result = app.control.ping([full_name], timeout=0.5)
                if result[0][full_name] == {'ok': 'pong'}:
                    self.stdout.write("passed")
                else:
                    self.stdout.write("failed with response: " +
                                      (repr(result[0].get(full_name)) or ""))

        elif cmd == "check":
            check_no_names(options, cmd)
            check_no_all(options, cmd)

            if not settings.CELERY_WORKER_DIRECT:
                # We need CELERY_WORKER_DIRECT so that the next test will work.
                raise CommandError("CELERY_WORKER_DIRECT must be True")

            full_names = get_full_names([w.name for w in workers])

            for worker in workers:
                self.stdout.write("Checking worker %s... " %
                                  worker.name, ending='')

                status = worker_does_not_exist(worker)
                if status:
                    self.stdout.write("failed: " + status)
                    continue

                full_name = full_names[worker.name]

                # We send the task directly to the worker so that we
                # are sure *that* worker handles the request.
                try:
                    result = get_btw_env.apply_async(
                        (),
                        queue=worker_direct(full_name)).get(timeout=60)
                except TimeoutError:
                    self.stdout.write("failed: timed out")
                    continue

                if result != env:
                    self.stdout.write(
                        ("failed: not using environment {0} "
                         "(uses environment {1})")
                        .format(env, result))
                    continue

                self.stdout.write("passed")
        elif cmd == "revoke-scheduled":
            self.revoke_scheduled()
        else:
            raise CommandError("bad command: " + cmd)