def test_get_active_worker_queues(self) -> None:
    """Sanity-check get_active_worker_queues() against the worker class registry."""
    subclass_groups = (
        QueueProcessingWorker.__subclasses__(),
        EmailSendingWorker.__subclasses__(),
        LoopQueueProcessingWorker.__subclasses__(),
    )
    # The -1 presumably offsets one subclass that registers no queue of
    # its own (an abstract intermediate) — confirm against queue_processors.
    worker_queue_count = sum(len(group) for group in subclass_groups) - 1
    self.assertEqual(worker_queue_count, len(get_active_worker_queues()))
    # Exactly one queue is registered with the 'test' queue type.
    self.assertEqual(1, len(get_active_worker_queues(queue_type='test')))
def test_get_active_worker_queues(self) -> None:
    """The active queues must be exactly the concrete, non-test worker queues."""
    test_queue_names = set(get_active_worker_queues(only_test_queues=True))
    worker_queue_names = set()
    for base in (QueueProcessingWorker, EmailSendingWorker, LoopQueueProcessingWorker):
        for queue_class in base.__subclasses__():
            if not isabstract(queue_class):
                worker_queue_names.add(queue_class.queue_name)
    # Verify that the set of active worker queues equals the set of
    # subclasses without is_test_queue set.
    self.assertEqual(set(get_active_worker_queues()),
                     worker_queue_names - test_queue_names)
def handle(self, *args: Any, **options: Any) -> None:
    """Run queue workers: all queues in threads, a named subset, or one process."""
    logging.basicConfig()
    logger = logging.getLogger("process_queue")

    def exit_with_three(signal: int, frame: Optional[FrameType]) -> None:
        """
        This process is watched by Django's autoreload, so exiting
        with status code 3 will cause this process to restart.
        """
        logger.warning("SIGUSR1 received. Restarting this queue processor.")
        sys.exit(3)

    if not settings.USING_RABBITMQ:
        # Make the warning silent when running the tests
        if settings.TEST_SUITE:
            logger.info("Not using RabbitMQ queue workers in the test suite.")
        else:
            logger.error("Cannot run a queue processor when USING_RABBITMQ is False!")
            raise CommandError

    def run_threaded_workers(queues: List[str], logger: logging.Logger) -> None:
        # Launch one ThreadedWorker per queue; the assert confirms every
        # queue produced exactly one thread.
        cnt = 0
        for queue_name in queues:
            if not settings.DEVELOPMENT:
                logger.info("launching queue worker thread %s", queue_name)
            cnt += 1
            td = ThreadedWorker(queue_name, logger)
            td.start()
        assert len(queues) == cnt
        logger.info("%d queue worker threads were launched", cnt)

    if options["all"]:
        # SIGUSR1 triggers exit code 3 so Django's reloader restarts us.
        signal.signal(signal.SIGUSR1, exit_with_three)
        autoreload.run_with_reloader(run_threaded_workers, get_active_worker_queues(), logger)
    elif options["multi_threaded"]:
        signal.signal(signal.SIGUSR1, exit_with_three)
        queues = options["multi_threaded"]
        autoreload.run_with_reloader(run_threaded_workers, queues, logger)
    else:
        # Single-process mode: run one worker for one named queue.
        queue_name = options["queue_name"]
        worker_num = options["worker_num"]

        def signal_handler(signal: int, frame: Optional[FrameType]) -> None:
            # Graceful shutdown: stop the worker, then exit cleanly.
            logger.info("Worker %d disconnecting from queue %s", worker_num, queue_name)
            worker.stop()
            sys.exit(0)

        logger.info("Worker %d connecting to queue %s", worker_num, queue_name)
        with log_and_exit_if_exception(logger, queue_name, threaded=False):
            worker = get_worker(queue_name)
            with configure_scope() as scope:
                # Tag Sentry events with which queue/worker produced them.
                scope.set_tag("queue_worker", queue_name)
                scope.set_tag("worker_num", worker_num)
                worker.setup()
                # Install handlers only after setup so a partially
                # initialized worker is never stop()ped.
                signal.signal(signal.SIGTERM, signal_handler)
                signal.signal(signal.SIGINT, signal_handler)
                signal.signal(signal.SIGUSR1, signal_handler)
                worker.ENABLE_TIMEOUTS = True
                worker.start()
def handle(self, *args, **options):
    """Launch queue workers: every active queue in threads, or one in-process."""
    logging.basicConfig()
    logger = logging.getLogger('process_queue')

    # Queue processors are useless without a RabbitMQ connection.
    if not settings.USING_RABBITMQ:
        logger.error("Cannot run a queue processor when USING_RABBITMQ is False!")
        sys.exit(1)

    if options['all']:
        # One daemon thread per active queue.
        for queue_name in get_active_worker_queues():
            logger.info('launching queue worker thread ' + queue_name)
            worker_thread = Threaded_worker(queue_name)
            worker_thread.start()
    else:
        queue_name = options['queue_name']
        worker_num = options['worker_num']
        logger.info("Worker %d connecting to queue %s" % (worker_num, queue_name))
        worker = get_worker(queue_name)
        worker.setup()

        def signal_handler(signal, frame):
            # Log, stop the worker cleanly, and exit.
            logger.info("Worker %d disconnecting from queue %s" % (worker_num, queue_name))
            worker.stop()
            sys.exit(0)

        for sig in (signal.SIGTERM, signal.SIGINT):
            signal.signal(sig, signal_handler)
        worker.start()
def test_get_active_worker_queues(self) -> None:
    """Active queues equal the concrete worker subclasses minus the test queues."""
    test_queue_names = set(get_active_worker_queues(only_test_queues=True))
    # Actually 6, but test_timeouts, which defines TimeoutWorker,
    # is called after this
    self.assertEqual(5, len(test_queue_names))

    # This misses TestWorker (defined in test_worker_noname with no
    # assign_queue), because that test runs after this one.
    worker_queue_names = set()
    for base in (QueueProcessingWorker, EmailSendingWorker, LoopQueueProcessingWorker):
        for queue_class in base.__subclasses__():
            if not isabstract(queue_class):
                worker_queue_names.add(queue_class.queue_name)
    self.assertEqual(set(get_active_worker_queues()),
                     worker_queue_names - test_queue_names)
def handle(self, *args, **options):
    # type: (*Any, **Any) -> None
    """Run queue workers: all queues, a named subset, or one single-process worker."""
    logging.basicConfig()
    logger = logging.getLogger('process_queue')

    def exit_with_three(signal, frame):
        # type: (int, FrameType) -> None
        """
        This process is watched by Django's autoreload, so exiting
        with status code 3 will cause this process to restart.
        """
        # Bug fix: logger.warn is a deprecated alias; use logger.warning.
        logger.warning("SIGUSR1 received. Restarting this queue processor.")
        sys.exit(3)

    if not settings.USING_RABBITMQ:
        # Make the warning silent when running the tests
        if settings.TEST_SUITE:
            logger.info("Not using RabbitMQ queue workers in the test suite.")
        else:
            logger.error("Cannot run a queue processor when USING_RABBITMQ is False!")
            sys.exit(1)

    def run_threaded_workers(queues, logger):
        # type: (List[str], logging.Logger) -> None
        # Launch one Threaded_worker per queue; the assert confirms every
        # queue produced exactly one thread.
        cnt = 0
        for queue_name in queues:
            if not settings.DEVELOPMENT:
                logger.info('launching queue worker thread ' + queue_name)
            cnt += 1
            td = Threaded_worker(queue_name)
            td.start()
        assert len(queues) == cnt
        logger.info('%d queue worker threads were launched' % (cnt,))

    if options['all']:
        # SIGUSR1 triggers exit code 3 so the autoreloader restarts us.
        signal.signal(signal.SIGUSR1, exit_with_three)
        autoreload.main(run_threaded_workers, (get_active_worker_queues(), logger))
    elif options['multi_threaded']:
        signal.signal(signal.SIGUSR1, exit_with_three)
        queues = options['multi_threaded']
        autoreload.main(run_threaded_workers, (queues, logger))
    else:
        # Single-process mode: run one worker for one named queue.
        queue_name = options['queue_name']
        worker_num = options['worker_num']
        logger.info("Worker %d connecting to queue %s" % (worker_num, queue_name))
        worker = get_worker(queue_name)
        worker.setup()

        def signal_handler(signal, frame):
            # type: (int, FrameType) -> None
            # Graceful shutdown: stop the worker, then exit cleanly.
            logger.info("Worker %d disconnecting from queue %s" % (worker_num, queue_name))
            worker.stop()
            sys.exit(0)

        signal.signal(signal.SIGTERM, signal_handler)
        signal.signal(signal.SIGINT, signal_handler)
        signal.signal(signal.SIGUSR1, signal_handler)
        worker.start()
def run_threaded_workers(logger):
    # type: (logging.Logger) -> None
    """Start one worker thread per active queue and report how many launched."""
    launched = 0
    for queue_name in get_active_worker_queues():
        if not settings.DEVELOPMENT:
            # Only production logs each individual thread launch.
            logger.info('launching queue worker thread ' + queue_name)
        launched += 1
        Threaded_worker(queue_name).start()
    logger.info('%d queue worker threads were launched' % (launched,))
def run_threaded_workers(logger):
    # type: (logging.Logger) -> None
    """Spawn a Threaded_worker for every active queue, then log the total."""
    queue_names = get_active_worker_queues()
    for queue_name in queue_names:
        if not settings.DEVELOPMENT:
            # Only production logs each individual thread launch.
            logger.info('launching queue worker thread ' + queue_name)
        worker_thread = Threaded_worker(queue_name)
        worker_thread.start()
    logger.info('%d queue worker threads were launched' % (len(queue_names), ))
def handle(self, *args: Any, **options: Any) -> None:
    """Run queue workers: all queues in threads, a named subset, or one process."""
    logging.basicConfig()
    logger = logging.getLogger('process_queue')

    def exit_with_three(signal: int, frame: FrameType) -> None:
        """
        This process is watched by Django's autoreload, so exiting
        with status code 3 will cause this process to restart.
        """
        logger.warning("SIGUSR1 received. Restarting this queue processor.")
        sys.exit(3)

    if not settings.USING_RABBITMQ:
        # Make the warning silent when running the tests
        if settings.TEST_SUITE:
            logger.info("Not using RabbitMQ queue workers in the test suite.")
        else:
            logger.error("Cannot run a queue processor when USING_RABBITMQ is False!")
            raise CommandError

    def run_threaded_workers(queues: List[str], logger: logging.Logger) -> None:
        # Launch one Threaded_worker per queue; the assert confirms every
        # queue produced exactly one thread.
        cnt = 0
        for queue_name in queues:
            if not settings.DEVELOPMENT:
                logger.info('launching queue worker thread ' + queue_name)
            cnt += 1
            td = Threaded_worker(queue_name)
            td.start()
        assert len(queues) == cnt
        logger.info('%d queue worker threads were launched' % (cnt,))

    if options['all']:
        # SIGUSR1 triggers exit code 3 so the autoreloader restarts us.
        signal.signal(signal.SIGUSR1, exit_with_three)
        autoreload.main(run_threaded_workers, (get_active_worker_queues(), logger))
    elif options['multi_threaded']:
        signal.signal(signal.SIGUSR1, exit_with_three)
        queues = options['multi_threaded']
        autoreload.main(run_threaded_workers, (queues, logger))
    else:
        # Single-process mode: run one worker for one named queue.
        queue_name = options['queue_name']
        worker_num = options['worker_num']
        logger.info("Worker %d connecting to queue %s" % (worker_num, queue_name))
        worker = get_worker(queue_name)
        worker.setup()

        def signal_handler(signal: int, frame: FrameType) -> None:
            # Graceful shutdown: stop the worker, then exit cleanly.
            logger.info("Worker %d disconnecting from queue %s" % (worker_num, queue_name))
            worker.stop()
            sys.exit(0)

        signal.signal(signal.SIGTERM, signal_handler)
        signal.signal(signal.SIGINT, signal_handler)
        signal.signal(signal.SIGUSR1, signal_handler)
        worker.start()
def handle(self, *args: Any, **options: str) -> None:
    """Purge either every active queue or one named queue."""
    def purge_queue(queue_name: str) -> None:
        # ensure_queue declares the queue (no-op callback) so purging
        # an as-yet-undeclared queue doesn't error.
        queue = SimpleQueueClient()
        queue.ensure_queue(queue_name, lambda: None)
        queue.channel.queue_purge(queue_name)

    if options['all']:
        for active_queue in get_active_worker_queues():
            purge_queue(active_queue)
        print("All queues purged")
        return

    if not options['queue_name']:
        raise CommandError("Missing queue_name argument!")

    queue_name = options['queue_name']
    if queue_name not in get_active_worker_queues():
        raise CommandError("Unknown queue %s" % (queue_name,))
    print("Purging queue %s" % (queue_name,))
    purge_queue(queue_name)
    print("Done")
def test_get_active_worker_queues(self) -> None:
    """Active queues equal concrete worker subclasses minus the test-only queues."""
    test_queue_names = set(get_active_worker_queues(only_test_queues=True))
    # Actually 6, but test_timeouts, which defines TimeoutWorker,
    # is called after this
    self.assertEqual(5, len(test_queue_names))

    bases = (QueueProcessingWorker, EmailSendingWorker, LoopQueueProcessingWorker)
    worker_queue_classes: Set[Type[QueueProcessingWorker]] = {
        subclass for base in bases for subclass in base.__subclasses__()
    }
    # Remove the abstract class
    worker_queue_classes.discard(queue_processors.LoopQueueProcessingWorker)

    # This misses TestWorker (defined in test_worker_noname with no
    # assign_queue), because that test runs after this one.
    worker_queue_names = {cls.queue_name for cls in worker_queue_classes}
    self.assertEqual(set(get_active_worker_queues()),
                     worker_queue_names - test_queue_names)
def handle(self, *args: Any, **options: str) -> None:
    """Purge all active queues, or the single queue named in options."""
    def purge_queue(queue_name: str) -> None:
        # Declare the queue first (no-op callback) so purge can't fail
        # on a queue that doesn't exist yet.
        client = SimpleQueueClient()
        client.ensure_queue(queue_name, lambda: None)
        client.channel.queue_purge(queue_name)

    if options['all']:
        for active_queue in get_active_worker_queues():
            purge_queue(active_queue)
        print("All queues purged")
    elif not options['queue_name']:
        raise CommandError("Missing queue_name argument!")
    else:
        queue_name = options['queue_name']
        known_queues = get_active_worker_queues()
        if queue_name not in known_queues:
            raise CommandError("Unknown queue %s" % (queue_name, ))
        print("Purging queue %s" % (queue_name, ))
        purge_queue(queue_name)
        print("Done")
def handle(self, *args: Any, **options: str) -> None:
    """Purge all active queues, or one named queue (including notify_tornado*)."""
    def purge_queue(queue_name: str) -> None:
        # ensure_queue both declares the queue and runs the purge in its
        # callback, so the two happen on the same channel.
        client = SimpleQueueClient()
        client.ensure_queue(queue_name,
                            lambda channel: channel.queue_purge(queue_name))

    if options['all']:
        for active_queue in get_active_worker_queues():
            purge_queue(active_queue)
        print("All queues purged")
        return

    if not options['queue_name']:
        raise CommandError("Missing queue_name argument!")

    queue_name = options['queue_name']
    is_known = (queue_name in get_active_worker_queues()
                or queue_name.startswith("notify_tornado"))
    if not is_known:
        raise CommandError(f"Unknown queue {queue_name}")
    print(f"Purging queue {queue_name}")
    purge_queue(queue_name)
    print("Done")
def handle(self, *args, **options):
    # type: (*Any, **Any) -> None
    """Run queue workers: all queues in threads, a named subset, or one process."""
    logging.basicConfig()
    logger = logging.getLogger('process_queue')

    if not settings.USING_RABBITMQ:
        # Make the warning silent when running the tests
        if settings.TEST_SUITE:
            logger.info("Not using RabbitMQ queue workers in the test suite.")
        else:
            logger.error("Cannot run a queue processor when USING_RABBITMQ is False!")
            sys.exit(1)

    def run_threaded_workers(queues, logger):
        # type: (List[str], logging.Logger) -> None
        # Launch one Threaded_worker per queue; the assert confirms every
        # queue produced exactly one thread.
        cnt = 0
        for queue_name in queues:
            if not settings.DEVELOPMENT:
                logger.info('launching queue worker thread ' + queue_name)
            cnt += 1
            td = Threaded_worker(queue_name)
            td.start()
        assert len(queues) == cnt
        logger.info('%d queue worker threads were launched' % (cnt,))

    if options['all']:
        # Run under Django's autoreloader so code changes restart the workers.
        autoreload.main(run_threaded_workers, (get_active_worker_queues(), logger))
    elif options['multi_threaded']:
        queues = options['multi_threaded']
        autoreload.main(run_threaded_workers, (queues, logger))
    else:
        # Single-process mode: run one worker for one named queue.
        queue_name = options['queue_name']
        worker_num = options['worker_num']
        logger.info("Worker %d connecting to queue %s" % (worker_num, queue_name))
        worker = get_worker(queue_name)
        worker.setup()

        def signal_handler(signal, frame):
            # type: (int, FrameType) -> None
            # Graceful shutdown: stop the worker, then exit cleanly.
            logger.info("Worker %d disconnecting from queue %s" % (worker_num, queue_name))
            worker.stop()
            sys.exit(0)

        signal.signal(signal.SIGTERM, signal_handler)
        signal.signal(signal.SIGINT, signal_handler)
        worker.start()
#!/usr/bin/env python from __future__ import print_function import argparse import os import sys from os.path import dirname, abspath BASE_DIR = dirname(dirname(dirname(abspath(__file__)))) sys.path.append(BASE_DIR) import scripts.lib.setup_path_on_import os.environ['DJANGO_SETTINGS_MODULE'] = 'zproject.settings' import django django.setup() from zerver.worker.queue_processors import get_active_worker_queues if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument('--queue-type', action='store', dest='queue_type', default=None, help="Specify which types of queues to list") args = parser.parse_args() for worker in sorted(get_active_worker_queues(args.queue_type)): print(worker)
#!/usr/bin/env python3
"""Print every active worker queue, one per line, in sorted order."""
import os
import sys

BASE_DIR = os.path.dirname(
    os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(BASE_DIR)
from scripts.lib.setup_path import setup_path

setup_path()

os.environ['DJANGO_SETTINGS_MODULE'] = 'zproject.settings'
import django

django.setup()

from zerver.worker.queue_processors import get_active_worker_queues

if __name__ == "__main__":
    # One queue name per line, sorted — identical output to a per-item print.
    queue_names = sorted(get_active_worker_queues())
    print("\n".join(queue_names))
def test_get_active_worker_queues(self) -> None:
    """Count-based sanity check of get_active_worker_queues()."""
    # The -1 presumably offsets one subclass that registers no queue of
    # its own (an abstract intermediate) — confirm against queue_processors.
    worker_queue_count = (
        len(QueueProcessingWorker.__subclasses__())
        + len(EmailSendingWorker.__subclasses__())
        + len(LoopQueueProcessingWorker.__subclasses__())
        - 1
    )
    self.assertEqual(worker_queue_count, len(get_active_worker_queues()))
    # Exactly one queue is registered with the 'test' queue type.
    self.assertEqual(1, len(get_active_worker_queues(queue_type='test')))
def run_threaded_workers(logger):
    # type: (logging.Logger) -> None
    """Spawn one worker thread per active queue."""
    for queue_name in get_active_worker_queues():
        logger.info('launching queue worker thread ' + queue_name)
        Threaded_worker(queue_name).start()
#!/usr/bin/env python3 from __future__ import print_function import argparse import os import sys from os.path import dirname, abspath BASE_DIR = dirname(dirname(dirname(abspath(__file__)))) sys.path.append(BASE_DIR) import scripts.lib.setup_path_on_import os.environ['DJANGO_SETTINGS_MODULE'] = 'zproject.settings' import django django.setup() from zerver.worker.queue_processors import get_active_worker_queues if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument('--queue-type', action='store', dest='queue_type', default=None, help="Specify which types of queues to list") args = parser.parse_args() for worker in sorted(get_active_worker_queues(args.queue_type)): print(worker)
def run_threaded_workers(logger):
    # type: (logging.Logger) -> None
    """Launch a Threaded_worker for each active queue."""
    for queue_name in get_active_worker_queues():
        logger.info('launching queue worker thread ' + queue_name)
        worker_thread = Threaded_worker(queue_name)
        worker_thread.start()