def worker_process(args, worker_id, logging_pipe, canteen):
    """Entry point for a single dramatiq worker subprocess.

    Boots the broker and a ``Worker``, installs signal handlers, then idles
    until a termination signal flips ``running``.  On failure, exits the
    process via ``sys.exit`` with one of the ``RET_*`` codes.

    :param args: parsed CLI arguments (uses ``.broker``, ``.modules``,
        ``.queues``, ``.threads``).
    :param worker_id: numeric id of this worker, used for logging setup.
    :param logging_pipe: pipe back to the parent process; closed on shutdown.
    :param canteen: shared structure through which exactly one worker
        publishes middleware fork paths to the main process.
    """
    try:
        # Re-seed the random number generator from urandom on
        # supported platforms. This should make it so that worker
        # processes don't all follow the same sequence.
        random.seed()

        logger = setup_worker_logging(args, worker_id, logging_pipe)
        logger.debug("Loading broker...")
        module, broker = import_broker(args.broker)
        broker.emit_after("process_boot")

        logger.debug("Loading modules...")
        # NOTE: this loop rebinds ``module`` from the import_broker() result
        # above; the broker module object is not used after this point.
        for module in args.modules:
            importlib.import_module(module)

        # Only the worker that wins canteen initialization sends the fork
        # paths; the others see acquired == False and skip this section.
        with canteen_try_init(canteen) as acquired:
            if acquired:
                logger.debug("Sending forks to main process...")
                for middleware in broker.middleware:
                    for fork in middleware.forks:
                        fork_path = "%s:%s" % (fork.__module__, fork.__name__)
                        canteen_add(canteen, fork_path)

        logger.debug("Starting worker threads...")
        worker = Worker(broker, queues=args.queues, worker_threads=args.threads)
        worker.start()
    except ImportError:
        logger.exception("Failed to import module.")
        return sys.exit(RET_IMPORT)
    except ConnectionError:
        logger.exception("Broker connection failed.")
        return sys.exit(RET_CONNECT)

    def termhandler(signum, frame):
        # First signal: request a graceful stop.  Second signal while
        # stopping: hard-exit with RET_KILLED.
        nonlocal running
        if running:
            logger.info("Stopping worker process...")
            running = False
        else:
            logger.warning("Killing worker process...")
            return sys.exit(RET_KILLED)

    logger.info("Worker process is ready for action.")
    # SIGINT is ignored here (the parent handles Ctrl-C); SIGHUP/SIGBREAK
    # only exist on some platforms, hence the hasattr() guards.
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    signal.signal(signal.SIGTERM, termhandler)
    if hasattr(signal, "SIGHUP"):
        signal.signal(signal.SIGHUP, termhandler)
    if hasattr(signal, "SIGBREAK"):
        signal.signal(signal.SIGBREAK, termhandler)

    running = True
    while running:
        time.sleep(1)

    worker.stop()
    broker.close()
    logging_pipe.close()
def test_redis_requeues_unhandled_messages_on_shutdown(redis_broker):
    """On worker shutdown, an unprocessed message is re-enqueued in Redis.

    Fix: the original `if/else` assertion was vacuous in its else branch
    (`else` already implied `message_1 in messages`, which is what it then
    asserted).  The effective check was "at least one of the two message ids
    remains on the queue", so assert exactly that, directly.
    """
    # Given that I have an actor that takes its time
    @dramatiq.actor
    def do_work():
        time.sleep(1)

    # If I send it two messages
    message_1 = do_work.send()
    message_2 = do_work.send()

    # Then start a worker and subsequently shut it down
    worker = Worker(redis_broker, worker_threads=1)
    worker.start()
    time.sleep(0.25)
    worker.stop()

    # I expect it to have processed at most one of the messages and
    # re-enqueued the other.
    messages = redis_broker.client.lrange(f"dramatiq:{do_work.queue_name}", 0, 10)
    message_ids = {
        message_1.options["redis_message_id"].encode("utf-8"),
        message_2.options["redis_message_id"].encode("utf-8"),
    }
    assert message_ids & set(messages)
def worker(*args, **kwargs):
    """Yield a started ``Worker``, always stopping it on exit.

    Fix: the original created the ``Worker`` inside the ``try``, so if the
    constructor raised, the ``finally`` block called ``.stop()`` on the
    enclosing *function* object (the local name was not yet bound), raising
    an ``AttributeError`` that masked the real error.  Construction now
    happens before the ``try``; ``stop()`` still runs if ``start()`` or the
    consumer raises.
    """
    worker = Worker(*args, **kwargs)
    try:
        worker.start()
        yield worker
    finally:
        worker.stop()
def test_rabbitmq_broker_retries_declaring_queues_when_connection_related_errors_occur(rabbitmq_broker):
    """Queue declaration is retried after a one-off AMQP connection error."""
    executed = False
    declare_called = False
    original_declare = rabbitmq_broker._declare_queue

    def flaky_declare_queue(*args, **kwargs):
        # Fail exactly once, then delegate to the real implementation.
        nonlocal declare_called
        if declare_called:
            return original_declare(*args, **kwargs)
        declare_called = True
        raise pika.exceptions.AMQPConnectionError

    # Given that I have a flaky connection to a rabbitmq server
    with patch.object(rabbitmq_broker, "_declare_queue", flaky_declare_queue):
        # When I declare an actor
        @dramatiq.actor(queue_name="flaky_queue")
        def do_work():
            nonlocal executed
            executed = True

        # And I send that actor a message
        do_work.send()

        # And wait for the worker to process the message
        worker = Worker(rabbitmq_broker, worker_threads=1)
        worker.start()
        try:
            rabbitmq_broker.join(do_work.queue_name, timeout=5000)
            worker.join()

            # Then the queue should eventually be declared and the message executed
            assert declare_called
            assert executed
        finally:
            worker.stop()
def test_actors_can_prioritize_work(stub_broker):
    """Messages for a priority-0 actor run before those for a priority-10 actor."""
    # Record the order in which the actors actually execute.
    execution_order = []

    # An actor with high priority (lower number = higher priority).
    @dramatiq.actor(priority=0)
    def hi():
        execution_order.append("hi")

    # An actor with low priority.
    @dramatiq.actor(priority=10)
    def lo():
        execution_order.append("lo")

    # Enqueue the low-priority message first so plain FIFO order would fail.
    lo.send_with_options()
    hi.send_with_options()

    # Run a worker until the default queue drains.
    w = Worker(stub_broker)
    w.start()
    stub_broker.join("default")
    w.join()
    w.stop()

    # The high-priority actor must have run first.
    assert execution_order == ["hi", "lo"]
def test_rabbitmq_broker_can_enqueue_messages_with_priority(rabbitmq_broker):
    """Messages sent with higher broker_priority are consumed first."""
    max_priority = 10
    observed_priorities = []
    queue_name = "prioritized"

    # Given an actor that records the priority argument of each message
    @dramatiq.actor(queue_name=queue_name)
    def do_work(message_priority):
        observed_priorities.append(message_priority)

    # Start a paused worker with a prefetch of one so RabbitMQ decides
    # the delivery order, not the worker's local buffer.
    w = Worker(rabbitmq_broker, worker_threads=1)
    w.queue_prefetch = 1
    w.start()
    w.pause()
    try:
        # When I enqueue messages with increasing priorities while paused
        for priority in range(max_priority):
            do_work.send_with_options(args=(priority,), broker_priority=priority)

        # And resume the worker and wait for the queue to drain
        w.resume()
        rabbitmq_broker.join(queue_name, timeout=5000)
        w.join()

        # Then processing should have happened highest-priority first
        assert observed_priorities == list(reversed(range(max_priority)))
    finally:
        w.stop()
def test_dramatiq_error(
    broker: Broker, worker: Worker, backend: WriterBackend, frozen_time: Any
) -> None:
    """A raising actor produces enqueued, dequeued and exception log records.

    NOTE(review): ``frozen_time`` presumably freezes ``datetime.now()`` so the
    timestamp comparisons below are exact — confirm against the fixture.
    """
    @dramatiq.actor(queue_name="test")
    def simple_task_error() -> None:
        raise ValueError("Expected")

    message = simple_task_error.send_with_options(time_limit=10000)

    # Sending alone should only produce the enqueued record.
    assert backend.enqueued() == [
        EnqueuedLog(
            type=LogType.ENQUEUED,
            timestamp=datetime.now(),
            job_id=message.message_id,
            task_id="simple_task_error",
            job=JobDetails(
                queue="test",
                task_path="tests.test_dramatiq.test_dramatiq_error.<locals>"
                ".simple_task_error",
                execute_at=None,
                args=[],
                kwargs={},
                options={"time_limit": 10000},
            ),
        )
    ]
    assert backend.dequeued() == []
    assert backend.completed() == []

    # Process the message.
    worker.start()
    broker.join(simple_task_error.queue_name)
    worker.join()

    assert backend.dequeued() == [
        DequeuedLog(
            job_id=message.message_id,
            task_id="simple_task_error",
            timestamp=datetime.now(),
            type=LogType.DEQUEUED,
        )
    ]

    # The exception text is checked separately, then blanked so the record
    # can be compared for equality without the full traceback string.
    exceptions = backend.exception()
    for exception in exceptions:
        assert "Expected" in exception.exception
        exception.exception = ""

    assert exceptions == [
        ExceptionLog(
            job_id=message.message_id,
            task_id="simple_task_error",
            timestamp=datetime.now(),
            type=LogType.EXCEPTION,
            exception="",
        )
    ]
def test_dramatiq_completion(
    broker: StubBroker, worker: Worker, backend: WriterBackend, frozen_time: Any
) -> None:
    """A successful actor produces enqueued, dequeued and completed records.

    NOTE(review): ``frozen_time`` presumably freezes ``datetime.now()`` so the
    timestamp comparisons below are exact — confirm against the fixture.
    """
    @dramatiq.actor(queue_name="test")
    def simple_task(a: str, b: str) -> str:
        return "hello"

    message = simple_task.send("a", b="b")

    # Sending alone should only produce the enqueued record, capturing the
    # positional and keyword arguments separately.
    assert backend.enqueued() == [
        EnqueuedLog(
            type=LogType.ENQUEUED,
            timestamp=datetime.now(),
            job_id=message.message_id,
            task_id="simple_task",
            job=JobDetails(
                queue="test",
                task_path=(
                    "tests.test_dramatiq.test_dramatiq_completion.<locals>"
                    ".simple_task"
                ),
                execute_at=None,
                args=["a"],
                kwargs={"b": "b"},
                options={},
            ),
        )
    ]
    assert backend.dequeued() == []
    assert backend.completed() == []

    # Process the message.
    worker.start()
    broker.join(simple_task.queue_name)
    worker.join()

    assert backend.dequeued() == [
        DequeuedLog(
            job_id=message.message_id,
            task_id="simple_task",
            timestamp=datetime.now(),
            type=LogType.DEQUEUED,
        )
    ]

    # The completion record carries the actor's return value.
    assert backend.completed() == [
        CompletedLog(
            job_id=message.message_id,
            task_id="simple_task",
            timestamp=datetime.now(),
            result="hello",
            type=LogType.COMPLETED,
        )
    ]
class DramatiqTestCase(TransactionTestCase):
    """Django ``TransactionTestCase`` that runs a dramatiq worker per test.

    A fresh worker is started against a flushed broker before each test and
    stopped after teardown, so actors sent during the test are processed.
    """

    def _pre_setup(self):
        # Flush any leftover messages, then start a worker for this test.
        super()._pre_setup()
        self.broker = get_broker()
        self.broker.flush_all()
        self.worker = Worker(self.broker, worker_timeout=100)
        self.worker.start()

    def _post_teardown(self):
        # Stop the worker before Django's own teardown runs.
        self.worker.stop()
        super()._post_teardown()
def worker_process(args, worker_id, logging_pipe):
    """Entry point for a single dramatiq worker subprocess.

    Boots the broker and a ``Worker``, installs signal handlers, then idles
    until a termination signal flips ``running``.  On failure, exits via
    ``sys.exit`` with one of the ``RET_*`` codes.

    :param args: parsed CLI arguments (uses ``.broker``, ``.modules``,
        ``.queues``, ``.threads``).
    :param worker_id: numeric id of this worker, used for logging setup.
    :param logging_pipe: pipe back to the parent process; closed on shutdown.
    """
    try:
        # Re-seed the random number generator from urandom on
        # supported platforms. This should make it so that worker
        # processes don't all follow the same sequence.
        random.seed()

        logger = setup_worker_logging(args, worker_id, logging_pipe)
        module, broker = import_broker(args.broker)
        broker.emit_after("process_boot")

        # NOTE: this loop rebinds ``module`` from the import_broker() result
        # above; the broker module object is not used after this point.
        for module in args.modules:
            importlib.import_module(module)

        worker = Worker(broker, queues=args.queues, worker_threads=args.threads)
        worker.start()
    except ImportError:
        logger.exception("Failed to import module.")
        return sys.exit(RET_IMPORT)
    except ConnectionError:
        logger.exception("Broker connection failed.")
        return sys.exit(RET_CONNECT)

    def termhandler(signum, frame):
        # First signal: request a graceful stop.  Second signal while
        # stopping: hard-exit with RET_KILLED.
        nonlocal running
        if running:
            logger.info("Stopping worker process...")
            running = False
        else:
            logger.warning("Killing worker process...")
            return sys.exit(RET_KILLED)

    logger.info("Worker process is ready for action.")
    # SIGINT is ignored here (the parent handles Ctrl-C); SIGHUP/SIGBREAK
    # only exist on some platforms, hence the hasattr() guards.
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    signal.signal(signal.SIGTERM, termhandler)
    if hasattr(signal, "SIGHUP"):
        signal.signal(signal.SIGHUP, termhandler)
    if hasattr(signal, "SIGBREAK"):
        signal.signal(signal.SIGBREAK, termhandler)

    running = True
    while running:
        time.sleep(1)

    worker.stop()
    broker.close()
    logging_pipe.close()
def test_workers_dont_register_queues_that_arent_whitelisted(stub_broker):
    """A worker restricted to certain queues ignores newly declared ones."""
    # Given a worker that only serves queues "a" and "b"
    restricted_worker = Worker(stub_broker, queues={"a", "b"})
    restricted_worker.start()
    try:
        # When a queue outside the whitelist is declared (plus its delay queue)
        stub_broker.declare_queue("c")
        stub_broker.declare_queue("c.DQ")

        # Then the worker must not spin up consumers for either of them
        for queue_name in ("c", "c.DQ"):
            assert queue_name not in restricted_worker.consumers
    finally:
        restricted_worker.stop()
def worker_process(worker_id, logging_fd): """consume worker to process messages and execute the actor""" # TODO preload registries db_name = Configuration.get('db_name') try: logging_pipe = os.fdopen(logging_fd, "w") broker = prepare_broker(withmiddleware=True) broker.emit_after("process_boot") BlokManager.load() registry = RegistryManager.get(db_name, loadwithoutmigration=True) if registry is None: logger.critical("No registry found for %s", db_name) return os._exit(4) worker = Worker( broker, worker_threads=Configuration.get('dramatiq_threads', 1)) worker.start() print('worker started') except ImportError as e: logger.critical(e) return os._exit(2) except ConnectionError as e: logger.critical("Broker connection failed. %s", e) return os._exit(3) def termhandler(signum, frame): nonlocal running BlokManager.unload() if running: logger.info("Stopping worker process...") running = False else: logger.warning("Killing worker process...") return os._exit(1) logger.info("Worker process is ready for action.") signal.signal(signal.SIGINT, signal.SIG_IGN) signal.signal(signal.SIGTERM, termhandler) signal.signal(signal.SIGHUP, termhandler) running = True while running: time.sleep(1) worker.stop() broker.close() logging_pipe.close()
def test_redis_requeues_unhandled_delay_messages_on_shutdown(redis_broker):
    """A pending delayed message is re-enqueued when the worker shuts down."""
    # Given a trivial actor
    @dramatiq.actor
    def do_work():
        pass

    # When I send it a message delayed well beyond the test's lifetime
    message = do_work.send_with_options(delay=10000)

    # And start a worker, then shut it down straight away
    delay_worker = Worker(redis_broker, worker_threads=1)
    delay_worker.start()
    delay_worker.stop()

    # Then the message should be back on the delay queue in Redis
    delay_queue_key = "dramatiq:%s" % dq_name(do_work.queue_name)
    requeued = redis_broker.client.lrange(delay_queue_key, 0, 10)
    assert message.options["redis_message_id"].encode("utf-8") in requeued
def worker_process(args, worker_id, logging_fd):
    """Entry point for a single dramatiq worker subprocess.

    Boots the broker and a ``Worker``, installs signal handlers, then idles
    until a termination signal flips ``running``.  On failure, exits via
    ``os._exit`` with one of the ``RET_*`` codes.

    :param args: parsed CLI arguments (uses ``.broker``, ``.modules``,
        ``.queues``, ``.threads``).
    :param worker_id: numeric id of this worker, used for logging setup.
    :param logging_fd: file descriptor wrapped into a pipe for log output;
        closed on shutdown.
    """
    try:
        logging_pipe = os.fdopen(logging_fd, "w")
        logger = setup_worker_logging(args, worker_id, logging_pipe)
        module, broker = import_broker(args.broker)
        broker.emit_after("process_boot")

        # NOTE: this loop rebinds ``module`` from the import_broker() result
        # above; the broker module object is not used after this point.
        for module in args.modules:
            importlib.import_module(module)

        worker = Worker(broker, queues=args.queues, worker_threads=args.threads)
        worker.start()
    except ImportError:
        logger.exception("Failed to import module.")
        return os._exit(RET_IMPORT)
    except ConnectionError:
        logger.exception("Broker connection failed.")
        return os._exit(RET_CONNECT)

    def termhandler(signum, frame):
        # First signal: request a graceful stop.  Second signal while
        # stopping: hard-exit with RET_KILLED.
        nonlocal running
        if running:
            logger.info("Stopping worker process...")
            running = False
        else:
            logger.warning("Killing worker process...")
            return os._exit(RET_KILLED)

    logger.info("Worker process is ready for action.")
    # SIGINT is ignored (parent handles Ctrl-C); SIGHUP is registered
    # unconditionally, so this path assumes a POSIX platform.
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    signal.signal(signal.SIGTERM, termhandler)
    signal.signal(signal.SIGHUP, termhandler)

    running = True
    while running:
        time.sleep(1)

    worker.stop()
    broker.close()
    logging_pipe.close()
def test_disabled_log(
    broker: Broker,
    worker: Worker,
    backend: WriterBackend,
    actor_log: Optional[bool],
    task_log: Optional[bool],
    log_expected: Optional[bool],
) -> None:
    """Dequeue/completion records are written only when logging is enabled
    by the actor- and message-level ``log`` options (parametrized fixtures).
    """
    @dramatiq.actor(queue_name="test", log=actor_log)
    def simple_task_with_log_option() -> None:
        pass

    # Message-level option may override the actor-level one.
    simple_task_with_log_option.send_with_options(log=task_log)

    worker.start()
    broker.join(simple_task_with_log_option.queue_name)
    worker.join()

    expected_count = 1 if log_expected else 0
    assert len(backend.dequeued()) == expected_count
    assert len(backend.completed()) == expected_count
def test_actors_can_be_assigned_message_age_limits(stub_broker):
    """A message older than its actor's max_age is skipped, not executed."""
    # Track whether the actor ever runs.
    runs = []

    # Given an actor whose messages expire after 100ms
    @dramatiq.actor(max_age=100)
    def do_work():
        runs.append(1)

    # When I send it a message
    do_work.send()

    # And let the age limit lapse before any worker picks it up
    time.sleep(0.1)
    w = Worker(stub_broker, worker_timeout=100)
    w.start()
    stub_broker.join(do_work.queue_name)
    w.join()
    w.stop()

    # Then the stale message must have been skipped
    assert sum(runs) == 0
def worker_process(args, worker_id, logging_pipe, canteen, event):
    """Entry point for a single dramatiq worker subprocess (restart-aware).

    Boots the broker and a ``Worker``, signals the master via ``event`` once
    boot has been attempted, installs signal handlers, then idles until a
    termination signal or a worker-requested restart.  Exits via ``sys.exit``
    with a ``RET_*`` code on failure or restart.

    :param args: parsed CLI arguments (uses ``.broker``, ``.modules``,
        ``.queues``, ``.threads``).
    :param worker_id: numeric id of this worker, used for logging setup.
    :param logging_pipe: pipe back to the parent process; closed on shutdown.
    :param canteen: shared structure through which exactly one worker
        publishes middleware fork paths to the main process.
    :param event: set once boot has completed (or failed) so the master can
        proceed.
    """
    try:
        # Re-seed the random number generator from urandom on
        # supported platforms. This should make it so that worker
        # processes don't all follow the same sequence.
        random.seed()

        logger = setup_worker_logging(args, worker_id, logging_pipe)
        logger.debug("Loading broker...")
        module, broker = import_broker(args.broker)
        broker.emit_after("process_boot")

        logger.debug("Loading modules...")
        # NOTE: this loop rebinds ``module`` from the import_broker() result
        # above; the broker module object is not used after this point.
        for module in args.modules:
            importlib.import_module(module)

        # Double-checked locking on the shared canteen: only the first worker
        # to get here publishes the middleware fork paths.
        if not canteen.initialized:
            with canteen.get_lock():
                if not canteen.initialized:
                    logger.debug("Sending forks to main process...")
                    for middleware in broker.middleware:
                        for fork in middleware.forks:
                            fork_path = "%s:%s" % (fork.__module__, fork.__name__)
                            canteen_add(canteen, fork_path)

        logger.debug("Starting worker threads...")
        worker = Worker(broker, queues=args.queues, worker_threads=args.threads)
        worker.start()
    except ImportError:
        logger.exception("Failed to import module.")
        return sys.exit(RET_IMPORT)
    except ConnectionError:
        logger.exception("Broker connection failed.")
        return sys.exit(RET_CONNECT)
    finally:
        # Signal to the master process that this process has booted,
        # regardless of whether it failed or not. If it did fail, the
        # worker process will realize that soon enough.
        event.set()

    def termhandler(signum, frame):
        # First signal: request a graceful stop.  Second signal while
        # stopping: hard-exit with RET_KILLED.
        nonlocal running
        if running:
            logger.info("Stopping worker process...")
            running = False
        else:
            logger.warning("Killing worker process...")
            return sys.exit(RET_KILLED)

    logger.info("Worker process is ready for action.")
    # SIGINT is ignored here (the parent handles Ctrl-C); SIGHUP/SIGBREAK
    # only exist on some platforms, hence the hasattr() guards.
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    signal.signal(signal.SIGTERM, termhandler)
    if hasattr(signal, "SIGHUP"):
        signal.signal(signal.SIGHUP, termhandler)
    if hasattr(signal, "SIGBREAK"):
        signal.signal(signal.SIGBREAK, termhandler)

    running = True
    # Idle until either a termination signal or the worker asks to restart.
    while running and not worker.restart_requested:
        time.sleep(1)

    if worker.restart_requested:
        logger.info("Requesting worker restart.")

    worker.stop()
    logger.info("Worker stopped.")
    broker.close()
    logging_pipe.close()

    if worker.restart_requested:
        sys.exit(RET_RESTART)
def worker(broker):
    """Yield a running ``Worker`` for *broker*, guaranteeing shutdown.

    Fix: wrap the yield in try/finally (consistent with the variadic worker
    helper in this file) so the worker's threads are stopped even when an
    exception is thrown into the generator at the yield point.
    """
    worker = Worker(broker, worker_timeout=100)
    worker.start()
    try:
        yield worker
    finally:
        worker.stop()
def stub_worker(stub_broker: dramatiq.Broker) -> dramatiq.Worker:
    """Yield a running 32-thread ``Worker`` on the stub broker.

    Fix: wrap the yield in try/finally so the worker is stopped even when an
    exception is thrown into the generator at the yield point.
    """
    worker = Worker(stub_broker, worker_timeout=100, worker_threads=32)
    worker.start()
    try:
        yield worker
    finally:
        worker.stop()
def test_dramatiq_failed(
    broker: Broker, worker: Worker, backend: WriterBackend, frozen_time: Any
) -> None:
    """A message failed by middleware produces an exception log record.

    NOTE(review): ``frozen_time`` presumably freezes ``datetime.now()`` so the
    timestamp comparisons below are exact — confirm against the fixture.
    """
    class FailMessage(Middleware):
        # Marks every processed message as failed, regardless of outcome.
        def after_process_message(
            self,
            broker: Broker,
            message: Message,
            *,
            result: Any = None,
            exception: Optional[BaseException] = None,
        ) -> None:
            message.fail()

    @dramatiq.actor(queue_name="test")
    def simple_task_failed() -> None:
        return

    broker.add_middleware(FailMessage())

    # Two sends: only the first (log enabled) should produce records; the
    # second is sent with log=False.
    message = simple_task_failed.send()
    simple_task_failed.send_with_options(log=False)

    assert backend.enqueued() == [
        EnqueuedLog(
            type=LogType.ENQUEUED,
            timestamp=datetime.now(),
            job_id=message.message_id,
            task_id="simple_task_failed",
            job=JobDetails(
                queue="test",
                task_path="tests.test_dramatiq.test_dramatiq_failed.<locals>"
                ".simple_task_failed",
                execute_at=None,
                args=[],
                kwargs={},
                options={},
            ),
        )
    ]
    assert backend.dequeued() == []
    assert backend.completed() == []

    # Process the messages.
    worker.start()
    broker.join(simple_task_failed.queue_name)
    worker.join()

    assert backend.dequeued() == [
        DequeuedLog(
            job_id=message.message_id,
            task_id="simple_task_failed",
            timestamp=datetime.now(),
            type=LogType.DEQUEUED,
        )
    ]

    # The exception text is checked separately, then blanked so the record
    # can be compared for equality without the full failure string.
    exceptions = backend.exception()
    for exception in exceptions:
        assert "Failed" in exception.exception
        exception.exception = ""

    assert exceptions == [
        ExceptionLog(
            job_id=message.message_id,
            task_id="simple_task_failed",
            timestamp=datetime.now(),
            type=LogType.EXCEPTION,
            exception="",
        )
    ]
def redis_worker(redis_broker):
    """Yield a running 32-thread ``Worker`` on the Redis broker.

    Fix: wrap the yield in try/finally so the worker is stopped even when an
    exception is thrown into the generator at the yield point.
    """
    worker = Worker(redis_broker, worker_threads=32)
    worker.start()
    try:
        yield worker
    finally:
        worker.stop()
def rabbitmq_worker(rabbitmq_broker):
    """Yield a running 32-thread ``Worker`` on the RabbitMQ broker.

    Fix: wrap the yield in try/finally so the worker is stopped even when an
    exception is thrown into the generator at the yield point.
    """
    worker = Worker(rabbitmq_broker, worker_threads=32)
    worker.start()
    try:
        yield worker
    finally:
        worker.stop()
def stub_worker(stub_broker):
    """Yield a running ``Worker`` on the stub broker.

    Fix: wrap the yield in try/finally so the worker is stopped even when an
    exception is thrown into the generator at the yield point.
    """
    worker = Worker(stub_broker, worker_timeout=100)
    worker.start()
    try:
        yield worker
    finally:
        worker.stop()
def urlrabbitmq_worker(urlrabbitmq_broker):
    """Yield a running ``Worker`` on the URL-configured RabbitMQ broker.

    Fix: wrap the yield in try/finally so the worker is stopped even when an
    exception is thrown into the generator at the yield point.
    """
    worker = Worker(urlrabbitmq_broker)
    worker.start()
    try:
        yield worker
    finally:
        worker.stop()
def stub_worker():
    """Yield a running ``Worker`` on the broker from ``settings``.

    Fix: wrap the yield in try/finally so the worker is stopped even when an
    exception is thrown into the generator at the yield point.
    """
    worker = Worker(settings.broker, worker_timeout=100)
    worker.start()
    try:
        yield worker
    finally:
        worker.stop()
def redis_worker(redis_broker):
    """Yield a running ``Worker`` on the Redis broker.

    Fix: wrap the yield in try/finally so the worker is stopped even when an
    exception is thrown into the generator at the yield point.
    """
    worker = Worker(redis_broker)
    worker.start()
    try:
        yield worker
    finally:
        worker.stop()
def worker():
    """Yield a running single-threaded ``Worker`` on the module-level broker.

    Fix: wrap the yield in try/finally so the worker is stopped even when an
    exception is thrown into the generator at the yield point.
    """
    worker = Worker(broker, worker_timeout=100, worker_threads=1)
    worker.start()
    try:
        yield worker
    finally:
        worker.stop()