Example #1
def worker_process(args, worker_id, logging_pipe, canteen):
    try:
        # Re-seed the random number generator from urandom on
        # supported platforms.  This should make it so that worker
        # processes don't all follow the same sequence.
        random.seed()

        logger = setup_worker_logging(args, worker_id, logging_pipe)
        logger.debug("Loading broker...")
        module, broker = import_broker(args.broker)
        broker.emit_after("process_boot")

        logger.debug("Loading modules...")
        for module in args.modules:
            importlib.import_module(module)

        with canteen_try_init(canteen) as acquired:
            if acquired:
                logger.debug("Sending forks to main process...")
                for middleware in broker.middleware:
                    for fork in middleware.forks:
                        fork_path = "%s:%s" % (fork.__module__, fork.__name__)
                        canteen_add(canteen, fork_path)

        logger.debug("Starting worker threads...")
        worker = Worker(broker,
                        queues=args.queues,
                        worker_threads=args.threads)
        worker.start()
    except ImportError:
        logger.exception("Failed to import module.")
        return sys.exit(RET_IMPORT)
    except ConnectionError:
        logger.exception("Broker connection failed.")
        return sys.exit(RET_CONNECT)

    def termhandler(signum, frame):
        nonlocal running
        if running:
            logger.info("Stopping worker process...")
            running = False
        else:
            logger.warning("Killing worker process...")
            return sys.exit(RET_KILLED)

    logger.info("Worker process is ready for action.")
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    signal.signal(signal.SIGTERM, termhandler)
    if hasattr(signal, "SIGHUP"):
        signal.signal(signal.SIGHUP, termhandler)
    if hasattr(signal, "SIGBREAK"):
        signal.signal(signal.SIGBREAK, termhandler)

    running = True
    while running:
        time.sleep(1)

    worker.stop()
    broker.close()
    logging_pipe.close()
Example #2
def test_redis_requeues_unhandled_messages_on_shutdown(redis_broker):
    # Given that I have an actor that takes its time
    @dramatiq.actor
    def do_work():
        time.sleep(1)

    # If I send it two messages
    message_1 = do_work.send()
    message_2 = do_work.send()

    # Then start a worker and subsequently shut it down
    worker = Worker(redis_broker, worker_threads=1)
    worker.start()
    time.sleep(0.25)
    worker.stop()

    # I expect it to have processed one of the messages and re-enqueued the other
    messages = redis_broker.client.lrange(f"dramatiq:{do_work.queue_name}", 0, 10)
    assert message_1.options["redis_message_id"].encode("utf-8") in messages or \
        message_2.options["redis_message_id"].encode("utf-8") in messages
Example #3
def test_rabbitmq_broker_can_enqueue_messages_with_priority(rabbitmq_broker):
    max_priority = 10
    message_processing_order = []
    queue_name = "prioritized"

    # Given that I have an actor that stores message priorities
    @dramatiq.actor(queue_name=queue_name)
    def do_work(message_priority):
        message_processing_order.append(message_priority)

    worker = Worker(rabbitmq_broker, worker_threads=1)
    worker.queue_prefetch = 1
    worker.start()
    worker.pause()

    try:
        # When I send that actor messages with increasing priorities
        for priority in range(max_priority):
            do_work.send_with_options(args=(priority,), broker_priority=priority)

        # And then tell the broker to wait for all messages
        worker.resume()
        rabbitmq_broker.join(queue_name, timeout=5000)
        worker.join()

        # I expect the stored priorities to be saved in decreasing order
        assert message_processing_order == list(reversed(range(max_priority)))
    finally:
        worker.stop()
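
For the broker_priority option used above to have any effect, RabbitMQ needs the queue declared with a maximum priority. A minimal setup sketch, assuming dramatiq's RabbitmqBroker and a local server (the URL is a placeholder):

import dramatiq
from dramatiq.brokers.rabbitmq import RabbitmqBroker

# Declare queues with priority support (priorities 0-10); without
# max_priority, per-message broker_priority values are ignored.
rabbitmq_broker = RabbitmqBroker(url="amqp://127.0.0.1:5672", max_priority=10)
dramatiq.set_broker(rabbitmq_broker)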
Example #4
def test_rabbitmq_broker_retries_declaring_queues_when_connection_related_errors_occur(rabbitmq_broker):
    executed, declare_called = False, False
    original_declare = rabbitmq_broker._declare_queue

    def flaky_declare_queue(*args, **kwargs):
        nonlocal declare_called
        if not declare_called:
            declare_called = True
            raise pika.exceptions.AMQPConnectionError
        return original_declare(*args, **kwargs)

    # Given that I have a flaky connection to a rabbitmq server
    with patch.object(rabbitmq_broker, "_declare_queue", flaky_declare_queue):
        # When I declare an actor
        @dramatiq.actor(queue_name="flaky_queue")
        def do_work():
            nonlocal executed
            executed = True

        # And I send that actor a message
        do_work.send()

        # And wait for the worker to process the message
        worker = Worker(rabbitmq_broker, worker_threads=1)
        worker.start()

        try:
            rabbitmq_broker.join(do_work.queue_name, timeout=5000)
            worker.join()

            # Then the queue should eventually be declared and the message executed
            assert declare_called
            assert executed
        finally:
            worker.stop()
Example #5
def worker(*args, **kwargs):
    # Create and start the worker before entering the try block; in the
    # original, a failing Worker(...) call would leave the name unbound
    # and crash the finally clause with a NameError.
    worker = Worker(*args, **kwargs)
    worker.start()
    try:
        yield worker
    finally:
        worker.stop()
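
This generator is presumably meant to be wrapped as a context manager (or registered as a pytest fixture) by the caller; a minimal usage sketch, assuming contextlib and an existing stub_broker:

from contextlib import contextmanager

worker_ctx = contextmanager(worker)

with worker_ctx(stub_broker, worker_timeout=100) as w:
    stub_broker.join("default")
    w.join()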
Example #6
def test_actors_can_prioritize_work(stub_broker):
    # Given that I have a database of calls
    calls = []

    # And an actor with high priority
    @dramatiq.actor(priority=0)
    def hi():
        calls.append("hi")

    # And an actor with low priority
    @dramatiq.actor(priority=10)
    def lo():
        calls.append("lo")

    # If I send both actors a message
    lo.send_with_options()
    hi.send_with_options()

    # Then start a worker and join on their queue
    worker = Worker(stub_broker)
    worker.start()
    stub_broker.join("default")
    worker.join()
    worker.stop()

    # I expect the high priority worker to run first
    assert calls == ["hi", "lo"]
Example #7
class DramatiqTestCase(TransactionTestCase):
    def _pre_setup(self):
        super()._pre_setup()

        self.broker = get_broker()
        self.broker.flush_all()

        self.worker = Worker(self.broker, worker_timeout=100)
        self.worker.start()

    def _post_teardown(self):
        self.worker.stop()

        super()._post_teardown()
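
Subclasses inherit the broker/worker lifecycle set up in _pre_setup, so individual tests only send messages and join. A hedged sketch, where my_task is a hypothetical actor defined elsewhere:

class MyTaskTests(DramatiqTestCase):
    def test_task_runs_to_completion(self):
        # my_task is a hypothetical dramatiq actor.
        my_task.send()
        # Drain the queue before asserting on side effects.
        self.broker.join(my_task.queue_name)
        self.worker.join()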
Example #8
def worker_process(args, worker_id, logging_pipe):
    try:
        # Re-seed the random number generator from urandom on
        # supported platforms.  This should make it so that worker
        # processes don't all follow the same sequence.
        random.seed()

        logger = setup_worker_logging(args, worker_id, logging_pipe)
        module, broker = import_broker(args.broker)
        broker.emit_after("process_boot")

        for module in args.modules:
            importlib.import_module(module)

        worker = Worker(broker,
                        queues=args.queues,
                        worker_threads=args.threads)
        worker.start()
    except ImportError:
        logger.exception("Failed to import module.")
        return sys.exit(RET_IMPORT)
    except ConnectionError:
        logger.exception("Broker connection failed.")
        return sys.exit(RET_CONNECT)

    def termhandler(signum, frame):
        nonlocal running
        if running:
            logger.info("Stopping worker process...")
            running = False
        else:
            logger.warning("Killing worker process...")
            return sys.exit(RET_KILLED)

    logger.info("Worker process is ready for action.")
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    signal.signal(signal.SIGTERM, termhandler)
    if hasattr(signal, "SIGHUP"):
        signal.signal(signal.SIGHUP, termhandler)
    if hasattr(signal, "SIGBREAK"):
        signal.signal(signal.SIGBREAK, termhandler)

    running = True
    while running:
        time.sleep(1)

    worker.stop()
    broker.close()
    logging_pipe.close()
Example #9
def test_workers_dont_register_queues_that_arent_whitelisted(stub_broker):
    # Given that I have a worker object with a restricted set of queues
    worker = Worker(stub_broker, queues={"a", "b"})
    worker.start()

    try:
        # When I try to register a consumer for a queue that hasn't been whitelisted
        stub_broker.declare_queue("c")
        stub_broker.declare_queue("c.DQ")

        # Then a consumer should not get spun up for that queue
        assert "c" not in worker.consumers
        assert "c.DQ" not in worker.consumers
    finally:
        worker.stop()
Example #10
def worker_process(worker_id, logging_fd):
    """consume worker to process messages and execute the actor"""
    # TODO preload registries
    db_name = Configuration.get('db_name')
    try:
        logging_pipe = os.fdopen(logging_fd, "w")
        broker = prepare_broker(withmiddleware=True)
        broker.emit_after("process_boot")
        BlokManager.load()
        registry = RegistryManager.get(db_name, loadwithoutmigration=True)
        if registry is None:
            logger.critical("No registry found for %s", db_name)
            return os._exit(4)

        worker = Worker(
            broker, worker_threads=Configuration.get('dramatiq_threads', 1))
        worker.start()
        print('worker started')
    except ImportError as e:
        logger.critical(e)
        return os._exit(2)
    except ConnectionError as e:
        logger.critical("Broker connection failed. %s", e)
        return os._exit(3)

    def termhandler(signum, frame):
        nonlocal running
        BlokManager.unload()
        if running:
            logger.info("Stopping worker process...")
            running = False
        else:
            logger.warning("Killing worker process...")
            return os._exit(1)

    logger.info("Worker process is ready for action.")
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    signal.signal(signal.SIGTERM, termhandler)
    signal.signal(signal.SIGHUP, termhandler)

    running = True
    while running:
        time.sleep(1)

    worker.stop()
    broker.close()
    logging_pipe.close()
Example #11
def test_redis_requeues_unhandled_delay_messages_on_shutdown(redis_broker):
    # Given that I have an actor that takes its time
    @dramatiq.actor
    def do_work():
        pass

    # If I send it a delayed message
    message = do_work.send_with_options(delay=10000)

    # Then start a worker and subsequently shut it down
    worker = Worker(redis_broker, worker_threads=1)
    worker.start()
    worker.stop()

    # I expect it to have re-enqueued the message
    messages = redis_broker.client.lrange("dramatiq:%s" % dq_name(do_work.queue_name), 0, 10)
    assert message.options["redis_message_id"].encode("utf-8") in messages
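
The dq_name helper maps a queue name to its delay-queue counterpart (the ".DQ" suffix also visible in Example #9); assuming the standard dramatiq layout, it comes from dramatiq.common:

from dramatiq.common import dq_name

assert dq_name("default") == "default.DQ"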
Example #12
def worker_process(args, worker_id, logging_fd):
    try:
        logging_pipe = os.fdopen(logging_fd, "w")
        logger = setup_worker_logging(args, worker_id, logging_pipe)
        module, broker = import_broker(args.broker)
        broker.emit_after("process_boot")

        for module in args.modules:
            importlib.import_module(module)

        worker = Worker(broker,
                        queues=args.queues,
                        worker_threads=args.threads)
        worker.start()
    except ImportError:
        logger.exception("Failed to import module.")
        return os._exit(RET_IMPORT)
    except ConnectionError:
        logger.exception("Broker connection failed.")
        return os._exit(RET_CONNECT)

    def termhandler(signum, frame):
        nonlocal running
        if running:
            logger.info("Stopping worker process...")
            running = False
        else:
            logger.warning("Killing worker process...")
            return os._exit(RET_KILLED)

    logger.info("Worker process is ready for action.")
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    signal.signal(signal.SIGTERM, termhandler)
    signal.signal(signal.SIGHUP, termhandler)

    running = True
    while running:
        time.sleep(1)

    worker.stop()
    broker.close()
    logging_pipe.close()
Example #13
def test_actors_can_be_assigned_message_age_limits(stub_broker):
    # Given that I have a database
    runs = []

    # And an actor whose messages have an age limit
    @dramatiq.actor(max_age=100)
    def do_work():
        runs.append(1)

    # If I send it a message
    do_work.send()

    # And join on its queue after the age limit has passed
    time.sleep(0.1)
    worker = Worker(stub_broker, worker_timeout=100)
    worker.start()
    stub_broker.join(do_work.queue_name)
    worker.join()
    worker.stop()

    # I expect the message to have been skipped
    assert sum(runs) == 0
Example #14
def stub_worker(stub_broker):
    worker = Worker(stub_broker, worker_timeout=100)
    worker.start()
    yield worker
    worker.stop()
Example #15
def stub_worker(stub_broker: dramatiq.Broker) -> Generator[dramatiq.Worker, None, None]:
    worker = Worker(stub_broker, worker_timeout=100, worker_threads=32)
    worker.start()
    yield worker
    worker.stop()
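
Both variants read like pytest fixtures; in a conftest.py they would normally carry a @pytest.fixture decorator, after which tests request them by name. A minimal sketch, assuming that decoration and the stub_broker fixture:

def test_actor_runs(stub_broker, stub_worker):
    @dramatiq.actor
    def do_work():
        pass

    do_work.send()
    stub_broker.join(do_work.queue_name)
    stub_worker.join()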
Example #16
def worker(broker: StubBroker) -> Generator[Worker, None, None]:
    # Note: this fixture never calls worker.start(); the tests that use it
    # presumably start the worker themselves before joining on it.
    worker = Worker(broker, worker_timeout=100)
    yield worker
    worker.stop()
Example #17
def redis_worker(redis_broker):
    worker = Worker(redis_broker, worker_threads=32)
    worker.start()
    yield worker
    worker.stop()
Example #18
def rabbitmq_worker(rabbitmq_broker):
    worker = Worker(rabbitmq_broker, worker_threads=32)
    worker.start()
    yield worker
    worker.stop()
Example #19
def stub_worker():
    worker = Worker(settings.broker, worker_timeout=100)
    worker.start()
    yield worker
    worker.stop()
Example #20
def worker(broker):
    worker = Worker(broker, worker_timeout=100)
    worker.start()
    yield worker
    worker.stop()
Example #21
def worker_process(args, worker_id, logging_pipe, canteen, event):
    try:
        # Re-seed the random number generator from urandom on
        # supported platforms.  This should make it so that worker
        # processes don't all follow the same sequence.
        random.seed()

        logger = setup_worker_logging(args, worker_id, logging_pipe)
        logger.debug("Loading broker...")
        module, broker = import_broker(args.broker)
        broker.emit_after("process_boot")

        logger.debug("Loading modules...")
        for module in args.modules:
            importlib.import_module(module)

        if not canteen.initialized:
            with canteen.get_lock():
                if not canteen.initialized:
                    logger.debug("Sending forks to main process...")
                    for middleware in broker.middleware:
                        for fork in middleware.forks:
                            fork_path = "%s:%s" % (fork.__module__,
                                                   fork.__name__)
                            canteen_add(canteen, fork_path)

        logger.debug("Starting worker threads...")
        worker = Worker(broker,
                        queues=args.queues,
                        worker_threads=args.threads)
        worker.start()
    except ImportError:
        logger.exception("Failed to import module.")
        return sys.exit(RET_IMPORT)
    except ConnectionError:
        logger.exception("Broker connection failed.")
        return sys.exit(RET_CONNECT)
    finally:
        # Signal to the master process that this process has booted,
        # regardless of whether it failed or not.  If it did fail, the
        # worker process will realize that soon enough.
        event.set()

    def termhandler(signum, frame):
        nonlocal running
        if running:
            logger.info("Stopping worker process...")
            running = False
        else:
            logger.warning("Killing worker process...")
            return sys.exit(RET_KILLED)

    logger.info("Worker process is ready for action.")
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    signal.signal(signal.SIGTERM, termhandler)
    if hasattr(signal, "SIGHUP"):
        signal.signal(signal.SIGHUP, termhandler)
    if hasattr(signal, "SIGBREAK"):
        signal.signal(signal.SIGBREAK, termhandler)

    running = True
    while running and not worker.restart_requested:
        time.sleep(1)

    if worker.restart_requested:
        logger.info("Requesting worker restart.")
    worker.stop()
    logger.info("Worker stopped.")
    broker.close()

    logging_pipe.close()
    if worker.restart_requested:
        sys.exit(RET_RESTART)
Example #22
def redis_worker(redis_broker):
    worker = Worker(redis_broker)
    worker.start()
    yield worker
    worker.stop()
Example #23
def urlrabbitmq_worker(urlrabbitmq_broker):
    worker = Worker(urlrabbitmq_broker)
    worker.start()
    yield worker
    worker.stop()
Example #24
def worker():
    worker = Worker(broker, worker_timeout=100, worker_threads=1)
    worker.start()
    yield worker
    worker.stop()