Example #1
def test_abort_notifications_are_received(
    stub_broker: dramatiq.Broker,
    stub_worker: dramatiq.Worker,
    event_backend: EventBackend,
) -> None:
    # Given that I have lists for recording aborts and successes
    aborts, successes = [], []

    abortable = Abortable(backend=event_backend)
    stub_broker.add_middleware(abortable)

    # And an actor that handles shutdown interrupts
    @dramatiq.actor(abortable=True, max_retries=0)
    def do_work() -> None:
        try:
            for _ in range(10):
                time.sleep(0.1)
        except Abort:
            aborts.append(1)
            raise
        successes.append(1)

    stub_broker.emit_after("process_boot")

    # If I send it a message
    message = do_work.send()

    # Then wait and signal the task to terminate
    time.sleep(0.1)
    abort(message.message_id)

    # Then join on the queue
    stub_broker.join(do_work.queue_name)
    stub_worker.join()
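The snippet stops at the joins; a test like this would typically close with assertions over the two lists, along these lines (a sketch, not part of the original listing):

    # Expected outcome: the abort was observed and the task did not complete.
    assert sum(aborts) == 1
    assert sum(successes) == 0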
Example #2
def test_not_abortable(
    stub_broker: dramatiq.Broker,
    stub_worker: dramatiq.Worker,
    stub_event_backend: EventBackend,
) -> None:
    aborts, successes = [], []
    abortable = Abortable(backend=stub_event_backend)
    stub_broker.add_middleware(abortable)

    @dramatiq.actor(abortable=False)
    def not_abortable() -> None:
        try:
            for _ in range(10):
                time.sleep(0.1)
        except Abort:
            aborts.append(1)
            raise
        successes.append(1)

    stub_broker.emit_after("process_boot")

    # If I send it a message
    message = not_abortable.send()

    # Then wait and signal the task to terminate
    time.sleep(0.1)
    abort(message.message_id)

    # Then join on the queue
    stub_broker.join(not_abortable.queue_name)
    stub_worker.join()

    # I expect the abort to be ignored and the task to run to completion
    assert sum(aborts) == 0
    assert sum(successes) == 1
Example #3
def test_abort_polling(
    stub_broker: dramatiq.Broker,
    stub_worker: dramatiq.Worker,
    stub_event_backend: EventBackend,
) -> None:
    sentinel = []
    abortable = Abortable(backend=stub_event_backend)
    stub_broker.add_middleware(abortable)

    @dramatiq.actor(abortable=True, max_retries=0)
    def abort_with_delay() -> None:
        try:
            sentinel.append(True)
            time.sleep(5)
            # Request our own abort; the Abortable middleware's polling should
            # pick it up while the loop below is still running.
            abortable.abort(message.message_id)
            for _ in range(20):
                time.sleep(0.1)
            sentinel.append(True)
        except Abort:
            sentinel.append(False)
            raise
        sentinel.append(True)

    stub_broker.emit_after("process_boot")

    # If I send it a message
    message = abort_with_delay.send()

    # Then join on the queue
    stub_broker.join(abort_with_delay.queue_name)
    stub_worker.join()

    # I expect the task to have started and then been aborted mid-run
    assert sentinel == [True, False]
Example #4
    def _pre_setup(self):
        super()._pre_setup()

        self.broker = get_broker()
        self.broker.flush_all()

        self.worker = Worker(self.broker, worker_timeout=100)
        self.worker.start()
Example #5
def test_dramatiq_error(
    broker: Broker, worker: Worker, backend: WriterBackend, frozen_time: Any
) -> None:
    @dramatiq.actor(queue_name="test")
    def simple_task_error() -> None:
        raise ValueError("Expected")

    message = simple_task_error.send_with_options(time_limit=10000)

    assert backend.enqueued() == [
        EnqueuedLog(
            type=LogType.ENQUEUED,
            timestamp=datetime.now(),
            job_id=message.message_id,
            task_id="simple_task_error",
            job=JobDetails(
                queue="test",
                task_path="tests.test_dramatiq.test_dramatiq_error.<locals>"
                ".simple_task_error",
                execute_at=None,
                args=[],
                kwargs={},
                options={"time_limit": 10000},
            ),
        )
    ]

    assert backend.dequeued() == []
    assert backend.completed() == []

    worker.start()
    broker.join(simple_task_error.queue_name)
    worker.join()

    assert backend.dequeued() == [
        DequeuedLog(
            job_id=message.message_id,
            task_id="simple_task_error",
            timestamp=datetime.now(),
            type=LogType.DEQUEUED,
        )
    ]
    exceptions = backend.exception()
    for exception in exceptions:
        assert "Expected" in exception.exception
        exception.exception = ""
    assert exceptions == [
        ExceptionLog(
            job_id=message.message_id,
            task_id="simple_task_error",
            timestamp=datetime.now(),
            type=LogType.EXCEPTION,
            exception="",
        )
    ]
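Note that the frozen_time fixture itself is not shown in this listing; the timestamp=datetime.now() equality checks only hold if the clock is frozen for the whole test. A minimal sketch of such a fixture, assuming the freezegun library:

import pytest
from freezegun import freeze_time

@pytest.fixture
def frozen_time():
    # Freeze the clock so log timestamps compare equal to datetime.now()
    # inside the test body.
    with freeze_time("2022-01-01 12:00:00") as frozen:
        yield frozen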
Example #6
def test_dramatiq_completion(
    broker: StubBroker, worker: Worker, backend: WriterBackend, frozen_time: Any
) -> None:
    @dramatiq.actor(queue_name="test")
    def simple_task(a: str, b: str) -> str:
        return "hello"

    message = simple_task.send("a", b="b")

    assert backend.enqueued() == [
        EnqueuedLog(
            type=LogType.ENQUEUED,
            timestamp=datetime.now(),
            job_id=message.message_id,
            task_id="simple_task",
            job=JobDetails(
                queue="test",
                task_path=(
                    "tests.test_dramatiq.test_dramatiq_completion.<locals>"
                    ".simple_task"
                ),
                execute_at=None,
                args=["a"],
                kwargs={"b": "b"},
                options={},
            ),
        )
    ]

    assert backend.dequeued() == []
    assert backend.completed() == []

    worker.start()
    broker.join(simple_task.queue_name)
    worker.join()

    assert backend.dequeued() == [
        DequeuedLog(
            job_id=message.message_id,
            task_id="simple_task",
            timestamp=datetime.now(),
            type=LogType.DEQUEUED,
        )
    ]
    assert backend.completed() == [
        CompletedLog(
            job_id=message.message_id,
            task_id="simple_task",
            timestamp=datetime.now(),
            result="hello",
            type=LogType.COMPLETED,
        )
    ]
Example #7
def test_rabbitmq_broker_retries_declaring_queues_when_connection_related_errors_occur(rabbitmq_broker):
    executed, declare_called = False, False
    original_declare = rabbitmq_broker._declare_queue

    def flaky_declare_queue(*args, **kwargs):
        nonlocal declare_called
        if not declare_called:
            declare_called = True
            raise pika.exceptions.AMQPConnectionError
        return original_declare(*args, **kwargs)

    # Given that I have a flaky connection to a rabbitmq server
    with patch.object(rabbitmq_broker, "_declare_queue", flaky_declare_queue):
        # When I declare an actor
        @dramatiq.actor(queue_name="flaky_queue")
        def do_work():
            nonlocal executed
            executed = True

        # And I send that actor a message
        do_work.send()

        # And wait for the worker to process the message
        worker = Worker(rabbitmq_broker, worker_threads=1)
        worker.start()

        try:
            rabbitmq_broker.join(do_work.queue_name, timeout=5000)
            worker.join()

            # Then the queue should eventually be declared and the message executed
            assert declare_called
            assert executed
        finally:
            worker.stop()
Example #8
class DramatiqTestCase(TransactionTestCase):
    def _pre_setup(self):
        super()._pre_setup()

        self.broker = get_broker()
        self.broker.flush_all()

        self.worker = Worker(self.broker, worker_timeout=100)
        self.worker.start()

    def _post_teardown(self):
        self.worker.stop()

        super()._post_teardown()
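A usage sketch for the base class above, assuming a hypothetical count_words actor defined in the application under test; each test gets a flushed broker and a running worker:

class CountWordsTests(DramatiqTestCase):
    def test_count_words_processes_message(self):
        # count_words is a hypothetical @dramatiq.actor defined elsewhere.
        count_words.send("https://example.com")

        # Wait for the queue to drain before asserting on side effects.
        self.broker.join(count_words.queue_name)
        self.worker.join()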
Example #9
def test_actors_can_prioritize_work(stub_broker):
    # Given that I have a database of calls
    calls = []

    # And an actor with high priority
    @dramatiq.actor(priority=0)
    def hi():
        calls.append("hi")

    # And an actor with low priority
    @dramatiq.actor(priority=10)
    def lo():
        calls.append("lo")

    # If I send both actors a message
    lo.send_with_options()
    hi.send_with_options()

    # Then start a worker and join on their queue
    worker = Worker(stub_broker)
    worker.start()
    stub_broker.join("default")
    worker.join()
    worker.stop()

    # I expect the high priority worker to run first
    assert calls == ["hi", "lo"]
Example #10
    def test_create_range_for_stats_async(
        self,
        transactional_db,
        broker: stub.StubBroker,
        worker: dramatiq.Worker,
        hosting_provider_with_sample_user: ac_models.Hostingprovider,
        green_ip: gc_models.GreencheckIp,
        client,
    ):
        """
        Create a collection of daily stats, for a range of dates provided.

        """
        broker.declare_queue("default")
        generated_dates = self._set_up_dates_for_last_week()

        for date in generated_dates:
            gc = gc_factories.GreencheckFactory.create(date=date +
                                                       relativedelta(hours=2))
            # logger.info(f"gc {date}: {gc.__dict__}")

        logger.info(f"just this date: { generated_dates[0] }")

        gc_models.DailyStat.create_jobs_for_date_range_async(
            generated_dates, "total_count")

        # Wait for all the tasks to be processed
        broker.join("default")
        worker.join()

        green_stats = gc_models.DailyStat.objects.filter(
            green=gc_choices.BoolChoice.YES)
        grey_stats = gc_models.DailyStat.objects.filter(
            green=gc_choices.BoolChoice.NO)
        mixed_stats = gc_models.DailyStat.objects.exclude(
            green__in=[gc_choices.BoolChoice.YES, gc_choices.BoolChoice.NO])

        # have we generated the expected stats per day?
        assert green_stats.count() == 7
        assert grey_stats.count() == 7
        assert mixed_stats.count() == 7

        # we should see one count showing zero green checks for each day
        assert [stat.count for stat in green_stats] == [0, 0, 0, 0, 0, 0, 0]

        # mixed and grey should be the same
        assert [stat.count for stat in grey_stats] == [1, 1, 1, 1, 1, 1, 1]
        assert [stat.count for stat in mixed_stats] == [1, 1, 1, 1, 1, 1, 1]
Example #11
def test_disabled_log(
    broker: Broker,
    worker: Worker,
    backend: WriterBackend,
    actor_log: Optional[bool],
    task_log: Optional[bool],
    log_expected: Optional[bool],
) -> None:
    @dramatiq.actor(queue_name="test", log=actor_log)
    def simple_task_with_log_option() -> None:
        pass

    simple_task_with_log_option.send_with_options(log=task_log)

    worker.start()
    broker.join(simple_task_with_log_option.queue_name)
    worker.join()

    expected = 1 if log_expected else 0
    assert len(backend.dequeued()) == expected
    assert len(backend.completed()) == expected
Example #12
def test_cancel_notifications_are_received(
    stub_broker: dramatiq.Broker,
    stub_worker: dramatiq.Worker,
    event_backend: EventBackend,
) -> None:
    # Given that I have lists for recording aborts and successes
    aborts, successes = [], []

    abortable = Abortable(backend=event_backend)
    stub_broker.add_middleware(abortable)
    test_event = Event()

    # And an actor that handles shutdown interrupts
    @dramatiq.actor(abortable=True, max_retries=0)
    def do_work() -> None:
        try:
            test_event.set()
            for _ in range(10):
                time.sleep(0.1)
        except Abort:
            aborts.append(1)
            raise
        successes.append(1)

    stub_broker.emit_after("process_boot")

    # If I send it a message
    message = do_work.send()

    # Then wait for the actor to start, then request cancellation
    test_event.wait()
    abort(message.message_id, mode=AbortMode.CANCEL)

    # Then join on the queue
    stub_broker.join(do_work.queue_name)
    stub_worker.join()

    # The task has already started, so the cancel request has no effect and it runs to completion.
    assert successes
    assert not aborts
Example #13
def worker_process(args, worker_id, logging_pipe, canteen):
    try:
        # Re-seed the random number generator from urandom on
        # supported platforms.  This should make it so that worker
        # processes don't all follow the same sequence.
        random.seed()

        logger = setup_worker_logging(args, worker_id, logging_pipe)
        logger.debug("Loading broker...")
        module, broker = import_broker(args.broker)
        broker.emit_after("process_boot")

        logger.debug("Loading modules...")
        for module in args.modules:
            importlib.import_module(module)

        with canteen_try_init(canteen) as acquired:
            if acquired:
                logger.debug("Sending forks to main process...")
                for middleware in broker.middleware:
                    for fork in middleware.forks:
                        fork_path = "%s:%s" % (fork.__module__, fork.__name__)
                        canteen_add(canteen, fork_path)

        logger.debug("Starting worker threads...")
        worker = Worker(broker,
                        queues=args.queues,
                        worker_threads=args.threads)
        worker.start()
    except ImportError:
        logger.exception("Failed to import module.")
        return sys.exit(RET_IMPORT)
    except ConnectionError:
        logger.exception("Broker connection failed.")
        return sys.exit(RET_CONNECT)

    def termhandler(signum, frame):
        nonlocal running
        if running:
            logger.info("Stopping worker process...")
            running = False
        else:
            logger.warning("Killing worker process...")
            return sys.exit(RET_KILLED)

    logger.info("Worker process is ready for action.")
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    signal.signal(signal.SIGTERM, termhandler)
    if hasattr(signal, "SIGHUP"):
        signal.signal(signal.SIGHUP, termhandler)
    if hasattr(signal, "SIGBREAK"):
        signal.signal(signal.SIGBREAK, termhandler)

    running = True
    while running:
        time.sleep(1)

    worker.stop()
    broker.close()
    logging_pipe.close()
Example #14
def worker(*args, **kwargs):
    try:
        worker = Worker(*args, **kwargs)
        worker.start()
        yield worker
    finally:
        worker.stop()
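As listed, this helper is a bare generator; in its source it is presumably decorated, for example as a context manager or a pytest fixture. A minimal sketch of the context-manager variant against a stub broker (everything outside the body above is illustrative):

from contextlib import contextmanager

from dramatiq import Worker
from dramatiq.brokers.stub import StubBroker

stub_broker = StubBroker()

@contextmanager
def worker(*args, **kwargs):
    # Start a worker, hand it to the caller, and always stop it on exit.
    w = Worker(*args, **kwargs)
    w.start()
    try:
        yield w
    finally:
        w.stop()

# Run a worker only for the duration of the block.
stub_broker.declare_queue("default")
with worker(stub_broker, worker_timeout=100) as w:
    stub_broker.join("default")
    w.join()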
Example #15
def test_redis_requeues_unhandled_messages_on_shutdown(redis_broker):
    # Given that I have an actor that takes its time
    @dramatiq.actor
    def do_work():
        time.sleep(1)

    # If I send it two messages
    message_1 = do_work.send()
    message_2 = do_work.send()

    # Then start a worker and subsequently shut it down
    worker = Worker(redis_broker, worker_threads=1)
    worker.start()
    time.sleep(0.25)
    worker.stop()

    # I expect it to have processed one of the messages and re-enqueued the other
    messages = redis_broker.client.lrange(f"dramatiq:{do_work.queue_name}", 0, 10)
    if message_1.options["redis_message_id"].encode("utf-8") not in messages:
        assert message_2.options["redis_message_id"].encode("utf-8") in messages
    else:
        assert message_1.options["redis_message_id"].encode("utf-8") in messages
Example #16
def worker_process(args, worker_id, logging_pipe):
    try:
        # Re-seed the random number generator from urandom on
        # supported platforms.  This should make it so that worker
        # processes don't all follow the same sequence.
        random.seed()

        logger = setup_worker_logging(args, worker_id, logging_pipe)
        module, broker = import_broker(args.broker)
        broker.emit_after("process_boot")

        for module in args.modules:
            importlib.import_module(module)

        worker = Worker(broker,
                        queues=args.queues,
                        worker_threads=args.threads)
        worker.start()
    except ImportError:
        logger.exception("Failed to import module.")
        return sys.exit(RET_IMPORT)
    except ConnectionError:
        logger.exception("Broker connection failed.")
        return sys.exit(RET_CONNECT)

    def termhandler(signum, frame):
        nonlocal running
        if running:
            logger.info("Stopping worker process...")
            running = False
        else:
            logger.warning("Killing worker process...")
            return sys.exit(RET_KILLED)

    logger.info("Worker process is ready for action.")
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    signal.signal(signal.SIGTERM, termhandler)
    if hasattr(signal, "SIGHUP"):
        signal.signal(signal.SIGHUP, termhandler)
    if hasattr(signal, "SIGBREAK"):
        signal.signal(signal.SIGBREAK, termhandler)

    running = True
    while running:
        time.sleep(1)

    worker.stop()
    broker.close()
    logging_pipe.close()
Example #17
def test_actors_can_be_assigned_message_age_limits(stub_broker):
    # Given that I have a list for recording runs
    runs = []

    # And an actor whose messages have an age limit
    @dramatiq.actor(max_age=100)
    def do_work():
        runs.append(1)

    # If I send it a message
    do_work.send()

    # And join on its queue after the age limit has passed
    time.sleep(0.1)
    worker = Worker(stub_broker, worker_timeout=100)
    worker.start()
    stub_broker.join(do_work.queue_name)
    worker.join()
    worker.stop()

    # I expect the message to have been skipped
    assert sum(runs) == 0
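max_age is given in milliseconds, so the 0.1 s sleep lets the message exceed its age limit before a worker picks it up. If the source behaves like current dramatiq releases, the same limit can also be set on an individual message (hedged sketch):

do_work.send_with_options(max_age=100)  # this particular message expires 100 ms after being enqueued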
Example #18
def test_workers_dont_register_queues_that_arent_whitelisted(stub_broker):
    # Given that I have a worker object with a restricted set of queues
    worker = Worker(stub_broker, queues={"a", "b"})
    worker.start()

    try:
        # When I try to register a consumer for a queue that hasn't been whitelisted
        stub_broker.declare_queue("c")
        stub_broker.declare_queue("c.DQ")

        # Then a consumer should not get spun up for that queue
        assert "c" not in worker.consumers
        assert "c.DQ" not in worker.consumers
    finally:
        worker.stop()
Example #19
def worker_process(worker_id, logging_fd):
    """consume worker to process messages and execute the actor"""
    # TODO preload registries
    db_name = Configuration.get('db_name')
    try:
        logging_pipe = os.fdopen(logging_fd, "w")
        broker = prepare_broker(withmiddleware=True)
        broker.emit_after("process_boot")
        BlokManager.load()
        registry = RegistryManager.get(db_name, loadwithoutmigration=True)
        if registry is None:
            logger.critical("No registry found for %s", db_name)
            return os._exit(4)

        worker = Worker(
            broker, worker_threads=Configuration.get('dramatiq_threads', 1))
        worker.start()
        print('worker started')
    except ImportError as e:
        logger.critical(e)
        return os._exit(2)
    except ConnectionError as e:
        logger.critical("Broker connection failed. %s", e)
        return os._exit(3)

    def termhandler(signum, frame):
        nonlocal running
        BlokManager.unload()
        if running:
            logger.info("Stopping worker process...")
            running = False
        else:
            logger.warning("Killing worker process...")
            return os._exit(1)

    logger.info("Worker process is ready for action.")
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    signal.signal(signal.SIGTERM, termhandler)
    signal.signal(signal.SIGHUP, termhandler)

    running = True
    while running:
        time.sleep(1)

    worker.stop()
    broker.close()
    logging_pipe.close()
Example #20
def test_redis_requeues_unhandled_delay_messages_on_shutdown(redis_broker):
    # Given that I have an actor that takes its time
    @dramatiq.actor
    def do_work():
        pass

    # If I send it a delayed message
    message = do_work.send_with_options(delay=10000)

    # Then start a worker and subsequently shut it down
    worker = Worker(redis_broker, worker_threads=1)
    worker.start()
    worker.stop()

    # I expect it to have re-enqueued the message
    messages = redis_broker.client.lrange("dramatiq:%s" % dq_name(do_work.queue_name), 0, 10)
    assert message.options["redis_message_id"].encode("utf-8") in messages
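dq_name comes from dramatiq.common and maps a queue name to its delay-queue counterpart, where delayed messages wait until they are due:

from dramatiq.common import dq_name

assert dq_name("default") == "default.DQ"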
Example #21
def worker_process(args, worker_id, logging_fd):
    try:
        logging_pipe = os.fdopen(logging_fd, "w")
        logger = setup_worker_logging(args, worker_id, logging_pipe)
        module, broker = import_broker(args.broker)
        broker.emit_after("process_boot")

        for module in args.modules:
            importlib.import_module(module)

        worker = Worker(broker,
                        queues=args.queues,
                        worker_threads=args.threads)
        worker.start()
    except ImportError:
        logger.exception("Failed to import module.")
        return os._exit(RET_IMPORT)
    except ConnectionError:
        logger.exception("Broker connection failed.")
        return os._exit(RET_CONNECT)

    def termhandler(signum, frame):
        nonlocal running
        if running:
            logger.info("Stopping worker process...")
            running = False
        else:
            logger.warning("Killing worker process...")
            return os._exit(RET_KILLED)

    logger.info("Worker process is ready for action.")
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    signal.signal(signal.SIGTERM, termhandler)
    signal.signal(signal.SIGHUP, termhandler)

    running = True
    while running:
        time.sleep(1)

    worker.stop()
    broker.close()
    logging_pipe.close()
Example #22
def worker_process(args, worker_id, logging_pipe, canteen, event):
    try:
        # Re-seed the random number generator from urandom on
        # supported platforms.  This should make it so that worker
        # processes don't all follow the same sequence.
        random.seed()

        logger = setup_worker_logging(args, worker_id, logging_pipe)
        logger.debug("Loading broker...")
        module, broker = import_broker(args.broker)
        broker.emit_after("process_boot")

        logger.debug("Loading modules...")
        for module in args.modules:
            importlib.import_module(module)

        if not canteen.initialized:
            with canteen.get_lock():
                if not canteen.initialized:
                    logger.debug("Sending forks to main process...")
                    for middleware in broker.middleware:
                        for fork in middleware.forks:
                            fork_path = "%s:%s" % (fork.__module__,
                                                   fork.__name__)
                            canteen_add(canteen, fork_path)

        logger.debug("Starting worker threads...")
        worker = Worker(broker,
                        queues=args.queues,
                        worker_threads=args.threads)
        worker.start()
    except ImportError:
        logger.exception("Failed to import module.")
        return sys.exit(RET_IMPORT)
    except ConnectionError:
        logger.exception("Broker connection failed.")
        return sys.exit(RET_CONNECT)
    finally:
        # Signal to the master process that this process has booted,
        # regardless of whether it failed or not.  If it did fail, the
        # worker process will realize that soon enough.
        event.set()

    def termhandler(signum, frame):
        nonlocal running
        if running:
            logger.info("Stopping worker process...")
            running = False
        else:
            logger.warning("Killing worker process...")
            return sys.exit(RET_KILLED)

    logger.info("Worker process is ready for action.")
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    signal.signal(signal.SIGTERM, termhandler)
    if hasattr(signal, "SIGHUP"):
        signal.signal(signal.SIGHUP, termhandler)
    if hasattr(signal, "SIGBREAK"):
        signal.signal(signal.SIGBREAK, termhandler)

    running = True
    while running and not worker.restart_requested:
        time.sleep(1)

    if worker.restart_requested:
        logger.info("Requesting worker restart.")
    worker.stop()
    logger.info("Worker stopped.")
    broker.close()

    logging_pipe.close()
    if worker.restart_requested:
        sys.exit(RET_RESTART)
Example #23
def worker(broker):
    worker = Worker(broker, worker_timeout=100)
    worker.start()
    yield worker
    worker.stop()
Example #24
def test_dramatiq_failed(
    broker: Broker, worker: Worker, backend: WriterBackend, frozen_time: Any
) -> None:
    class FailMessage(Middleware):
        def after_process_message(
            self,
            broker: Broker,
            message: Message,
            *,
            result: Any = None,
            exception: Optional[BaseException] = None,
        ) -> None:
            message.fail()

    @dramatiq.actor(queue_name="test")
    def simple_task_failed() -> None:
        return

    broker.add_middleware(FailMessage())

    message = simple_task_failed.send()
    simple_task_failed.send_with_options(log=False)

    assert backend.enqueued() == [
        EnqueuedLog(
            type=LogType.ENQUEUED,
            timestamp=datetime.now(),
            job_id=message.message_id,
            task_id="simple_task_failed",
            job=JobDetails(
                queue="test",
                task_path="tests.test_dramatiq.test_dramatiq_failed.<locals>"
                ".simple_task_failed",
                execute_at=None,
                args=[],
                kwargs={},
                options={},
            ),
        )
    ]

    assert backend.dequeued() == []
    assert backend.completed() == []

    worker.start()
    broker.join(simple_task_failed.queue_name)
    worker.join()

    assert backend.dequeued() == [
        DequeuedLog(
            job_id=message.message_id,
            task_id="simple_task_failed",
            timestamp=datetime.now(),
            type=LogType.DEQUEUED,
        )
    ]
    exceptions = backend.exception()
    for exception in exceptions:
        assert "Failed" in exception.exception
        exception.exception = ""
    assert exceptions == [
        ExceptionLog(
            job_id=message.message_id,
            task_id="simple_task_failed",
            timestamp=datetime.now(),
            type=LogType.EXCEPTION,
            exception="",
        )
    ]
Example #25
def urlrabbitmq_worker(urlrabbitmq_broker):
    worker = Worker(urlrabbitmq_broker)
    worker.start()
    yield worker
    worker.stop()
Example #26
def test_rabbitmq_broker_can_enqueue_messages_with_priority(rabbitmq_broker):
    max_priority = 10
    message_processing_order = []
    queue_name = "prioritized"

    # Given that I have an actor that records the priority of each message it processes
    @dramatiq.actor(queue_name=queue_name)
    def do_work(message_priority):
        message_processing_order.append(message_priority)

    worker = Worker(rabbitmq_broker, worker_threads=1)
    worker.queue_prefetch = 1
    worker.start()
    worker.pause()

    try:
        # When I send that actor messages with increasing priorities
        for priority in range(max_priority):
            do_work.send_with_options(args=(priority, ),
                                      broker_priority=priority)

        # And then tell the broker to wait for all messages
        worker.resume()
        rabbitmq_broker.join(queue_name, timeout=5000)
        worker.join()

        # I expect the stored priorities to be saved in decreasing order
        assert message_processing_order == list(reversed(range(max_priority)))
    finally:
        worker.stop()
Example #27
def stub_worker(stub_broker):
    worker = Worker(stub_broker, worker_timeout=100)
    worker.start()
    yield worker
    worker.stop()
Example #28
def stub_worker():
    worker = Worker(settings.broker, worker_timeout=100)
    worker.start()
    yield worker
    worker.stop()
Example #29
def worker(broker: StubBroker) -> Generator[Worker, None, None]:
    worker = Worker(broker, worker_timeout=100)
    yield worker
    worker.stop()
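Unlike the other worker fixtures in this listing, this one never calls worker.start(); tests that depend on it start the worker themselves, as Examples 5, 6, 11 and 24 above do. A sketch with a hypothetical test and queue name:

def test_something(broker: StubBroker, worker: Worker) -> None:
    worker.start()
    broker.join("some-queue")  # hypothetical queue name
    worker.join()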
Example #30
def redis_worker(redis_broker):
    worker = Worker(redis_broker)
    worker.start()
    yield worker
    worker.stop()