Пример #1
0
def test_shutdown_notifications_worker_shutdown_messages(raise_thread_exception, caplog):
    """Worker shutdown raises Shutdown in every registered thread and logs it."""
    # Record every log level.
    caplog.set_level(logging.NOTSET)

    # Given a middleware tracking two fake thread ids
    middleware = shutdown.ShutdownNotifications()
    middleware.manager.notifications = [1, 2]

    # And a broker configured with that middleware
    broker = StubBroker(middleware=[middleware])

    # When the worker shuts down
    broker.emit_before("worker_shutdown", None)

    # Then a Shutdown interrupt is raised in each registered thread
    expected_calls = [mock.call(thread_id, shutdown.Shutdown) for thread_id in (1, 2)]
    raise_thread_exception.assert_has_calls(expected_calls)

    # And one debug line plus one info line per thread is logged
    logger_name = "dramatiq.middleware.shutdown.ShutdownNotifications"
    expected_records = [
        (logger_name, logging.DEBUG,
         "Sending shutdown notification to worker threads..."),
        (logger_name, logging.INFO,
         "Worker shutdown notification. Raising exception in worker thread 1."),
        (logger_name, logging.INFO,
         "Worker shutdown notification. Raising exception in worker thread 2."),
    ]
    assert len(caplog.record_tuples) == 3
    assert caplog.record_tuples == expected_records
Пример #2
0
def test_shutdown_notifications_gevent_worker_shutdown_messages(caplog):
    """Worker shutdown kills every registered greenlet with Shutdown and logs it."""
    # Record every log level.
    caplog.set_level(logging.NOTSET)

    # Given a middleware tracking two greenlets
    middleware = shutdown.ShutdownNotifications()
    greenlets = [(ident, gevent.spawn()) for ident in (1, 2)]
    middleware.manager.notification_greenlets = greenlets

    # And a broker configured with that middleware
    broker = StubBroker(middleware=[middleware])

    # When the worker shuts down
    broker.emit_before("worker_shutdown", None)

    # Then each greenlet was killed with a Shutdown exception
    for _, greenlet in greenlets:
        assert isinstance(greenlet.exception, shutdown.Shutdown)

    # And one debug line plus one info line per greenlet is logged
    logger_name = "dramatiq.middleware.shutdown.ShutdownNotifications"
    expected_records = [
        (logger_name, logging.DEBUG,
         "Sending shutdown notification to worker threads..."),
        (logger_name, logging.INFO,
         "Worker shutdown notification. Raising exception in worker thread 1."),
        (logger_name, logging.INFO,
         "Worker shutdown notification. Raising exception in worker thread 2."),
    ]
    assert len(caplog.record_tuples) == 3
    assert caplog.record_tuples == expected_records
Пример #3
0
def test_dramatiq_completion(
    broker: StubBroker, worker: Worker, backend: WriterBackend, frozen_time: Any
) -> None:
    """End-to-end: a successfully executed actor produces enqueued,
    dequeued and completed entries in the writer backend.

    NOTE(review): timestamps are compared against ``datetime.now()`` — this
    relies on the ``frozen_time`` fixture pinning the clock for the whole
    test; confirm it does.
    """
    @dramatiq.actor(queue_name="test")
    def simple_task(a: str, b: str) -> str:
        return "hello"

    # Sending alone should create the enqueued record...
    message = simple_task.send("a", b="b")

    assert backend.enqueued() == [
        EnqueuedLog(
            type=LogType.ENQUEUED,
            timestamp=datetime.now(),
            job_id=message.message_id,
            task_id="simple_task",
            job=JobDetails(
                queue="test",
                task_path=(
                    "tests.test_dramatiq.test_dramatiq_completion.<locals>"
                    ".simple_task"
                ),
                execute_at=None,
                args=["a"],
                kwargs={"b": "b"},
                options={},
            ),
        )
    ]

    # ...but no dequeued/completed records before any worker runs.
    assert backend.dequeued() == []
    assert backend.completed() == []

    # Run the worker until the queue drains.
    worker.start()
    broker.join(simple_task.queue_name)
    worker.join()

    # After processing, exactly one dequeued and one completed record exist,
    # the latter carrying the actor's return value.
    assert backend.dequeued() == [
        DequeuedLog(
            job_id=message.message_id,
            task_id="simple_task",
            timestamp=datetime.now(),
            type=LogType.DEQUEUED,
        )
    ]
    assert backend.completed() == [
        CompletedLog(
            job_id=message.message_id,
            task_id="simple_task",
            timestamp=datetime.now(),
            result="hello",
            type=LogType.COMPLETED,
        )
    ]
Пример #4
0
def stub_broker():
    """Yield a booted StubBroker registered as the global default broker.

    Teardown flushes any remaining messages and closes the broker.
    """
    stub = StubBroker()
    stub.emit_after("process_boot")
    dramatiq.set_broker(stub)

    yield stub

    # Teardown: drop queued messages, then release resources.
    stub.flush_all()
    stub.close()
Пример #5
0
def test_shutdown_notifications_platform_not_supported(recwarn, monkeypatch):
    """Booting on an unsupported platform issues exactly one support warning."""
    # Pretend we run on an unknown platform.
    monkeypatch.setattr(shutdown, "current_platform", "not supported")

    # Given a broker configured with the shutdown notifier
    broker = StubBroker(middleware=[shutdown.ShutdownNotifications()])

    # When the process boots
    broker.emit_after("process_boot")

    # Then a single platform-support warning is issued
    expected = (
        "ShutdownNotifications cannot kill threads "
        "on your current platform ('not supported')."
    )
    assert len(recwarn) == 1
    assert str(recwarn[0].message) == expected
Пример #6
0
def test_time_limit_platform_not_supported(recwarn, monkeypatch):
    """Booting with TimeLimit on an unsupported platform warns exactly once."""
    # Pretend we run on an unknown platform.
    monkeypatch.setattr(time_limit, "current_platform", "not supported")

    # Given a broker configured with time limits
    broker = StubBroker(middleware=[time_limit.TimeLimit()])

    # When the process boots
    broker.emit_after("process_boot")

    # Then a single platform-support warning is issued
    expected = (
        "TimeLimit cannot kill threads "
        "on your current platform ('not supported')."
    )
    assert len(recwarn) == 1
    assert str(recwarn[0].message) == expected
Пример #7
0
def broker(sentry_init):
    """Yield a booted StubBroker with the Sentry Dramatiq integration enabled.

    Teardown flushes remaining messages and closes the broker.
    """
    sentry_init(integrations=[DramatiqIntegration()])

    stub = StubBroker()
    stub.emit_after("process_boot")
    dramatiq.set_broker(stub)

    yield stub

    # Teardown: drop queued messages, then release resources.
    stub.flush_all()
    stub.close()
    def test_create_range_for_stats_async(
        self,
        transactional_db,
        broker: stub.StubBroker,
        worker: dramatiq.Worker,
        hosting_provider_with_sample_user: ac_models.Hostingprovider,
        green_ip: gc_models.GreencheckIp,
        client,
    ):
        """
        Create a collection of daily stats, for a range of dates provided,
        and check that the expected green/grey/mixed stats were generated
        by the worker.
        """
        broker.declare_queue("default")
        generated_dates = self._set_up_dates_for_last_week()

        # One greencheck per day, two hours into the day.
        for date in generated_dates:
            gc_factories.GreencheckFactory.create(
                date=date + relativedelta(hours=2))

        logger.info(f"just this date: { generated_dates[0] }")

        gc_models.DailyStat.create_jobs_for_date_range_async(
            generated_dates, "total_count")

        # Wait for all the tasks to be processed
        broker.join("default")
        worker.join()

        green_stats = gc_models.DailyStat.objects.filter(
            green=gc_choices.BoolChoice.YES)
        grey_stats = gc_models.DailyStat.objects.filter(
            green=gc_choices.BoolChoice.NO)
        mixed_stats = gc_models.DailyStat.objects.exclude(
            green__in=[gc_choices.BoolChoice.YES, gc_choices.BoolChoice.NO])

        # have we generated the expected stats per day?
        assert green_stats.count() == 7
        assert grey_stats.count() == 7
        assert mixed_stats.count() == 7

        # we should see one count showing zero green checks for each day
        assert [stat.count for stat in green_stats] == [0, 0, 0, 0, 0, 0, 0]

        # mixed and grey should be the same
        # (the original asserted grey_stats twice — a copy-paste bug; the
        # second assertion was clearly meant to cover mixed_stats)
        assert [stat.count for stat in grey_stats] == [1, 1, 1, 1, 1, 1, 1]
        assert [stat.count for stat in mixed_stats] == [1, 1, 1, 1, 1, 1, 1]
Пример #9
0
def test_prometheus_middleware_exposes_metrics():
    """The Prometheus middleware serves metrics over HTTP after process boot."""
    # Given a broker and the prometheus middleware.
    # Constructed BEFORE the try block: the original built them inside it, so
    # a constructor failure made the finally clause raise NameError on `prom`,
    # masking the real error.
    broker = StubBroker()
    prom = Prometheus()
    try:
        # Boot the middleware (starts the metrics exposition endpoint).
        prom.after_process_boot(broker)

        # When I request metrics via HTTP
        with request.urlopen("http://127.0.0.1:9191") as resp:
            # Then the response should be successful
            assert resp.getcode() == 200
    finally:
        # Always tear the exposition endpoint down.
        prom.after_worker_shutdown(broker, None)
Пример #10
0
    def get_dramatiq_broker_object(self):
        """
        Initialize the Dramatiq broker from this object's configuration and
        register it as the process-wide default.

        Returns the broker instance. Raises ``ValueError`` for an unknown
        broker kind.
        """

        if self.DRAMATIQ_BROKER_URL:
            url = self.DRAMATIQ_BROKER_URL.strip()
            # str.strip() never returns None, so the original `url is None`
            # branch was dead code; treat an empty/whitespace URL as "stub",
            # which is what that branch evidently intended.
            if not url:
                kind, host, port = "stub", None, None
            else:
                # e.g. "redis://localhost:6379" -> ("redis", "localhost", 6379)
                kind, _, url = url.partition("://")
                host, _, port = url.partition(":")
                host = host or None
                port = int(port) if port else None
        else:
            kind = self.DRAMATIQ_BROKER_TYPE
            host = self.DRAMATIQ_BROKER_HOST or None
            port = self.DRAMATIQ_BROKER_PORT or None

        # Keep only the connection arguments that were actually provided.
        kwargs = {k: v for k, v in (("host", host), ("port", port)) if v is not None}

        # Initialize the broker for the configured backend.
        if kind == "stub":
            from dramatiq.brokers.stub import StubBroker

            broker = StubBroker()
        elif kind == "redis":
            from dramatiq.brokers.redis import RedisBroker

            broker = RedisBroker(**kwargs)
        elif kind == "rabbitmq":
            from dramatiq.brokers.rabbitmq import RabbitmqBroker

            broker = RabbitmqBroker(**kwargs)
        else:
            raise ValueError(f"invalid dramatiq broker: {kind}")

        # Configure as the global default and return it.
        dramatiq.set_broker(broker)
        return broker
Пример #11
0
def app_with_scout(config=None):
    """
    Context manager that configures a Dramatiq app with Scout middleware
    installed.

    Yields an ``App`` namedtuple exposing the broker, a started worker and
    two registered actors (``hello`` returns a greeting, ``fail`` raises);
    on exit the worker is stopped and the Scout configuration is reset.

    NOTE(review): this is a generator function — presumably decorated with
    ``@contextmanager`` outside this view; confirm at the definition site.
    """
    # Enable Scout by default in tests.
    if config is None:
        config = {"monitor": True}

    # Disable running the agent.
    config["core_agent_launch"] = False

    broker = StubBroker()
    broker.emit_after("process_boot")
    dramatiq.set_broker(broker)

    @dramatiq.actor(max_retries=0)
    def hello():
        return "Hello World!"

    @dramatiq.actor(max_retries=0)
    def fail():
        raise ValueError("BØØM!")  # non-ASCII

    # NOTE(review): worker_timeout=0 — presumably so the test worker polls
    # without waiting between messages; confirm against dramatiq docs.
    worker = dramatiq.Worker(broker, worker_timeout=0)

    # Setup according to https://docs.scoutapm.com/#dramatiq
    Config.set(**config)
    # Insert Scout before the first existing middleware, per the docs above.
    broker.add_middleware(ScoutMiddleware(),
                          before=broker.middleware[0].__class__)
    worker.start()

    App = namedtuple("App", ["broker", "worker", "hello", "fail"])
    try:
        yield App(broker=broker, worker=worker, hello=hello, fail=fail)
    finally:
        worker.stop()
        # Reset Scout configuration.
        Config.reset_all()
    def test_create_stat_async(
        self,
        transactional_db,
        broker: stub.StubBroker,
        worker: dramatiq.Worker,
        hosting_provider_with_sample_user: ac_models.Hostingprovider,
        green_ip: gc_models.GreencheckIp,
        client,
    ):
        """
        Create a collection of daily stats, for a range of dates provided,
        but have a worker create the stats asynchronously.
        """

        broker.declare_queue("default")
        assert gc_models.DailyStat.objects.count() == 0
        # set up our date range
        generated_dates = self._set_up_dates_for_last_week()

        # One greencheck per day, two hours into the day.
        for date in generated_dates:
            gc_factories.GreencheckFactory.create(date=date +
                                                  relativedelta(hours=2))

        chosen_date = str(generated_dates[0].date())

        # we use the 'send' with the 'transactional_db' fixture here instead of db
        # because if we use the regular db fixture, the workers can not see what is
        # happening 'inside' this test. TODO: check that this really is the
        # explanation for this strange test behaviour

        gc_tasks.create_stat_async.send(date_string=chosen_date,
                                        query_name="total_count")

        # Wait for all the tasks to be processed
        broker.join(gc_tasks.create_stat_async.queue_name)
        worker.join()

        # have we generated the daily stats?
        assert gc_models.DailyStat.objects.count() == 3

        # do they have the right date?
        for stat in gc_models.DailyStat.objects.all():
            assert str(stat.stat_date) == chosen_date

        # do the stats count up to what we expect?
        green_daily_stat = gc_models.DailyStat.objects.filter(
            green=gc_choices.BoolChoice.YES).first()
        grey_daily_stat = gc_models.DailyStat.objects.filter(
            green=gc_choices.BoolChoice.NO).first()
        mixed_daily_stat = gc_models.DailyStat.objects.exclude(
            green__in=[gc_choices.BoolChoice.YES, gc_choices.BoolChoice.NO
                       ]).first()

        assert green_daily_stat.count == 0
        assert grey_daily_stat.count == 1
        assert mixed_daily_stat.count == 1
Пример #13
0
def broker(backend: WriterBackend) -> StubBroker:
    """Return a booted StubBroker wired with the task-logs middleware and
    registered as the global default broker."""
    test_broker = StubBroker(middleware=[TaskLogsMiddleware(backend=backend)])
    test_broker.emit_after("process_boot")
    dramatiq.set_broker(test_broker)
    return test_broker
Пример #14
0

class InitDB(dramatiq.Middleware):
    """Dramatiq middleware that binds the database connection on worker boot
    and signals shutdown when the worker stops.
    """

    def before_worker_boot(self, broker, worker):
        # Bind the database before any actor runs.
        async def run():
            from . import db

            await db.db.set_bind(db.CONNECTION_STR)

        # NOTE(review): `loop` is a module-level event loop defined outside
        # this view — confirm it is not already running when this fires.
        loop.run_until_complete(run())

    def before_worker_shutdown(self, broker, worker):
        # Flag shutdown via the module-level `shutdown` object (defined
        # elsewhere; presumably a threading/asyncio Event — confirm).
        shutdown.set()


# Middleware stack shared by both the test and the production broker.
MIDDLEWARE = [
    m()
    for m in (AgeLimit, ShutdownNotifications, Callbacks, Pipelines, Retries)
]

# In-memory stub broker under test; Redis-backed broker otherwise.
if TEST:
    broker = StubBroker(middleware=MIDDLEWARE)
    broker.emit_after("process_boot")
else:
    broker = RedisBroker(
        connection_pool=redis.ConnectionPool.from_url(REDIS_HOST),
        middleware=MIDDLEWARE)

# Add the DB lifecycle hooks, then register as the global default broker.
broker.add_middleware(InitDB())
dramatiq.set_broker(broker)
Пример #15
0
import contextlib
import os
import signal
import time

from dramatiq.brokers.stub import StubBroker

from .common import skip_on_windows

# Module-level broker; the CLI test below points a worker process at
# "tests.test_pidfile:broker", so it must exist at import time.
broker = StubBroker()


def remove(filename):
    """Delete *filename*, ignoring any OS-level error (e.g. file missing).

    Best-effort cleanup helper: failures are deliberately swallowed so
    teardown never masks the real test outcome.
    """
    with contextlib.suppress(OSError):
        os.remove(filename)


@skip_on_windows
def test_cli_scrubs_stale_pid_files(start_cli):
    try:
        # Given that I have an existing file containing an old pid
        filename = "test_scrub.pid"
        with open(filename, "w") as f:
            f.write("999999")

        # When I try to start the cli and pass that file as a PID file
        proc = start_cli("tests.test_pidfile:broker", extra_args=["--pid-file", filename])

        # And I wait for it to write the pid file
        time.sleep(1)
Пример #16
0
import os

from dramatiq.brokers.rabbitmq import RabbitmqBroker
from dramatiq.brokers.stub import StubBroker

# Unit tests use an in-memory stub broker; otherwise connect to RabbitMQ.
if os.getenv("UNIT_TESTS") == "1":
    broker = StubBroker(worker_timeout=1000, worker_threads=1)
    broker.emit_after("process_boot")
else:
    broker = RabbitmqBroker()
Пример #17
0
from lib.telegram import TelegramBot
from lib.telegram.command import Command

logger = logging.getLogger(__name__)
logger.setLevel(settings.LOG_LEVEL)

# Declared up front so both branches below assign the same typed names.
rate_limits_backend: RateLimiterBackend
broker: dramatiq.Broker
result_backend: ResultBackend

if settings.UNIT_TESTS:
    # Setup in-memory stub backends for unit tests
    result_backend = ResultStubBackend()
    rate_limits_backend = RateLimitsStubBackend()
    # Setup brokers
    broker = StubBroker()
else:
    logger.info("Setup redis broker")
    # Setup Redis-backed result and rate-limit backends
    result_backend = ResultRedisBackend(url=settings.RESULT_REDIS_URL)
    rate_limits_backend = RateLimitsRedisBackend(
        url=settings.RATE_LIMITS_REDIS_URL)
    # Setup brokers
    broker = RedisBroker(url=settings.BROKER_REDIS_URL)

# Attach the results middleware regardless of which broker was chosen.
results = Results(backend=result_backend)
broker.add_middleware(results)

if settings.PROMETHEUS_METRICS_SERVER_ENABLE:
    broker.add_middleware(
        Prometheus(
Пример #18
0
def stub_broker():
    """Yield a StubBroker initialized through the project's actor setup."""
    from dramatiq.brokers.stub import StubBroker

    test_broker = StubBroker()
    actors.initialize_broker(test_broker)
    yield test_broker
Пример #19
0
import contextlib
from unittest.mock import MagicMock

import dramatiq
import pytest
from dramatiq import Worker
from dramatiq.brokers.stub import StubBroker
from sqlalchemy import MetaData, Table

from events_api import tasks
from events_api.app import create_app
from events_api.core.project import database

# Module-level stub broker shared by this test module; the boot signal is
# emitted once at import time.
broker = StubBroker()
broker.emit_after("process_boot")


def clear_tables(engine, table_list=None):
    meta = MetaData()
    with contextlib.closing(engine.connect()) as con:
        trans = con.begin()
        if table_list is None:
            meta.reflect(bind=engine)
            for table in reversed(meta.sorted_tables):
                con.execute(table.delete())
        else:
            for table_name in table_list:
                table = Table(table_name,
                              meta,
                              autoload=True,
                              autoload_with=engine)
Пример #20
0
from pathlib import PureWindowsPath, PurePosixPath

import docker as _docker
import dramatiq
import socketio

from dramatiq.brokers.rabbitmq import RabbitmqBroker
from dramatiq.brokers.stub import StubBroker

from api import config

# Docker client built from the environment (DOCKER_HOST etc.).
docker = _docker.from_env()
# In-memory broker for unit tests; RabbitMQ otherwise.
broker = StubBroker() if config.UNIT_TESTING else RabbitmqBroker(
    host=config.RABBITMQ_HOST)
dramatiq.set_broker(broker)
# Write-only socket.io manager (write_only=True) for emitting events.
external_sio = socketio.AsyncAioPikaManager(write_only=True)

# Pick the path flavour matching the Docker host's OS so host paths are
# joined/parsed correctly.
HOST_PATH_TYPE = PureWindowsPath if config.DOCKER_HOST_OS.lower(
) == 'windows' else PurePosixPath