Example #1
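Scheduler lifecycle hooks: `on_scheduler_startup` and `on_scheduler_shutdown` each run exactly once, in order, and both receive a scheduler context that already holds the `ArqRedis` connection.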
async def test_startup_shutdown(arq_redis, scheduler_factory):
    calls = []

    async def startup(scheduler_ctx):
        scheduler_ctx['test'] = 123
        assert isinstance(scheduler_ctx.get('redis'), ArqRedis)
        calls.append('startup')

    async def shutdown(scheduler_ctx):
        assert scheduler_ctx['test'] == 123
        assert isinstance(scheduler_ctx.get('redis'), ArqRedis)
        calls.append('shutdown')

    darq = Darq(
        redis_settings=redis_settings,
        burst=True,
        on_scheduler_startup=startup,
        on_scheduler_shutdown=shutdown,
    )
    foobar_task = darq.task(foobar)
    darq.add_cron_jobs(cron(foobar_task, hour=1))

    scheduler = scheduler_factory(darq)
    await scheduler.main()
    await scheduler.close()

    assert calls == ['startup', 'shutdown']
Example #2
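Queue read limit: with `queue_read_limit=2`, each poll iteration pulls at most two of the four queued jobs, even though `max_jobs=4` would allow more to run concurrently.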
async def test_custom_queue_read_limit(arq_redis, worker_factory):
    darq = Darq(
        redis_settings=redis_settings, burst=True, max_jobs=4,
        queue_read_limit=2,
    )
    foobar_task = darq.task(foobar)
    await darq.connect()

    for _ in range(4):
        await foobar_task.delay()

    assert await arq_redis.zcard(default_queue_name) == 4
    worker = worker_factory(darq)
    worker.pool = arq_redis
    assert worker.jobs_complete == 0
    assert worker.jobs_failed == 0
    assert worker.jobs_retried == 0

    await worker._poll_iteration()
    await asyncio.sleep(0.1)
    assert await arq_redis.zcard(default_queue_name) == 2
    assert worker.jobs_complete == 2
    assert worker.jobs_failed == 0
    assert worker.jobs_retried == 0

    await worker._poll_iteration()
    await asyncio.sleep(0.1)
    assert await arq_redis.zcard(default_queue_name) == 0
    assert worker.jobs_complete == 4
    assert worker.jobs_failed == 0
    assert worker.jobs_retried == 0
Example #3
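Custom health-check key: the worker writes its health record under the configured `health_check_key` rather than the default, leaving only that key and the job result in Redis.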
async def test_set_health_check_key(arq_redis, worker_factory):
    darq = Darq(
        redis_settings=redis_settings, burst=True, poll_delay=0,
        health_check_key='darq:test:health-check',
    )
    darq.task(foobar)
    await arq_redis.enqueue_job(
        'tests.test_worker.foobar', [], {}, job_id='testing',
    )
    worker = worker_factory(darq)
    await worker.main()
    assert sorted(await arq_redis.keys('*')) == [
        'arq:result:testing',
        'darq:test:health-check',
    ]
Example #4
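Incompatible serializers: the job is enqueued through the default (pickle) connection while the worker deserializes with msgpack, so the payload cannot be decoded and the job is counted as failed, not retried.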
async def test_incompatible_serializers_2(arq_redis, worker_factory):
    darq = Darq(
        redis_settings=redis_settings, burst=True,
        job_serializer=msgpack.packb,
        job_deserializer=functools.partial(msgpack.unpackb, raw=False),
    )
    darq.task(foobar)

    await arq_redis.enqueue_job(
        'tests.test_worker.foobar', [], {}, job_id='job_id',
    )
    worker = worker_factory(darq)
    await worker.main()
    assert worker.jobs_complete == 0
    assert worker.jobs_failed == 1
    assert worker.jobs_retried == 0
Example #5
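Custom serializers end to end: with msgpack configured on both sides, the job can be enqueued, inspected via `info()`, executed, and its result read back.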
async def test_custom_serializers(arq_redis_msgpack, worker_factory):
    darq = Darq(
        redis_settings=redis_settings, burst=True,
        job_serializer=msgpack.packb,
        job_deserializer=functools.partial(msgpack.unpackb, raw=False),
    )
    foobar_task = darq.task(foobar)
    await darq.connect()

    j = await foobar_task.apply_async([], {}, job_id='job_id')
    worker = worker_factory(darq)
    info = await j.info()
    assert info.function == 'tests.test_worker.foobar'
    assert await worker.run_check() == 1
    assert await j.result() == 42
    r = await j.info()
    assert r.result == 42
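The same round trip works outside the test harness. Below is a minimal sketch assembled only from calls shown in these examples (`connect`, `apply_async`, `result`); the `tests` import path for `redis_settings` and the `meaning` task are illustrative assumptions, not part of the test suite.

import asyncio

from darq import Darq

from tests import redis_settings  # hypothetical path; the tests use `from . import redis_settings`

darq = Darq(redis_settings=redis_settings)


@darq.task
async def meaning(ctx):  # hypothetical task; the worker injects `ctx`, as in Example #8
    return 42


async def main():
    await darq.connect()                     # open the Redis pool, as Examples #2, #5 and #7 do
    job = await meaning.apply_async([], {})  # enqueue and get a job handle back
    print(await job.result())                # 42, once a running worker has executed the job


if __name__ == '__main__':
    asyncio.run(main())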
Example #6
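Health-check logging: with `health_check_interval=0` a health record is written on every run, and the log line reports the job counters and queue depth.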
async def test_log_health_check(arq_redis, worker_factory, caplog):
    caplog.set_level(logging.INFO)
    darq = Darq(
        redis_settings=redis_settings, burst=True, health_check_interval=0,
    )
    foobar_task = darq.task(foobar)
    await darq.connect()

    await foobar_task.apply_async([], {}, job_id='testing')
    worker = worker_factory(darq)
    await worker.main()
    await worker.main()
    await worker.main()
    assert worker.jobs_complete == 1

    expected = 'j_complete=1 j_failed=0 j_retried=0 j_ongoing=0 queued=0'
    assert expected in caplog.text
    # can happen more than once due to redis pool size
    # assert log.count('recording health') == 1
    assert 'recording health' in caplog.text
Example #7
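Worker lifecycle hooks: the worker-side counterpart of Example #1, where `on_startup` and `on_shutdown` each run exactly once around `worker.main()` and `worker.close()`.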
async def test_startup_shutdown(arq_redis, worker_factory):
    calls = []

    async def startup(ctx):
        calls.append('startup')

    async def shutdown(ctx):
        calls.append('shutdown')

    darq = Darq(
        redis_settings=redis_settings, burst=True,
        on_startup=startup, on_shutdown=shutdown,
    )
    foobar_task = darq.task(foobar)
    await darq.connect()

    await foobar_task.apply_async([], {}, job_id='testing')
    worker = worker_factory(darq)
    await worker.main()
    await worker.close()

    assert calls == ['startup', 'shutdown']
Example #8
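CLI tests: a complete module that defines a burst-mode Darq app with one task, then checks the `--help` output and runs a worker through the command-line interface.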
import re
from unittest.mock import patch

from aiohttp.test_utils import loop_context
from click.testing import CliRunner

from darq import Darq
from darq.cli import cli
from darq.cron import cron
from . import redis_settings

darq = Darq(redis_settings=redis_settings, burst=True)


@darq.task
async def foobar(ctx):
    return 42


def test_help():
    runner = CliRunner()
    result = runner.invoke(cli, ['--help'])
    assert result.exit_code == 0
    assert result.output.startswith('Usage: darq [OPTIONS] COMMAND [ARGS]...\n')


def test_worker_run():
    runner = CliRunner()
    with loop_context():
        result = runner.invoke(cli, ['-A', 'tests.test_cli.darq', 'worker'])
    assert result.exit_code == 0
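Outside the test runner, the invocation exercised above corresponds to the console command `darq -A tests.test_cli.darq worker`, which exits after draining the queue because the app is constructed with `burst=True`.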