Example 1
async def test_error_success(arq_redis: ArqRedis, worker):
    j = await arq_redis.enqueue_job('fails')
    worker: Worker = worker(functions=[func(fails, name='fails')])
    await worker.async_run()
    assert (worker.jobs_complete, worker.jobs_failed, worker.jobs_retried) == (0, 1, 0)
    info = await j.result_info()
    assert info.success is False
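These tests assume shared scaffolding that is never shown: module-level task functions (foobar, fails) and the arq_redis / worker fixtures. A minimal sketch of what that conftest presumably looks like, inferred from the examples themselves (illustrative only, not arq's actual conftest):

import pytest
from arq.connections import ArqRedis, RedisSettings, create_pool  # ArqRedis is what the tests annotate
from arq.worker import Worker, func


async def foobar(ctx):
    return 42


async def fails(ctx):
    # several examples below match on exactly this message
    raise TypeError('my type error')


@pytest.fixture
async def arq_redis():
    # assumes a local Redis; flush so the key assertions start from a clean slate
    redis = await create_pool(RedisSettings())
    await redis.flushall()
    yield redis
    await redis.close()


@pytest.fixture
async def worker(arq_redis):
    worker_ = None

    def create(functions=[], burst=True, poll_delay=0, arq_redis=arq_redis, **kwargs):
        # tests call this factory as worker(...); burst mode makes main() return once the queue is empty
        nonlocal worker_
        worker_ = Worker(functions=functions, redis_pool=arq_redis, burst=burst, poll_delay=poll_delay, **kwargs)
        return worker_

    yield create

    if worker_ is not None:
        await worker_.close()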
Example 2
async def test_run_check_error2(arq_redis: ArqRedis, worker):
    await arq_redis.enqueue_job('fails')
    await arq_redis.enqueue_job('fails')
    worker: Worker = worker(functions=[func(fails, name='fails')])
    with pytest.raises(FailedJobs, match='2 jobs failed') as exc_info:
        await worker.run_check()
    assert len(exc_info.value.job_results) == 2
Example 3
async def test_enqueue_job(arq_redis: ArqRedis, worker):
    async def foobar(ctx):
        return 42

    j = await arq_redis.enqueue_job('foobar')
    worker: Worker = worker(functions=[func(foobar, name='foobar')])
    await worker.main()
    r = await j.result(poll_delay=0)
    assert r == 42
Example 4
async def test_job_error(arq_redis: ArqRedis, worker):
    async def foobar(ctx):
        raise RuntimeError('foobar error')

    j = await arq_redis.enqueue_job('foobar')
    worker: Worker = worker(functions=[func(foobar, name='foobar')])
    await worker.main()

    with pytest.raises(RuntimeError, match='foobar error'):
        await j.result(poll_delay=0)
Example 5
async def test_enqueue_job_nested(arq_redis: ArqRedis, worker):
    async def foobar(ctx):
        return 42

    async def parent_job(ctx):
        inner_job = await ctx['redis'].enqueue_job('foobar')
        return inner_job.job_id

    job = await arq_redis.enqueue_job('parent_job')
    worker: Worker = worker(functions=[
        func(parent_job, name='parent_job'),
        func(foobar, name='foobar')
    ])

    await worker.main()
    result = await job.result(poll_delay=0)
    assert result is not None
    inner_job = Job(result, arq_redis)
    inner_result = await inner_job.result(poll_delay=0)
    assert inner_result == 42
Example 6
async def test_custom_try2(arq_redis: ArqRedis, worker):
    async def foobar(ctx):
        if ctx['job_try'] == 3:
            raise Retry()
        return ctx['job_try']

    j1 = await arq_redis.enqueue_job('foobar', _job_try=3)
    w: Worker = worker(functions=[func(foobar, name='foobar')])
    await w.main()
    r = await j1.result(poll_delay=0)
    assert r == 4
Example 7
async def test_remain_keys_keep_results_forever(arq_redis: ArqRedis, worker):
    await arq_redis.enqueue_job('foobar', _job_id='testing')
    assert sorted(await arq_redis.keys('*')) == ['arq:job:testing', 'arq:queue']
    worker: Worker = worker(functions=[func(foobar)], keep_result_forever=True)
    await worker.main()
    assert sorted(await arq_redis.keys('*')) == [
        'arq:queue:health-check', 'arq:result:testing'
    ]
    ttl_result = await arq_redis.ttl('arq:result:testing')
    assert ttl_result == -1
Example 8
async def test_multi_exec(arq_redis: ArqRedis, worker, caplog):
    async def foo(ctx, v):
        return v + 1

    caplog.set_level(logging.DEBUG, logger='arq.worker')
    await arq_redis.enqueue_job('foo', 1, _job_id='testing')
    worker: Worker = worker(functions=[func(foo, name='foo')])
    await asyncio.gather(*[worker.start_jobs([b'testing']) for _ in range(5)])
    # debug(caplog.text)
    assert 'multi-exec error, job testing already started elsewhere' in caplog.text
    assert 'WatchVariableError' not in caplog.text
Example 9
async def test_return_exception(arq_redis: ArqRedis, worker):
    async def return_error(ctx):
        return TypeError('xxx')

    j = await arq_redis.enqueue_job('return_error')
    worker: Worker = worker(functions=[func(return_error, name='return_error')])
    await worker.async_run()
    assert (worker.jobs_complete, worker.jobs_failed, worker.jobs_retried) == (1, 0, 0)
    r = await j.result(poll_delay=0)
    assert isinstance(r, TypeError)
    info = await j.result_info()
    assert info.success is True
Example 10
async def test_max_bursts_dont_get(arq_redis: ArqRedis, worker):
    async def foo(ctx, v):
        return v + 1

    await arq_redis.enqueue_job('foo', 1)
    await arq_redis.enqueue_job('foo', 2)
    worker: Worker = worker(functions=[func(foo, name='foo')])

    worker.max_burst_jobs = 0
    assert len(worker.tasks) == 0
    await worker._poll_iteration()
    assert len(worker.tasks) == 0
Example 11
async def test_cant_pickle_result(arq_redis: ArqRedis, worker):
    class Foobar:
        def __getstate__(self):
            raise TypeError("this doesn't pickle")

    async def foobar(ctx):
        return Foobar()

    j1 = await arq_redis.enqueue_job('foobar')
    w: Worker = worker(functions=[func(foobar, name='foobar')])
    await w.main()
    with pytest.raises(PickleError):
        await j1.result(poll_delay=0)
Example 12
async def test_job_retry_dont_retry(arq_redis: ArqRedis, worker, caplog):
    async def retry(ctx):
        raise Retry(defer=0.01)

    caplog.set_level(logging.INFO)
    await arq_redis.enqueue_job('retry', _job_id='testing')
    worker: Worker = worker(functions=[func(retry, name='retry')])
    with pytest.raises(FailedJobs) as exc_info:
        await worker.run_check(retry_jobs=False)
    assert str(exc_info.value) == '1 job failed <Retry defer 0.01s>'

    assert '↻' not in caplog.text
    assert '! testing:retry failed, Retry: <Retry defer 0.01s>\n' in caplog.text
Example 13
async def test_enqueue_job_custom_queue(arq_redis: ArqRedis, worker):
    async def foobar(ctx):
        return 42

    async def parent_job(ctx):
        inner_job = await ctx['redis'].enqueue_job('foobar')
        return inner_job.job_id

    job = await arq_redis.enqueue_job('parent_job', _queue_name='spanner')

    worker: Worker = worker(
        functions=[func(parent_job, name='parent_job'), func(foobar, name='foobar')],
        arq_redis=None,
        queue_name='spanner',
    )

    await worker.main()
    inner_job_id = await job.result(poll_delay=0)
    assert inner_job_id is not None
    inner_job = Job(inner_job_id, arq_redis, _queue_name='spanner')
    inner_result = await inner_job.result(poll_delay=0)
    assert inner_result == 42
Example 14
async def test_cant_pickle_result(arq_redis: ArqRedis, worker):
    class Foobar:
        def __getstate__(self):
            raise TypeError("this doesn't pickle")

    async def foobar(ctx):
        return Foobar()

    j1 = await arq_redis.enqueue_job('foobar')
    w: Worker = worker(functions=[func(foobar, name='foobar')])
    await w.main()
    with pytest.raises(SerializationError, match='unable to serialize result'):
        await j1.result(poll_delay=0)
Example 15
async def test_enqueue_job_nested_custom_serializer(arq_redis_msgpack: ArqRedis, worker):
    async def foobar(ctx):
        return 42

    async def parent_job(ctx):
        inner_job = await ctx['redis'].enqueue_job('foobar')
        return inner_job.job_id

    job = await arq_redis_msgpack.enqueue_job('parent_job')

    worker: Worker = worker(
        functions=[func(parent_job, name='parent_job'), func(foobar, name='foobar')],
        arq_redis=None,
        job_serializer=msgpack.packb,
        job_deserializer=functools.partial(msgpack.unpackb, raw=False),
    )

    await worker.main()
    result = await job.result(poll_delay=0)
    assert result is not None
    inner_job = Job(result, arq_redis_msgpack, _deserializer=functools.partial(msgpack.unpackb, raw=False))
    inner_result = await inner_job.result(poll_delay=0)
    assert inner_result == 42
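The arq_redis_msgpack fixture used here is not shown either; presumably it is a pool created with msgpack serializers via create_pool's serializer hooks, roughly like this (a sketch, fixture name and cleanup as assumed above):

import functools

import msgpack
import pytest
from arq.connections import RedisSettings, create_pool


@pytest.fixture
async def arq_redis_msgpack():
    # jobs enqueued through this pool are serialized with msgpack instead of
    # pickle; the worker must be configured with the matching (de)serializers,
    # as the test above does
    redis = await create_pool(
        RedisSettings(),
        job_serializer=msgpack.packb,
        job_deserializer=functools.partial(msgpack.unpackb, raw=False),
    )
    yield redis
    await redis.close()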
Example 16
async def test_retry_lots(arq_redis: ArqRedis, worker, caplog):
    async def retry(ctx):
        raise Retry()

    caplog.set_level(logging.INFO)
    await arq_redis.enqueue_job('retry', _job_id='testing')
    worker: Worker = worker(functions=[func(retry, name='retry')])
    await worker.main()
    assert worker.jobs_complete == 0
    assert worker.jobs_failed == 1
    assert worker.jobs_retried == 5

    log = re.sub(r'\d+.\d\ds', 'X.XXs', '\n'.join(r.message for r in caplog.records))
    assert '  X.XXs ! testing:retry max retries 5 exceeded' in log
Example 17
async def test_max_bursts_multiple(arq_redis: ArqRedis, worker, caplog):
    async def foo(ctx, v):
        return v + 1

    caplog.set_level(logging.INFO)
    await arq_redis.enqueue_job('foo', 1)
    await arq_redis.enqueue_job('foo', 2)
    worker: Worker = worker(functions=[func(foo, name='foo')])
    assert await worker.run_check(max_burst_jobs=1) == 1
    assert worker.jobs_complete == 1
    assert worker.jobs_retried == 0
    assert worker.jobs_failed == 0
    assert 'foo(1)' in caplog.text
    assert 'foo(2)' not in caplog.text
Example 18
async def test_job_retry_max_jobs(arq_redis: ArqRedis, worker, caplog):
    async def retry(ctx):
        raise Retry(defer=0.01)

    caplog.set_level(logging.INFO)
    await arq_redis.enqueue_job('retry', _job_id='testing')
    worker: Worker = worker(functions=[func(retry, name='retry')])
    assert await worker.run_check(max_burst_jobs=1) == 0
    assert worker.jobs_complete == 0
    assert worker.jobs_retried == 1
    assert worker.jobs_failed == 0

    log = re.sub(r'(\d+).\d\ds', r'\1.XXs', caplog.text)
    assert '0.XXs ↻ testing:retry retrying job in 0.XXs\n' in log
    assert '0.XXs → testing:retry() try=2\n' not in log
Example 19
async def test_multi_exec(arq_redis: ArqRedis, worker, caplog):
    c = 0

    async def foo(ctx, v):
        nonlocal c
        c += 1
        return v + 1

    caplog.set_level(logging.DEBUG, logger='arq.worker')
    await arq_redis.enqueue_job('foo', 1, _job_id='testing')
    worker: Worker = worker(functions=[func(foo, name='foo')])
    await asyncio.gather(*[worker.start_jobs([b'testing']) for _ in range(5)])
    # debug(caplog.text)
    await worker.main()
    assert c == 1
Example 20
async def test_cancel_error(arq_redis: ArqRedis, worker, caplog):
    async def retry(ctx):
        if ctx['job_try'] == 1:
            raise asyncio.CancelledError()

    caplog.set_level(logging.INFO)
    await arq_redis.enqueue_job('retry', _job_id='testing')
    worker: Worker = worker(functions=[func(retry, name='retry')])
    await worker.main()
    assert worker.jobs_complete == 1
    assert worker.jobs_failed == 0
    assert worker.jobs_retried == 1

    log = re.sub(r'\d+.\d\ds', 'X.XXs', '\n'.join(r.message for r in caplog.records))
    assert 'X.XXs ↻ testing:retry cancelled, will be run again' in log
Example 21
async def test_unpickleable(arq_redis: ArqRedis, worker, caplog):
    caplog.set_level(logging.INFO)

    class Foo:
        pass

    async def example(ctx):
        return Foo()

    await arq_redis.enqueue_job('example', _job_id='testing')
    worker: Worker = worker(functions=[func(example, name='example')])
    await worker.main()

    log = re.sub(r'(\d+).\d\ds', r'\1.XXs', '\n'.join(r.message for r in caplog.records))
    assert 'error pickling result of testing:example' in log
Example 22
async def test_non_burst(arq_redis: ArqRedis, worker, caplog, loop):
    async def foo(ctx, v):
        return v + 1

    caplog.set_level(logging.INFO)
    await arq_redis.enqueue_job('foo', 1, _job_id='testing')
    worker: Worker = worker(functions=[func(foo, name='foo')])
    worker.burst = False
    t = loop.create_task(worker.main())
    await asyncio.sleep(0.1)
    t.cancel()
    assert worker.jobs_complete == 1
    assert worker.jobs_retried == 0
    assert worker.jobs_failed == 0
    assert '← testing:foo ● 2' in caplog.text
Example 23
async def test_max_jobs_completes(arq_redis: ArqRedis, worker):
    v = 0

    async def raise_second_time(ctx):
        nonlocal v
        v += 1
        if v > 1:
            raise ValueError('xxx')

    await arq_redis.enqueue_job('raise_second_time')
    await arq_redis.enqueue_job('raise_second_time')
    await arq_redis.enqueue_job('raise_second_time')
    worker: Worker = worker(functions=[func(raise_second_time, name='raise_second_time')])
    with pytest.raises(FailedJobs) as exc_info:
        await worker.run_check(max_burst_jobs=3)
    assert repr(exc_info.value).startswith('<2 jobs failed:')
Example 24
async def test_job_timeout(arq_redis: ArqRedis, worker, caplog):
    async def longfunc(ctx):
        await asyncio.sleep(0.3)

    caplog.set_level(logging.ERROR)
    await arq_redis.enqueue_job('longfunc', _job_id='testing')
    worker: Worker = worker(functions=[func(longfunc, name='longfunc')], job_timeout=0.2, poll_delay=0.1)
    assert worker.jobs_complete == 0
    assert worker.jobs_failed == 0
    assert worker.jobs_retried == 0
    await worker.main()
    assert worker.jobs_complete == 0
    assert worker.jobs_failed == 1
    assert worker.jobs_retried == 0
    log = re.sub(r'\d+.\d\ds', 'X.XXs', '\n'.join(r.message for r in caplog.records))
    assert 'X.XXs ! testing:longfunc failed, TimeoutError:' in log
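Here the limit is set worker-wide via job_timeout; func() also accepts a per-function timeout, so an equivalent setup for this single function would be (sketch):

# per-function timeout instead of the worker-wide job_timeout
worker: Worker = worker(functions=[func(longfunc, name='longfunc', timeout=0.2)], poll_delay=0.1)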
Example 25
async def test_job_retry(arq_redis: ArqRedis, worker, caplog):
    async def retry(ctx):
        if ctx['job_try'] <= 2:
            raise Retry(defer=0.01)

    caplog.set_level(logging.INFO)
    await arq_redis.enqueue_job('retry', _job_id='testing')
    worker: Worker = worker(functions=[func(retry, name='retry')])
    await worker.main()
    assert worker.jobs_complete == 1
    assert worker.jobs_failed == 0
    assert worker.jobs_retried == 2

    log = re.sub(r'(\d+).\d\ds', r'\1.XXs', '\n'.join(r.message for r in caplog.records))
    assert '0.XXs ↻ testing:retry retrying job in 0.XXs\n' in log
    assert '0.XXs → testing:retry() try=2\n' in log
    assert '0.XXs ← testing:retry ●' in log
Example 26
async def test_mung(arq_redis: ArqRedis, worker):
    """
    check a job can't be enqueued multiple times with the same id
    """
    counter = Counter()

    async def count(ctx, v):
        counter[v] += 1

    tasks = []
    for i in range(50):
        tasks.extend(
            [arq_redis.enqueue_job('count', i, _job_id=f'v-{i}'), arq_redis.enqueue_job('count', i, _job_id=f'v-{i}')]
        )
    shuffle(tasks)
    await asyncio.gather(*tasks)

    worker: Worker = worker(functions=[func(count, name='count')])
    await worker.main()
    assert counter.most_common(1)[0][1] == 1  # no job was enqueued twice
Example 27
async def test_abort_job_before(arq_redis: ArqRedis, worker, caplog, loop):
    async def longfunc(ctx):
        await asyncio.sleep(3600)

    caplog.set_level(logging.INFO)

    job = await arq_redis.enqueue_job('longfunc', _job_id='testing')

    worker: Worker = worker(functions=[func(longfunc, name='longfunc')], allow_abort_jobs=True, poll_delay=0.1)
    assert worker.jobs_complete == 0
    assert worker.jobs_failed == 0
    assert worker.jobs_retried == 0
    with pytest.raises(asyncio.TimeoutError):
        await job.abort(timeout=0)
    await worker.main()
    assert worker.jobs_complete == 0
    assert worker.jobs_failed == 1
    assert worker.jobs_retried == 0
    log = re.sub(r'\d+.\d\ds', 'X.XXs', '\n'.join(r.message for r in caplog.records))
    assert 'X.XXs ⊘ testing:longfunc aborted before start' in log
    await worker.main()
    assert worker.aborting_tasks == set()
    assert worker.job_tasks == {}
    assert worker.tasks == {}
Example 28
async def test_run_check_passes(arq_redis: ArqRedis, worker):
    await arq_redis.enqueue_job('foobar')
    await arq_redis.enqueue_job('foobar')
    worker: Worker = worker(functions=[func(foobar, name='foobar')])
    assert 2 == await worker.run_check()
Example 29
async def test_run_check_error(arq_redis: ArqRedis, worker):
    await arq_redis.enqueue_job('fails')
    worker: Worker = worker(functions=[func(fails, name='fails')])
    with pytest.raises(FailedJobs, match='1 job failed "TypeError: my type error"'):
        await worker.run_check()
Example 30
async def test_remain_keys_no_results(arq_redis: ArqRedis, worker):
    await arq_redis.enqueue_job('foobar', _job_id='testing')
    assert sorted(await arq_redis.keys('*')) == ['arq:job:testing', 'arq:queue']
    worker: Worker = worker(functions=[func(foobar, keep_result=0)])
    await worker.main()
    assert sorted(await arq_redis.keys('*')) == ['arq:health-check']
Example 31
class Settings:
    functions = [func(foobar, name='foobar')]
    burst = True
    poll_delay = 0
    queue_read_limit = 10
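A settings class like this is what the arq CLI consumes; assuming it lives in a module named settings_demo (hypothetical name), it can be run from the shell or programmatically:

# shell: arq settings_demo.Settings
from arq.worker import run_worker

run_worker(Settings)  # builds a Worker from the class attributes and runs it (blocking)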
Example 32
class Settings:
    functions = [func(foobar, name='foobar')]
    burst = True
    poll_delay = 0
Example 33
async def test_set_health_check_key(arq_redis: ArqRedis, worker):
    await arq_redis.enqueue_job('foobar', _job_id='testing')
    worker: Worker = worker(functions=[func(foobar, keep_result=0)], health_check_key='arq:test:health-check')
    await worker.main()
    assert sorted(await arq_redis.keys('*')) == ['arq:test:health-check']