async def test_enqueue_job_different_queues(narq_redis: NarqRedis, worker):
    """Jobs enqueued on distinct queues are each picked up only by the worker on that queue."""

    async def foobar(ctx):
        return 42

    job1 = await narq_redis.enqueue_job('foobar', _queue_name='narq:queue1')
    job2 = await narq_redis.enqueue_job('foobar', _queue_name='narq:queue2')
    runner1: Worker = worker(functions=[func(foobar, name='foobar')], queue_name='narq:queue1')
    runner2: Worker = worker(functions=[func(foobar, name='foobar')], queue_name='narq:queue2')
    await runner1.main()
    await runner2.main()
    assert await job1.result(pole_delay=0) == 42  # 1
    assert await job2.result(pole_delay=0) == 42  # 2
async def test_remain_keys_no_results(narq_redis: NarqRedis, worker):
    """With keep_result=0 the job result is discarded; only the health-check key survives."""
    await narq_redis.enqueue_job('foobar', _job_id='testing')
    assert sorted(await narq_redis.keys('*')) == ['narq:job:testing', 'narq:queue']
    runner: Worker = worker(functions=[func(foobar, keep_result=0)])
    await runner.main()
    assert sorted(await narq_redis.keys('*')) == ['narq:queue:health-check']
async def test_run_check_error2(narq_redis: NarqRedis, worker):
    """run_check() raises a FailedJobs that aggregates every failed job result."""
    for _ in range(2):
        await narq_redis.enqueue_job('fails')
    runner: Worker = worker(functions=[func(fails, name='fails')])
    with pytest.raises(FailedJobs, match='2 jobs failed:\n') as exc_info:
        await runner.run_check()
    assert len(exc_info.value.job_results) == 2
async def test_enqueue_job_nested(narq_redis: NarqRedis, worker):
    """A job can enqueue a second job via ctx['redis']; both run to completion."""

    async def foobar(ctx):
        return 42

    async def parent_job(ctx):
        inner_job = await ctx['redis'].enqueue_job('foobar')
        return inner_job.job_id

    job = await narq_redis.enqueue_job('parent_job')
    runner: Worker = worker(functions=[func(parent_job, name='parent_job'), func(foobar, name='foobar')])
    await runner.main()
    inner_job_id = await job.result(pole_delay=0)
    assert inner_job_id is not None
    # The parent returned the child's id; look it up and check the child's result too.
    inner_job = Job(inner_job_id, narq_redis)
    assert await inner_job.result(pole_delay=0) == 42
async def test_retry_lots_without_keep_result(narq_redis: NarqRedis, worker):
    """A job that keeps raising Retry with keep_result=0 finishes cleanly."""

    async def retry(ctx):
        raise Retry()

    await narq_redis.enqueue_job('retry', _job_id='testing')
    runner: Worker = worker(functions=[func(retry, name='retry')], keep_result=0)
    await runner.main()  # Should not raise MultiExecError
async def test_error_success(narq_redis: NarqRedis, worker):
    """A failing job increments jobs_failed and its result info reports success=False."""
    job = await narq_redis.enqueue_job('fails')
    runner: Worker = worker(functions=[func(fails, name='fails')])
    await runner.main()
    assert (runner.jobs_complete, runner.jobs_failed, runner.jobs_retried) == (0, 1, 0)
    info = await job.result_info()
    assert info.success is False
async def test_retry_lots_check(narq_redis: NarqRedis, worker, caplog):
    """run_check() surfaces exhausted retries as a FailedJobs error."""

    async def retry(ctx):
        raise Retry()

    caplog.set_level(logging.INFO)
    await narq_redis.enqueue_job('retry', _job_id='testing')
    runner: Worker = worker(functions=[func(retry, name='retry')])
    with pytest.raises(FailedJobs, match='max 5 retries exceeded'):
        await runner.run_check()
async def test_enqueue_job(narq_redis: NarqRedis, worker):
    """An enqueued job is executed by the worker and its result is retrievable."""

    async def foobar(ctx):
        return 42

    job = await narq_redis.enqueue_job('foobar')
    runner: Worker = worker(functions=[func(foobar, name='foobar')])
    await runner.main()
    assert await job.result(pole_delay=0) == 42  # 1
async def test_max_bursts_sub_call(narq_redis: NarqRedis, worker, caplog):
    """A burst limited to one job still runs the job that first job enqueued."""

    async def foo(ctx, v):
        return v + 1

    async def bar(ctx, v):
        await ctx['redis'].enqueue_job('foo', v + 1)

    caplog.set_level(logging.INFO)
    await narq_redis.enqueue_job('bar', 10)
    runner: Worker = worker(functions=[func(foo, name='foo'), func(bar, name='bar')])
    assert await runner.run_check(max_burst_jobs=1) == 1
    assert (runner.jobs_complete, runner.jobs_retried, runner.jobs_failed) == (1, 0, 0)
    assert 'bar(10)' in caplog.text
    assert 'foo' in caplog.text
async def test_job_error(narq_redis: NarqRedis, worker):
    """An exception raised inside the job function propagates out of Job.result()."""

    async def foobar(ctx):
        raise RuntimeError('foobar error')

    job = await narq_redis.enqueue_job('foobar')
    runner: Worker = worker(functions=[func(foobar, name='foobar')])
    await runner.main()
    with pytest.raises(RuntimeError, match='foobar error'):
        await job.result(pole_delay=0)
async def test_multi_exec(narq_redis: NarqRedis, worker, caplog):
    """Concurrent attempts to start one job hit the multi-exec guard, not WatchVariableError."""

    async def foo(ctx, v):
        return v + 1

    caplog.set_level(logging.DEBUG, logger='narq.worker')
    await narq_redis.enqueue_job('foo', 1, _job_id='testing')
    runner: Worker = worker(functions=[func(foo, name='foo')])
    # Race five start attempts for the same job id.
    await asyncio.gather(*(runner.start_jobs(['testing']) for _ in range(5)))
    # debug(caplog.text)
    assert 'multi-exec error, job testing already started elsewhere' in caplog.text
    assert 'WatchVariableError' not in caplog.text
async def test_custom_try2(narq_redis: NarqRedis, worker):
    """_job_try sets the starting try count; one Retry bumps it before success."""

    async def foobar(ctx):
        if ctx['job_try'] == 3:
            raise Retry()
        return ctx['job_try']

    job = await narq_redis.enqueue_job('foobar', _job_try=3)
    runner: Worker = worker(functions=[func(foobar, name='foobar')])
    await runner.main()
    assert await job.result(pole_delay=0) == 4
async def test_max_bursts_dont_get(narq_redis: NarqRedis, worker):
    """With max_burst_jobs=0 a poll iteration starts no tasks at all."""

    async def foo(ctx, v):
        return v + 1

    await narq_redis.enqueue_job('foo', 1)
    await narq_redis.enqueue_job('foo', 2)
    runner: Worker = worker(functions=[func(foo, name='foo')])
    runner.max_burst_jobs = 0
    assert len(runner.tasks) == 0
    await runner._poll_iteration()
    assert len(runner.tasks) == 0
async def test_cant_pickle_result(narq_redis: NarqRedis, worker):
    """An unpicklable return value surfaces as SerializationError on result()."""

    class Foobar:
        def __getstate__(self):
            raise TypeError("this doesn't pickle")

    async def foobar(ctx):
        return Foobar()

    job = await narq_redis.enqueue_job('foobar')
    runner: Worker = worker(functions=[func(foobar, name='foobar')])
    await runner.main()
    with pytest.raises(SerializationError, match='unable to serialize result'):
        await job.result(pole_delay=0)
async def test_job_retry_dont_retry(narq_redis: NarqRedis, worker, caplog):
    """With retry_jobs=False a Retry is recorded as a failure and never re-queued."""

    async def retry(ctx):
        raise Retry(defer=0.01)

    caplog.set_level(logging.INFO)
    await narq_redis.enqueue_job('retry', _job_id='testing')
    runner: Worker = worker(functions=[func(retry, name='retry')])
    with pytest.raises(FailedJobs) as exc_info:
        await runner.run_check(retry_jobs=False)
    assert str(exc_info.value) == '1 job failed <Retry defer 0.01s>'
    # The retry marker must never appear in the log when retries are disabled.
    assert '↻' not in caplog.text
    assert '! testing:retry failed, Retry: <Retry defer 0.01s>\n' in caplog.text
async def test_max_bursts_multiple(narq_redis: NarqRedis, worker, caplog):
    """max_burst_jobs=1 processes exactly one of the two queued jobs."""

    async def foo(ctx, v):
        return v + 1

    caplog.set_level(logging.INFO)
    await narq_redis.enqueue_job('foo', 1)
    await narq_redis.enqueue_job('foo', 2)
    runner: Worker = worker(functions=[func(foo, name='foo')])
    assert await runner.run_check(max_burst_jobs=1) == 1
    assert (runner.jobs_complete, runner.jobs_retried, runner.jobs_failed) == (1, 0, 0)
    assert 'foo(1)' in caplog.text
    assert 'foo(2)' not in caplog.text
async def test_return_exception(narq_redis: NarqRedis, worker):
    """Returning (not raising) an exception counts as success and round-trips the value."""

    async def return_error(ctx):
        return TypeError('xxx')

    job = await narq_redis.enqueue_job('return_error')
    runner: Worker = worker(functions=[func(return_error, name='return_error')])
    await runner.main()
    assert (runner.jobs_complete, runner.jobs_failed, runner.jobs_retried) == (1, 0, 0)
    result = await job.result(pole_delay=0)
    assert isinstance(result, TypeError)
    info = await job.result_info()
    assert info.success is True
async def test_enqueue_job_nested_custom_serializer(narq_redis_msgpack: NarqRedis, worker):
    """Nested job enqueueing works when jobs are (de)serialized with msgpack."""

    async def foobar(ctx):
        return 42

    async def parent_job(ctx):
        inner_job = await ctx['redis'].enqueue_job('foobar')
        return inner_job.job_id

    # One deserializer shared by the worker and the result lookup below.
    deserializer = functools.partial(msgpack.unpackb, raw=False)
    job = await narq_redis_msgpack.enqueue_job('parent_job')
    runner: Worker = worker(
        functions=[func(parent_job, name='parent_job'), func(foobar, name='foobar')],
        narq_redis=None,
        job_serializer=msgpack.packb,
        job_deserializer=deserializer,
    )
    await runner.main()
    inner_job_id = await job.result(pole_delay=0)
    assert inner_job_id is not None
    inner_job = Job(inner_job_id, narq_redis_msgpack, _deserializer=deserializer)
    assert await inner_job.result(pole_delay=0) == 42
async def test_non_burst(narq_redis: NarqRedis, worker, caplog, loop):
    """A non-burst worker keeps running, processing jobs until its task is cancelled."""

    async def foo(ctx, v):
        return v + 1

    caplog.set_level(logging.INFO)
    await narq_redis.enqueue_job('foo', 1, _job_id='testing')
    runner: Worker = worker(functions=[func(foo, name='foo')])
    runner.burst = False
    task = loop.create_task(runner.main())
    await asyncio.sleep(0.1)  # give the worker time to pick up the job
    task.cancel()
    assert (runner.jobs_complete, runner.jobs_retried, runner.jobs_failed) == (1, 0, 0)
    assert '← testing:foo ● 2' in caplog.text
async def test_job_retry_max_jobs(narq_redis: NarqRedis, worker, caplog):
    """A job retried inside a max_burst_jobs=1 run counts as retried, not complete or failed."""

    async def retry(ctx):
        raise Retry(defer=0.01)

    caplog.set_level(logging.INFO)
    await narq_redis.enqueue_job('retry', _job_id='testing')
    worker: Worker = worker(functions=[func(retry, name='retry')])
    assert await worker.run_check(max_burst_jobs=1) == 0
    assert worker.jobs_complete == 0
    assert worker.jobs_retried == 1
    assert worker.jobs_failed == 0
    # Escape the decimal point (was an unescaped '.', which matches any character
    # and could normalize unrelated text such as '12a34s').
    log = re.sub(r'(\d+)\.\d\ds', r'\1.XXs', caplog.text)
    assert '0.XXs ↻ testing:retry retrying job in 0.XXs\n' in log
    assert '0.XXs → testing:retry() try=2\n' not in log
async def test_retry_lots(narq_redis: NarqRedis, worker, caplog):
    """A job that always raises Retry fails after the maximum of 5 retries."""

    async def retry(ctx):
        raise Retry()

    caplog.set_level(logging.INFO)
    await narq_redis.enqueue_job('retry', _job_id='testing')
    worker: Worker = worker(functions=[func(retry, name='retry')])
    await worker.main()
    assert worker.jobs_complete == 0
    assert worker.jobs_failed == 1
    assert worker.jobs_retried == 5
    # Escape the decimal point (was an unescaped '.', which matches any character).
    log = re.sub(r'\d+\.\d\ds', 'X.XXs', '\n'.join(r.message for r in caplog.records))
    assert ' X.XXs ! testing:retry max retries 5 exceeded' in log
async def test_retry_job_error(narq_redis: NarqRedis, worker, caplog):
    """RetryJob on the first try re-queues the job; the second try succeeds."""

    async def retry(ctx):
        if ctx['job_try'] == 1:
            raise RetryJob()

    caplog.set_level(logging.INFO)
    await narq_redis.enqueue_job('retry', _job_id='testing')
    worker: Worker = worker(functions=[func(retry, name='retry')])
    await worker.main()
    assert worker.jobs_complete == 1
    assert worker.jobs_failed == 0
    assert worker.jobs_retried == 1
    # Escape the decimal point (was an unescaped '.', which matches any character).
    log = re.sub(r'\d+\.\d\ds', 'X.XXs', '\n'.join(r.message for r in caplog.records))
    assert 'X.XXs ↻ testing:retry cancelled, will be run again' in log
async def test_unpickleable(narq_redis: NarqRedis, worker, caplog):
    """An unpicklable result is logged by the worker as a serialization error."""
    caplog.set_level(logging.INFO)

    class Foo:
        pass

    async def example(ctx):
        return Foo()

    await narq_redis.enqueue_job('example', _job_id='testing')
    worker: Worker = worker(functions=[func(example, name='example')])
    await worker.main()
    # Escape the decimal point (was an unescaped '.', which matches any character).
    log = re.sub(r'(\d+)\.\d\ds', r'\1.XXs', '\n'.join(r.message for r in caplog.records))
    assert 'error serializing result of testing:example' in log
async def test_max_jobs_completes(narq_redis: NarqRedis, worker):
    """When every job after the first raises, run_check reports all failures in FailedJobs."""
    calls = 0

    async def raise_second_time(ctx):
        nonlocal calls
        calls += 1
        if calls > 1:
            raise ValueError('xxx')

    for _ in range(3):
        await narq_redis.enqueue_job('raise_second_time')
    runner: Worker = worker(functions=[func(raise_second_time, name='raise_second_time')])
    with pytest.raises(FailedJobs) as exc_info:
        await runner.run_check(max_burst_jobs=3)
    assert repr(exc_info.value).startswith('<2 jobs failed:')
async def test_job_retry(narq_redis: NarqRedis, worker, caplog):
    """A job that retries twice via Retry eventually completes, logging each retry."""

    async def retry(ctx):
        if ctx['job_try'] <= 2:
            raise Retry(defer=0.01)

    caplog.set_level(logging.INFO)
    await narq_redis.enqueue_job('retry', _job_id='testing')
    worker: Worker = worker(functions=[func(retry, name='retry')])
    await worker.main()
    assert worker.jobs_complete == 1
    assert worker.jobs_failed == 0
    assert worker.jobs_retried == 2
    # Escape the decimal point (was an unescaped '.', which matches any character).
    log = re.sub(r'(\d+)\.\d\ds', r'\1.XXs', '\n'.join(r.message for r in caplog.records))
    assert '0.XXs ↻ testing:retry retrying job in 0.XXs\n' in log
    assert '0.XXs → testing:retry() try=2\n' in log
    assert '0.XXs ← testing:retry ●' in log
async def test_mung(narq_redis: NarqRedis, worker):
    """
    check a job can't be enqueued multiple times with the same id
    """
    counter = Counter()

    async def count(ctx, v):
        counter[v] += 1

    # Enqueue every id twice, in random order, and race them all.
    enqueues = []
    for i in range(50):
        enqueues.append(narq_redis.enqueue_job('count', i, _job_id=f'v-{i}'))
        enqueues.append(narq_redis.enqueue_job('count', i, _job_id=f'v-{i}'))
    shuffle(enqueues)
    await asyncio.gather(*enqueues, return_exceptions=True)
    runner: Worker = worker(functions=[func(count, name='count')])
    await runner.main()
    assert counter.most_common(1)[0][1] == 1  # no job got enqueued twice
async def test_set_health_check_key(narq_redis: NarqRedis, worker):
    """A custom health_check_key is the only key left after a keep_result=0 run."""
    await narq_redis.enqueue_job('foobar', _job_id='testing')
    runner: Worker = worker(
        functions=[func(foobar, keep_result=0)], health_check_key='narq:test:health-check'
    )
    await runner.main()
    assert sorted(await narq_redis.keys('*')) == ['narq:test:health-check']
class Settings:
    """Worker settings object consumed by the worker-from-settings tests."""

    functions = [func(foobar, name='foobar')]
    burst = True
    poll_delay = 0
    queue_read_limit = 10
async def test_run_check_passes(narq_redis: NarqRedis, worker):
    """run_check() returns the number of successfully processed jobs."""
    await narq_redis.enqueue_job('foobar')
    await narq_redis.enqueue_job('foobar')
    runner: Worker = worker(functions=[func(foobar, name='foobar')])
    assert await runner.run_check() == 2
async def test_run_check_error(narq_redis: NarqRedis, worker):
    """A single failing job makes run_check raise FailedJobs carrying its error."""
    await narq_redis.enqueue_job('fails')
    runner: Worker = worker(functions=[func(fails, name='fails')])
    with pytest.raises(FailedJobs, match=r"1 job failed TypeError\('my type error'"):
        await runner.run_check()