def test_replay_filter(app, broker, failing_job):
    """replay_dead with a filter only requeues the jobs matching the predicate."""

    @app.task
    def mytask():
        return "1"

    @app.task
    def anothertask():
        return "2"

    def seed_dead(task_name, count, tries, max_retries):
        # Persist each job's status hash and push its uuid onto the DLQ stream.
        for _ in range(count):
            job = random_job(
                tries=tries, max_retries=max_retries, status=DEAD, task=task_name
            )
            app.client.hmset(app.keys.status(job), job.serialise())
            app.client.xadd(app.keys.dead, {"uuid": job.uuid})

    seed_dead(mytask.name, 10, tries=1, max_retries=0)
    seed_dead(anothertask.name, 5, tries=4, max_retries=3)

    before = get_state(app)
    assert len(before.queue.messages) == 0
    assert len(before.dead.messages) == 15

    # Only the 10 "mytask" jobs should match the predicate and be replayed.
    replayed = replay_dead(app, filter=lambda job: job.task.endswith("mytask"))
    assert len(replayed) == 10

    after = get_state(app)
    assert len(after.queue.messages) == 10
    assert len(after.dead.messages) == 5
def test_dead_e2e():
    """End-to-end: a failing task lands on the DLQ and replay_dead requeues it.

    Runs a real worker against a task that raises immediately with retries=0,
    so the broker dead-letters it on first failure; replay_dead then moves it
    back onto the main queue without resetting max_retries.
    """
    app = App(
        name="testapp",
        retry_backoff=lambda retries: 0.01,
        schedule_interval=0.1,
        heartbeat_interval=0.1,
        maintenance_interval=0.1,
        processes=1,
        concurrency=4,
        prefetch_count=1,
    )

    @app.task(retries=0)
    def example():
        # Fix: was an f-string with no placeholders (ruff F541).
        raise Chaos("Task failure")

    x = example.delay()

    # Process the queue, move the failure to the DLQ.
    with worker(app):
        state = wait_for_results(app, length=1, sleep=0.02, maxwait=1)
    assert len(state.dead.messages) == 1
    assert len(state.queue.messages) == 0
    assert get_job(app, x.job.uuid).max_retries == 0
    assert count_results(app) == 1

    # Process the DLQ, move the tasks back to the main queue.
    replay_dead(app)
    state = get_state(app)
    assert len(state.dead.messages) == 0
    assert len(state.queue.messages) == 1
    assert get_job(app, x.job.uuid).max_retries == 0
    assert count_results(app) == 1
def wait_for_results(app, length, sleep=0.01, maxwait=1):
    """Poll until the app has exactly `length` results, then return its state.

    Sleeps `sleep` seconds between polls and gives up after roughly `maxwait`
    seconds; the state snapshot is returned either way so callers can assert
    on it.

    Fix: the retry budget was computed with float floor-division
    (`maxwait // sleep`), which under-counts because of binary float rounding
    (e.g. `1 // 0.1 == 9.0`, `1 // 0.01 == 99.0`), silently shortening the
    wait. Use true division and round, with a floor of one attempt.
    """
    assert sleep <= maxwait
    tries = max(1, round(maxwait / sleep))
    while tries and count_results(app) != length:
        time.sleep(sleep)
        tries -= 1
    return get_state(app)
async def test_ack(app, broker, message, xid, consumer_id, job):
    """Acking a delivered message clears it from the queue and marks success."""
    await broker.read(consumer_id, count=1)

    def snapshot():
        # (queue length, pending count, schedule length, dead length, results)
        s = get_state(app)
        return (
            len(s.queue.messages),
            s.queue.groups[0].pending,
            len(s.schedule),
            len(s.dead.messages),
            count_results(app),
        )

    # Delivered but unacked: one message on the queue, one pending entry.
    assert snapshot() == (1, 1, 0, 0, 0)
    assert get_status(app, job.uuid) == status.SENT

    await broker.ack(xid, job)

    # Acked: queue drained, nothing pending, job recorded as successful.
    assert snapshot() == (0, 0, 0, 0, 0)
    assert get_status(app, job.uuid) == status.SUCCESS
async def test_maintenance(app, broker, messages, executor_id, consumer_id):
    """Maintenance reclaims pending messages and drops stale consumers and
    heartbeats, but only once the idle threshold has passed.
    """
    with freeze_time("2020-01-01 00:00:00"):
        # Deliver half the messages to one consumer and record a heartbeat.
        await broker.read(consumer_id, count=5)
        await broker.heartbeat(executor_id)
        state = get_state(app)
        # Stream ids of everything on the queue; reclaimed messages get new
        # ids, so comparing these sets shows whether maintenance touched them.
        xids = {m.id for m in state.queue.messages}
        assert len(state.queue.messages) == 10
        assert state.queue.groups[0].pending == 5
        assert state.queue.groups[0].consumers == 1
        assert len(state.schedule) == 0
        assert len(state.dead.messages) == 0
        assert len(state.heartbeats) == 1
        assert count_results(app) == 0
    with freeze_time(
        "2020-01-01 00:00:30"
    ):  # 30 seconds later, not passed threshold
        await broker.maintenance(threshold=59)
        state = get_state(app)
        new_xids = {m.id for m in state.queue.messages}
        # Below the 59s idle threshold nothing is reclaimed: identical ids,
        # consumer, pending entries, and heartbeat all still present.
        assert new_xids == xids
        assert len(state.queue.messages) == 10
        assert state.queue.groups[0].pending == 5
        assert state.queue.groups[0].consumers == 1
        assert len(state.schedule) == 0
        assert len(state.dead.messages) == 0
        assert len(state.heartbeats) == 1
        assert count_results(app) == 0
    with freeze_time(
        "2020-01-01 00:01:00"
    ):  # 1 minute later, passed threshold
        await broker.maintenance(threshold=59)
        state = get_state(app)
        new_xids = {m.id for m in state.queue.messages}
        # Past the threshold: the pending messages were reclaimed (new stream
        # ids), and the idle consumer and stale heartbeat were removed.
        assert new_xids != xids
        assert len(state.queue.messages) == 10
        assert state.queue.groups[0].pending == 0
        assert state.queue.groups[0].consumers == 0
        assert len(state.schedule) == 0
        assert len(state.dead.messages) == 0
        assert len(state.heartbeats) == 0
        assert count_results(app) == 0
def test_purge(app, broker, failing_job, dead_job):
    """purge_dead drops DLQ entries and deletes their job status records."""
    purged = purge_dead(app)
    assert len(purged) == 1

    state = get_state(app)
    assert not state.queue.messages
    assert not state.dead.messages

    # The status record was deleted too, so lookups now raise.
    with pytest.raises(JobNotFound):
        get_job(app, failing_job.uuid)
async def test_process_schedule(app, broker, jobs, messages):
    """Scheduled (retrying) jobs only move back onto the task queue once the
    clock passes their scheduled time.
    """
    with freeze_time("2020-01-01"):
        # Ack every delivered message and place its job on the retry schedule.
        for xid, job in jobs:
            job.status = EXECUTING
            await broker.ack_and_schedule(xid, job)
        state = get_state(app)
        assert len(state.queue.messages) == 0
        assert state.queue.groups[0].pending == 0
        assert len(state.schedule) == 10
        assert len(state.dead.messages) == 0
        assert count_results(app) == 0
        assert all(get_status(app, job.uuid) == RETRY for _, job in jobs)
    with freeze_time("1970-01-01"):
        # We're before the schedule time, no jobs should have moved.
        scheduled = await broker.process_schedule()
        assert len(scheduled) == 0
        state = get_state(app)
        assert len(state.queue.messages) == 0
        assert state.queue.groups[0].pending == 0
        assert len(state.schedule) == 10
        assert len(state.dead.messages) == 0
        assert count_results(app) == 0
        assert all(get_status(app, job.uuid) == RETRY for _, job in jobs)
    with freeze_time("2100-01-01"):
        # After the schedule time, all jobs should be moved to the task queue.
        scheduled = await broker.process_schedule()
        assert len(scheduled) == 10
        state = get_state(app)
        assert len(state.queue.messages) == 10
        # The queued messages are exactly the jobs that were scheduled.
        assert {x.uuid for x in state.queue.messages} == {y.uuid for (_, y) in jobs}
        assert state.queue.groups[0].pending == 0
        assert len(state.schedule) == 0
        assert len(state.dead.messages) == 0
        assert count_results(app) == 0
        assert all(get_status(app, job.uuid) == RETRY for _, job in jobs)
async def test_ack_and_dead(app, broker, failing_message, xid, consumer_id, failing_job):
    """ack_and_dead acks the delivered message and dead-letters the job."""
    await broker.read(consumer_id, count=1)

    def counts(state):
        # (queue length, pending count, schedule length, dead length, results)
        return (
            len(state.queue.messages),
            state.queue.groups[0].pending,
            len(state.schedule),
            len(state.dead.messages),
            count_results(app),
        )

    # Delivered but unacked: the failing job sits pending on the queue.
    assert counts(get_state(app)) == (1, 1, 0, 0, 0)
    assert get_status(app, failing_job.uuid) == status.SENT

    await broker.ack_and_dead(xid, failing_job)

    # Acked and dead-lettered: queue drained, exactly this job on the DLQ,
    # one failure result recorded, and the status flipped to DEAD.
    state = get_state(app)
    assert counts(state) == (0, 0, 0, 1, 1)
    assert state.dead.messages[0].uuid == failing_job.uuid
    assert get_status(app, failing_job.uuid) == status.DEAD
def test_replay(app, broker, failing_job, dead_job):
    """replay_dead moves a dead job back to the queue and resets it to SENT."""
    replayed = replay_dead(app)
    assert len(replayed) == 1

    state = get_state(app)
    assert len(state.queue.messages) == 1
    assert not state.dead.messages

    # The job keeps its retry history but is marked SENT again.
    refreshed = get_job(app, failing_job.uuid)
    assert refreshed.tries == 1
    assert refreshed.max_retries == 0
    assert refreshed.status == SENT
def dead_job(app, failing_job):
    """Fixture: mark `failing_job` as DEAD and push it onto the DLQ stream."""
    failing_job.tries = 1
    failing_job.max_retries = 0
    failing_job.status = DEAD
    # Persist the status hash and append the uuid to the dead-letter stream.
    app.client.hmset(app.keys.status(failing_job), failing_job.serialise())
    app.client.xadd(app.keys.dead, {"uuid": failing_job.uuid})

    # Sanity-check the seeded state before handing it to the test.
    state = get_state(app)
    assert not state.queue.messages
    assert len(state.dead.messages) == 1
    stored = get_job(app, failing_job.uuid)
    assert stored.tries == 1
    assert stored.max_retries == 0
    assert stored.status == DEAD
def info(application: str) -> None:
    """
    Print a JSON-encoded summary of application state.
    """
    app = get_object(application)
    click.echo(render(get_state(app)))