def test_retry_failed_task(
        app1, job_id1, job_id2, log, tasks_json_tmpfile):
    """
    Retry failed tasks up to max num retries and then remove self from queue.

    Tasks should maintain proper task state throughout.
    """
    # create 2 tasks in same queue
    enqueue(app1, job_id1)
    enqueue(app1, job_id2, validate_queued=False)
    nose.tools.assert_equal(2, get_qb_status(app1, job_id1)['app_qsize'])
    nose.tools.assert_equal(job_id1, cycle_queue(app1))

    # run job_id2 and have it fail
    run_code(
        log, tasks_json_tmpfile, app1,
        extra_opts='--bash_cmd "&& notacommand...fail" ')
    # ensure we still have both items in the queue
    nose.tools.assert_true(get_qb_status(app1, job_id1)['in_queue'])
    nose.tools.assert_true(get_qb_status(app1, job_id2)['in_queue'])
    # ensure the failed task is sent to back of the queue
    nose.tools.assert_equal(2, get_qb_status(app1, job_id1)['app_qsize'])
    nose.tools.assert_equal(job_id1, cycle_queue(app1))

    # run and fail n times, where n = max failures
    run_code(
        log, tasks_json_tmpfile, app1,
        extra_opts='--max_retry 1 --bash_cmd "&& notacommand...fail"')
    # verify that job_id2 is removed from queue
    validate_one_queued_task(app1, job_id1)
    # verify that job_id2 state is 'failed' and job_id1 is still pending
    validate_one_failed_task(app1, job_id2)
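
# The test above exercises this flow: a failing task is sent to the back of
# the queue and retried until it exceeds max_retry, after which it is marked
# 'failed' and removed from the queue while the healthy task stays queued.
# The helper below is a hypothetical, self-contained sketch of that flow in
# plain Python; it is not the queue or retry implementation under test.

def _illustrate_retry_flow(max_retry=1):
    """Hypothetical demo: retry a failing job, then drop it as 'failed'."""
    from collections import deque
    queue = deque(['job_id1', 'job_id2'])
    failures = 0
    while 'job_id2' in queue:
        queue.remove('job_id2')      # the failing job runs and fails again
        failures += 1
        if failures <= max_retry:
            queue.append('job_id2')  # still retryable: back of the queue
    # once max_retry is exceeded, the failed job is gone and job_id1 remains
    assert list(queue) == ['job_id1']
    return failures
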
def test_maybe_add_subtask(app1, job_id1, job_id2, job_id3):
    # we don't queue anything if we request queue=False, but we create data
    # for this node if it doesn't exist
    tt.validate_zero_queued_task(app1)
    api.maybe_add_subtask(app1, job_id1, queue=False)
    tt.validate_zero_queued_task(app1)
    # data for this job_id exists, so it can't get queued
    api.maybe_add_subtask(app1, job_id1, priority=4)
    tt.validate_zero_queued_task(app1)

    api.maybe_add_subtask(app1, job_id2, priority=8)
    tt.validate_one_queued_task(app1, job_id2)
    api.maybe_add_subtask(app1, job_id3, priority=5)
    # this should have no effect because it's already queued with priority=5
    api.maybe_add_subtask(app1, job_id3, priority=9)

    job_id = tt.cycle_queue(app1)
    nt.assert_equal(job_id3, job_id)
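
# test_maybe_add_subtask relies on priority ordering: the job queued with the
# lower priority value (job_id3, priority=5) is dequeued before the one with
# the higher value (job_id2, priority=8), and re-adding an already-queued job
# does not change its priority. The helper below is a hypothetical stand-in
# that illustrates the ordering with a plain heapq; it is not the queue
# implementation under test.

def _illustrate_priority_ordering():
    """Hypothetical demo: lower priority values are dequeued first."""
    import heapq
    queue = []
    heapq.heappush(queue, (8, 'job_id2'))
    heapq.heappush(queue, (5, 'job_id3'))
    # job_id3 (priority 5) comes out ahead of job_id2 (priority 8), matching
    # the cycle_queue assertion in test_maybe_add_subtask
    assert heapq.heappop(queue) == (5, 'job_id3')
    assert heapq.heappop(queue) == (8, 'job_id2')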