def test_interrupt_worker_gracefully(worker, p_flags):
    """ Test what happens when we interrupt a running worker gracefully. """

    worker.start(flags=p_flags)

    first_id = worker.send_task(
        "tests.tasks.general.Add", {"a": 41, "b": 1, "sleep": 5}, block=False)

    # Give the worker time to pick up the first job.
    time.sleep(2)
    first = Job(first_id).fetch().data
    assert first["status"] == "started"

    # Graceful stop: the job already in flight must be allowed to finish.
    worker.stop(block=False, deps=False)
    time.sleep(1)

    # A job queued after the stop signal must stay queued.
    second_id = worker.send_task(
        "tests.tasks.general.Add", {"a": 42, "b": 1, "sleep": 4}, block=False)
    time.sleep(1)

    second = Job(second_id).fetch().data
    assert second.get("status") == "queued"

    # Wait long enough for the first job's sleep to elapse.
    time.sleep(4)

    first = Job(first_id).fetch().data
    assert first["status"] == "success"
    assert first["result"] == 42

    second = Job(second_id).fetch().data
    assert second.get("status") == "queued"
def wait_for_tasks_results(self, job_ids, block=True, accept_statuses=None):
    """Wait for a list of jobs to finish and collect their results.

    Fix: replaced the mutable default argument ``accept_statuses=["success"]``
    with a ``None`` sentinel — mutable defaults are shared across calls and
    are a classic Python pitfall. Behavior is unchanged for all callers.

    :param job_ids: iterable of job ids to wait on.
    :param block: when False, return ``job_ids`` immediately without waiting.
    :param accept_statuses: statuses considered terminal-OK; defaults to
        ``["success"]``. Any other final status triggers an AssertionError.
    :returns: list of each job's ``result`` field (order matches ``job_ids``),
        or ``job_ids`` itself when ``block`` is False.
    """
    if accept_statuses is None:
        accept_statuses = ["success"]

    if not block:
        return job_ids

    results = []

    for job_id in job_ids:
        # Poll each job until it reaches a terminal status.
        job = Job(job_id).wait(poll_interval=0.01)
        assert job.get("status") in accept_statuses, "Job had status %s, not in %s. Dump: %s" % (job.get("status"), accept_statuses, job)
        results.append(job.get("result"))

    return results
def wait_for_tasks_results(self, job_ids, block=True, accept_statuses=None):
    """Wait for a list of jobs to finish and collect their results.

    Fix: replaced the mutable default argument ``accept_statuses=["success"]``
    with a ``None`` sentinel — mutable defaults are shared across calls and
    are a classic Python pitfall. Behavior is unchanged for all callers.

    :param job_ids: iterable of job ids to wait on.
    :param block: when False, return ``job_ids`` immediately without waiting.
    :param accept_statuses: statuses considered terminal-OK; defaults to
        ``["success"]``. Any other final status triggers an AssertionError.
    :returns: list of each job's ``result`` field (order matches ``job_ids``),
        or ``job_ids`` itself when ``block`` is False.
    """
    if accept_statuses is None:
        accept_statuses = ["success"]

    if not block:
        return job_ids

    results = []

    for job_id in job_ids:
        # Poll each job until it reaches a terminal status.
        job = Job(job_id).wait(poll_interval=0.01)
        assert job.get("status") in accept_statuses, "Job had status %s, not in %s. Dump: %s" % (
            job.get("status"), accept_statuses, job)
        results.append(job.get("result"))

    return results
def test_interrupt_maxconcurrency(worker):
    """One of the two concurrent jobs must fail with a maxconcurrency status."""

    # Two greenlets so both jobs run at once and collide on the lock.
    worker.start(flags="--greenlets=2")

    params = [{"a": i, "b": 1, "sleep": 2} for i in range(2)]
    submitted = worker.send_tasks(
        "tests.tasks.concurrency.LockedAdd", params, block=False)

    worker.wait_for_tasks_results(
        submitted, accept_statuses=["success", "failed", "maxconcurrency"])

    statuses = {Job(jid).fetch().data["status"] for jid in submitted}
    assert statuses == {"success", "maxconcurrency"}

    # the job concurrency key must be equal to 0
    followup_id = worker.send_task(
        "tests.tasks.concurrency.LockedAdd",
        {"a": 1, "b": 1, "sleep": 2},
        block=False)
    followup = Job(followup_id).wait(poll_interval=0.01)
    assert followup.get("status") == "success"
def test_cancel_by_path(worker):
    """Cancel every queued job matching a task path via JobAction."""

    # Start the worker with only one greenlet so that tasks execute sequentially
    worker.start(flags="--gevent 1")

    running_id = worker.send_task(
        "tests.tasks.general.MongoInsert", {"a": 41, "sleep": 2}, block=False)

    # Cancel every queued MongoInsert job by path.
    worker.send_task("mrq.basetasks.utils.JobAction", {
        "path": "tests.tasks.general.MongoInsert",
        "status": "queued",
        "action": "cancel"
    }, block=False)

    cancelled_id = worker.send_task(
        "tests.tasks.general.MongoInsert", {"a": 43}, block=False)

    Job(cancelled_id).wait(poll_interval=0.01)

    # Leave some time to unqueue job_id2 without executing.
    time.sleep(1)

    worker.stop(deps=False)

    executed = Job(running_id).fetch().data
    skipped = Job(cancelled_id).fetch().data

    assert executed["status"] == "success"
    assert executed["result"] == {"a": 41, "sleep": 2}

    assert skipped["status"] == "cancel"
    assert skipped["dateexpires"] > skipped["dateupdated"]
    assert skipped.get("result") is None

    # Only the first job may have touched the collection.
    assert worker.mongodb_logs.tests_inserts.count() == 1

    assert Queue("default").size() == 0
def test_interrupt_worker_double_sigint(worker, p_flags):
    """ Test what happens when we interrupt a running worker with 2 SIGINTs.

    Fix: the ``Queue("default").list_job_ids() == [...]`` line was a bare
    comparison whose boolean result was discarded (missing ``assert``), so
    the queue-ordering check never actually ran. It is now asserted.
    """

    start_time = time.time()

    worker.start(flags=p_flags)

    job_id = worker.send_task("tests.tasks.general.Add",
                              {"a": 41, "b": 1, "sleep": 20}, block=False)

    # Wait until the job is actually picked up.
    while Job(job_id).fetch().data["status"] == "queued":
        time.sleep(0.1)

    job = Job(job_id).fetch().data
    assert job["status"] == "started"

    # Stop the worker gracefully. first job should still finish!
    worker.stop(block=False, deps=False)

    time.sleep(1)

    # Should not be accepting new jobs!
    job_id2 = worker.send_task("tests.tasks.general.Add",
                               {"a": 42, "b": 1, "sleep": 20}, block=False)

    time.sleep(1)

    job2 = Job(job_id2).fetch().data
    assert job2.get("status") == "queued"

    job = Job(job_id).fetch().data
    assert job["status"] == "started"

    # Sending a second kill -2 should make it stop
    worker.stop(block=True, deps=False, force=True)

    while Job(job_id).fetch().data["status"] == "started":
        time.sleep(0.1)

    job = Job(job_id).fetch().data
    assert job["status"] == "interrupt"

    assert time.time() - start_time < 15

    # Then try the cleaning task that requeues interrupted jobs
    assert Queue("default").size() == 1

    worker.start(queues="cleaning", deps=False, flush=False)

    res = worker.send_task("mrq.basetasks.cleaning.RequeueInterruptedJobs",
                           {}, block=True, queue="cleaning")
    assert res["requeued"] == 1
    assert Queue("default").size() == 2

    # Was a no-op bare comparison before; now a real assertion.
    assert Queue("default").list_job_ids() == [str(job_id2), str(job_id)]

    job = Job(job_id).fetch().data
    assert job["status"] == "queued"
    assert job["queue"] == "default"
def test_interrupt_worker_double_sigint(worker, p_flags):
    """ Test what happens when we interrupt a running worker with 2 SIGINTs.

    Fix: the ``Queue("default").list_job_ids() == [...]`` line was a bare
    comparison whose boolean result was discarded (missing ``assert``), so
    the queue-ordering check never actually ran. It is now asserted.
    """

    start_time = time.time()

    worker.start(flags=p_flags)

    job_id = worker.send_task(
        "tests.tasks.general.Add", {"a": 41, "b": 1, "sleep": 10}, block=False)

    time.sleep(1)

    job = Job(job_id).fetch().data
    assert job["status"] == "started"

    # Stop the worker gracefully. first job should still finish!
    worker.stop(block=False, deps=False)

    time.sleep(1)

    # Should not be accepting new jobs!
    job_id2 = worker.send_task(
        "tests.tasks.general.Add", {"a": 42, "b": 1, "sleep": 10}, block=False)

    time.sleep(1)

    job = Job(job_id2).fetch().data
    assert job.get("status") == "queued"

    # Sending a second kill -2 should make it stop
    worker.stop(block=True, deps=False, force=True)

    time.sleep(1)

    job = Job(job_id).fetch().data
    assert job["status"] == "interrupt"

    assert time.time() - start_time < 8

    # Then try the cleaning task that requeues interrupted jobs
    assert Queue("default").size() == 1

    worker.start(queues="cleaning", deps=False, flush=False)

    res = worker.send_task(
        "mrq.basetasks.cleaning.RequeueInterruptedJobs", {},
        block=True, queue="cleaning")
    assert res["requeued"] == 1
    assert Queue("default").size() == 2

    # Was a no-op bare comparison before; now a real assertion.
    assert Queue("default").list_job_ids() == [str(job_id2), str(job_id)]

    job = Job(job_id).fetch().data
    assert job["status"] == "queued"
    assert job["queue"] == "default"
def test_retry(worker):
    """A task raising Retry must land in the target queue with a future retry date."""

    job_id = worker.send_task("mrq.basetasks.tests.general.Retry",
                              {"queue": "noexec", "countdown": 60}, block=False)

    data = Job(job_id).wait(poll_interval=0.01, full_data=True)

    # Retried into the requested queue, not executed again.
    assert data["queue"] == "noexec"
    assert data["status"] == "retry"

    # The retry date must lie in the future.
    assert data["dateretry"] > datetime.datetime.utcnow()

    assert data.get("result") is None
def test_retry_cancel_on_retry(worker):
    """With cancel_on_retry set, a Retry must cancel the job instead of requeueing it."""

    job_id = worker.send_task(
        "mrq.basetasks.tests.general.Retry",
        {
            "queue": "noexec",
            "countdown": 60,
            "cancel_on_retry": True
        },
        block=False)

    data = Job(job_id).wait(poll_interval=0.01, full_data=True)

    # Cancelled in place: stays on the default queue with no result.
    assert data["status"] == "cancel"
    assert data["queue"] == "default"
    assert data.get("result") is None
def test_retry(worker):
    """A task raising Retry must land in the target queue with a future retry date."""

    job_id = worker.send_task(
        "tests.tasks.general.Retry",
        {"queue": "noexec", "delay": 60},
        block=False)

    data = Job(job_id).wait(poll_interval=0.01, full_data=True)

    # Retried into the requested queue, not executed again.
    assert data["queue"] == "noexec"
    assert data["status"] == "retry"

    # The retry date must lie in the future.
    assert data["dateretry"] > datetime.datetime.utcnow()

    assert data.get("result") is None
def test_cancel_by_path(worker):
    """Cancel every queued job matching a task path via JobAction."""

    # Start the worker with only one greenlet so that tasks execute
    # sequentially
    worker.start(flags="--greenlets 1")

    executed_id = worker.send_task(
        "tests.tasks.general.MongoInsert",
        {"a": 41, "sleep": 2},
        block=False)

    worker.wait_for_idle()

    # This one sits on a queue no worker consumes, so it stays "queued".
    cancelled_id = worker.send_task(
        "tests.tasks.general.MongoInsert", {"a": 43},
        block=False, queue="testMrq")

    # Cancel every queued MongoInsert job by path.
    worker.send_task("mrq.basetasks.utils.JobAction", {
        "path": "tests.tasks.general.MongoInsert",
        "status": "queued",
        "action": "cancel"
    }, block=False)

    worker.wait_for_idle()

    # Leave some time to unqueue job_id2 without executing.
    time.sleep(1)

    worker.stop(deps=False)

    executed = Job(executed_id).fetch().data
    cancelled = Job(cancelled_id).fetch().data

    assert executed["status"] == "success"
    assert executed["result"] == {"a": 41, "sleep": 2}

    assert cancelled["status"] == "cancel"
    assert cancelled["dateexpires"] > cancelled["dateupdated"]
    assert cancelled.get("result") is None

    # Only the first job may have touched the collection.
    assert worker.mongodb_jobs.tests_inserts.count() == 1

    assert Queue("default").size() == 0

    worker.stop_deps()
def test_interrupt_maxconcurrency(worker):
    """One of the two concurrent jobs must fail with a maxconcurrency status."""

    # Two greenlets so both jobs run at once and collide on the lock.
    worker.start(flags="--greenlets=2")

    params = [{"a": i, "b": 1, "sleep": 2} for i in range(2)]
    submitted = worker.send_tasks(
        "tests.tasks.concurrency.LockedAdd", params, block=False)

    worker.wait_for_tasks_results(
        submitted,
        accept_statuses=["success", "failed", "maxconcurrency"])

    statuses = {Job(jid).fetch().data["status"] for jid in submitted}
    assert statuses == {"success", "maxconcurrency"}

    # the job concurrency key must be equal to 0
    followup_id = worker.send_task(
        "tests.tasks.concurrency.LockedAdd",
        {"a": 1, "b": 1, "sleep": 2},
        block=False)
    followup = Job(followup_id).wait(poll_interval=0.01)
    assert followup.get("status") == "success"
def test_retry(worker):
    """Retry with a scheduler config: retry date in the future, expiry 1-3 days out."""

    worker.start(flags="--config tests/fixtures/config-scheduler8.py")

    job_id = worker.send_task(
        "tests.tasks.general.Retry",
        {"queue": "noexec", "delay": 60},
        block=False)

    data = Job(job_id).wait(poll_interval=0.01, full_data=True)

    # Retried into the requested queue, not executed again.
    assert data["queue"] == "noexec"
    assert data["status"] == "retry"

    # The retry date must lie in the future.
    assert data["dateretry"] > datetime.datetime.utcnow()

    # Expiry comes from the config: somewhere between 1 and 3 days from now.
    assert data["dateexpires"] > datetime.datetime.utcnow() + datetime.timedelta(days=1)
    assert data["dateexpires"] < datetime.datetime.utcnow() + datetime.timedelta(days=3)

    assert data.get("result") is None