def job():
    task_func = Mock()
    job = Job('foo_task', 'foo_queue', datetime.now(timezone.utc), 10,
              task_args=(1, 2), task_kwargs={'foo': 'bar'})
    job.task_func = task_func
    return job, task_func


def test_cant_exceed_max_concurrency(broker):
    set_concurrency_keys(broker)
    # An idempotent job is required.
    job1 = Job(
        CONCURRENT_TASK_NAME, 'foo_queue', datetime.now(timezone.utc), 1,
        # kwargs help with debugging but are not part of the test.
        task_kwargs=dict(name='job1'),
    )
    job2 = Job(
        CONCURRENT_TASK_NAME, 'foo_queue', datetime.now(timezone.utc), 1,
        task_kwargs=dict(name='job2'),
    )
    broker.enqueue_jobs([job1, job2])

    returned_jobs = broker.get_jobs_from_queue('foo_queue', 2)
    assert len(returned_jobs) == CONCURRENT_TASK_MAX_CONCURRENCY
    assert returned_jobs[0].task_kwargs == dict(name='job1')

    # Check the current concurrency was set properly.
    current = broker._r.hget(
        broker._to_namespaced(CURRENT_CONCURRENCY_KEY), CONCURRENT_TASK_NAME)
    assert current == b'1'

    # Make sure job2 was left alone in the queue.
    queued = broker._r.lpop(broker._to_namespaced('foo_queue'))
    assert json.loads(queued.decode())['id'] == str(job2.id)


def test_flush(broker):
    broker.enqueue_jobs([
        Job('t1', 'q1', get_now(), 0),
        Job('t2', 'q2', get_now() + timedelta(seconds=10), 0)
    ])
    broker.flush()
    assert broker.get_jobs_from_queue('q1', 1) == []
    assert broker.next_future_job_delta is None


def test_idempotency_token(_, broker):
    job_1 = Job('foo_task', 'foo_queue', datetime.now(timezone.utc), 0)
    job_2 = Job('foo_task', 'foo_queue', datetime.now(timezone.utc), 0)
    broker.enqueue_jobs([job_1])
    broker.enqueue_jobs([job_2])

    jobs = broker.get_jobs_from_queue('foo_queue', max_jobs=10)
    job_1.status = JobStatus.RUNNING
    assert jobs == [job_1]


def test_get_jobs_from_queue_limits_concurrency(broker):
    task = Task(print, 'foo', 'q1', 10, None, max_concurrency=1)
    broker.set_concurrency_keys([task])
    job1 = Job('foo', 'q1', datetime.now(timezone.utc), 10)
    job2 = Job('foo', 'q1', datetime.now(timezone.utc), 10)
    broker.enqueue_jobs([job1, job2])

    # Try to get one more than it allows.
    jobs = broker.get_jobs_from_queue('q1', 2)
    assert len(jobs) == 1


def test_normal_job(broker):
    job = Job('foo_task', 'foo_queue', datetime.now(timezone.utc), 0,
              task_args=(1, 2), task_kwargs={'foo': 'bar'})
    broker.enqueue_jobs([job])
    assert job.status == JobStatus.QUEUED

    job.status = JobStatus.RUNNING
    assert broker.get_jobs_from_queue('foo_queue', 5) == [job]
    assert broker.get_jobs_from_queue('foo_queue', 1) == []


def test_get_jobs_from_queue_returns_all_requested(broker):
    # If a job is not returned because it was over concurrency limits,
    # make sure the number of jobs requested is filled from other jobs
    # and that the over-limit one is left alone in the queue.
    set_concurrency_keys(broker)
    jobs = [
        Job(CONCURRENT_TASK_NAME, 'foo_queue', datetime.now(timezone.utc), 1),
        Job(CONCURRENT_TASK_NAME, 'foo_queue', datetime.now(timezone.utc), 1),
        Job('foo_task', 'foo_queue', datetime.now(timezone.utc), 1),
    ]
    broker.enqueue_jobs(jobs)

    returned_jobs = broker.get_jobs_from_queue('foo_queue', 2)
    assert len(returned_jobs) == CONCURRENT_TASK_MAX_CONCURRENCY + 1


def test_get_jobs_from_queue_re_adds_jobs_if_over_limit(broker):
    task = Task(print, 'foo', 'q1', 10, None, max_concurrency=1)
    broker.set_concurrency_keys([task])
    job1 = Job('foo', 'q1', datetime.now(timezone.utc), 10)
    job2 = Job('foo', 'q1', datetime.now(timezone.utc), 10)
    broker.enqueue_jobs([job1, job2])

    # Try to get one more than it allows.
    [running_job] = broker.get_jobs_from_queue('q1', 2)

    # Pop what's left in the broker's Queue and inspect it.
    job_json_string = broker._get_queue('q1').get(block=False)
    queued_job = Job.deserialize(job_json_string)
    assert queued_job != running_job


def test_running_job(broker):
    running_jobs_key = broker._to_namespaced(
        RUNNING_JOBS_KEY.format(broker._id))

    # Non-idempotent job
    job = Job('foo_task', 'foo_queue', datetime.now(timezone.utc), 0)
    broker.enqueue_jobs([job])
    assert broker._r.hget(running_jobs_key, str(job.id)) is None
    broker.get_jobs_from_queue('foo_queue', 1)
    assert broker._r.hget(running_jobs_key, str(job.id)) is None
    # Try to remove it, even if it doesn't exist in running
    broker.remove_job_from_running(job)

    # Idempotent job - get from queue
    job = Job('foo_task', 'foo_queue', datetime.now(timezone.utc), 10)
    broker.enqueue_jobs([job])
    assert broker._r.hget(running_jobs_key, str(job.id)) is None
    broker.get_jobs_from_queue('foo_queue', 1)
    job.status = JobStatus.RUNNING
    assert (Job.deserialize(
        broker._r.hget(running_jobs_key, str(job.id)).decode()) == job)

    # Idempotent job - re-enqueue after job ran with error
    job.retries += 1
    broker.enqueue_jobs([job])
    assert broker._r.hget(running_jobs_key, str(job.id)) is None
    broker.get_jobs_from_queue('foo_queue', 1)
    job.status = JobStatus.RUNNING
    assert (Job.deserialize(
        broker._r.hget(running_jobs_key, str(job.id)).decode()) == job)

    # Idempotent job - job succeeded
    broker.remove_job_from_running(job)
    assert broker._r.hget(running_jobs_key, str(job.id)) is None
    assert broker.get_jobs_from_queue('foo_queue', 1) == []


def job(patch_now):
    job = Job('foo_task', 'foo_queue', get_now(), 5,
              task_args=(1, 2), task_kwargs={'foo': 'bar'})
    return job


def test_at_timezone_naive():
    now_naive = datetime.utcnow()
    job = Job('foo_task', 'foo_queue', now_naive, 5,
              task_args=(1, 2), task_kwargs={'foo': 'bar'})
    assert job.at.tzinfo is timezone.utc


def test_decrements_concurrency_count_when_job_ends(broker):
    task = Task(print, 'foo', 'q1', 10, None, max_concurrency=1)
    broker.set_concurrency_keys([task])
    job1 = Job('foo', 'q1', datetime.now(timezone.utc), 10)
    job2 = Job('foo', 'q1', datetime.now(timezone.utc), 10)
    broker.enqueue_jobs([job1, job2])

    # Start the first job.
    running_jobs = broker.get_jobs_from_queue('q1', 2)
    assert 1 == len(running_jobs)
    # No more can start.
    assert 0 == len(broker.get_jobs_from_queue('q1', 2))

    # Complete the first job.
    broker.remove_job_from_running(running_jobs[0])
    # Start the second job now that the first has finished.
    assert 1 == len(broker.get_jobs_from_queue('q1', 2))


def test_decrements_concurrency_count_when_job_fails(broker):
    set_concurrency_keys(broker)
    job = Job(
        CONCURRENT_TASK_NAME, 'foo_queue', datetime.now(timezone.utc),
        max_retries=10,
    )
    broker.enqueue_jobs([job])
    broker.get_jobs_from_queue('foo_queue', 1)
    current = broker._r.hget(
        broker._to_namespaced(CURRENT_CONCURRENCY_KEY), CONCURRENT_TASK_NAME)
    assert current == b'1'

    job.status = JobStatus.NOT_SET
    job.retries += 1
    broker.enqueue_jobs([job], from_failure=True)
    current = broker._r.hget(
        broker._to_namespaced(CURRENT_CONCURRENCY_KEY), CONCURRENT_TASK_NAME)
    assert current == b'0'


def test_enqueue_jobs_from_dead_broker(broker, broker_2):
    # Enqueue one idempotent job and one non-idempotent job
    job_1 = Job('foo_task', 'foo_queue', datetime.now(timezone.utc), 0)
    job_2 = Job('foo_task', 'foo_queue', datetime.now(timezone.utc), 10)
    broker.enqueue_jobs([job_1, job_2])

    # Simulate broker starting the jobs
    broker.get_jobs_from_queue('foo_queue', 100)

    # Mark broker as dead, should re-enqueue only the idempotent job
    assert broker_2.enqueue_jobs_from_dead_broker(broker._id) == 1

    # Simulate broker 2 getting jobs from the queue
    job_2.status = JobStatus.RUNNING
    job_2.retries = 1
    assert broker_2.get_jobs_from_queue('foo_queue', 100) == [job_2]

    # Check that a broker can be marked as dead multiple times
    # without duplicating jobs
    assert broker_2.enqueue_jobs_from_dead_broker(broker._id) == 0


def test_decrements_concurrency_count_when_job_ends(broker):
    set_concurrency_keys(broker)
    job = Job(
        CONCURRENT_TASK_NAME, 'foo_queue', datetime.now(timezone.utc), 1,
        # kwargs help with debugging but are not part of the test.
        task_kwargs=dict(name='job1'),
    )
    broker.enqueue_jobs([job])

    returned_jobs = broker.get_jobs_from_queue('foo_queue', 2)
    assert len(returned_jobs) == CONCURRENT_TASK_MAX_CONCURRENCY
    current = broker._r.hget(
        broker._to_namespaced(CURRENT_CONCURRENCY_KEY), CONCURRENT_TASK_NAME)
    assert current == b'1'

    broker.get_jobs_from_queue('foo_queue', 1)
    job.status = JobStatus.RUNNING
    broker.remove_job_from_running(job)
    current = broker._r.hget(
        broker._to_namespaced(CURRENT_CONCURRENCY_KEY), CONCURRENT_TASK_NAME)
    assert current == b'0'


def test_future_job(broker, patch_now):
    assert broker.next_future_job_delta is None
    assert broker.move_future_jobs() == 0

    job = Job('foo_task', 'foo_queue', get_now() + timedelta(minutes=10), 0,
              task_args=(1, 2), task_kwargs={'foo': 'bar'})
    broker.enqueue_jobs([job])
    assert job.status == JobStatus.WAITING
    assert broker.get_jobs_from_queue('foo_queue', 5) == []
    assert broker.next_future_job_delta == 600
    assert broker.move_future_jobs() == 0

    set_now(datetime(2017, 9, 2, 9, 00, 56, 482169))
    assert broker.next_future_job_delta == 0
    assert broker.move_future_jobs() == 1

    job.status = JobStatus.RUNNING
    assert broker.get_jobs_from_queue('foo_queue', 5) == [job]


def test_enqueue_jobs_from_dead_broker(broker, broker_2):
    set_concurrency_keys(broker)

    # Enqueue one idempotent job, one non-idempotent job, and another
    # that has a max_concurrency.
    job_1 = Job('foo_task', 'foo_queue', datetime.now(timezone.utc), 0)
    job_2 = Job('foo_task', 'foo_queue', datetime.now(timezone.utc), 10)
    job_3 = Job(CONCURRENT_TASK_NAME, 'foo_queue',
                datetime.now(timezone.utc), 10)
    broker.enqueue_jobs([job_1, job_2, job_3])

    # Simulate broker starting the jobs
    broker.get_jobs_from_queue('foo_queue', 100)

    # Check the current_concurrency
    current = broker._r.hget(
        broker._to_namespaced(CURRENT_CONCURRENCY_KEY), CONCURRENT_TASK_NAME)
    assert current == b'1'

    # Mark broker as dead, should re-enqueue only the idempotent jobs.
    assert broker_2.enqueue_jobs_from_dead_broker(broker._id) == 2

    # Check that the current_concurrency was decremented for job_3.
    current = broker._r.hget(
        broker._to_namespaced(CURRENT_CONCURRENCY_KEY), CONCURRENT_TASK_NAME)
    assert current == b'0'

    # Simulate broker 2 getting jobs from the queue
    job_2.status = job_3.status = JobStatus.RUNNING
    job_2.retries = job_3.retries = 1
    result = sorted(broker_2.get_jobs_from_queue('foo_queue', 100),
                    key=operator.attrgetter('id'))
    expected = sorted([job_2, job_3], key=operator.attrgetter('id'))
    assert result == expected

    # Check that a broker can be marked as dead multiple times
    # without duplicating jobs
    assert broker_2.enqueue_jobs_from_dead_broker(broker._id) == 0


def test_job_ran(broker):
    now = datetime.now(timezone.utc)
    job = Job('foo_task', 'foo_queue', now, 0,
              task_args=(1, 2), task_kwargs={'foo': 'bar'})
    job.status = JobStatus.RUNNING

    broker.job_ran(job, None)
    assert job.status is JobStatus.SUCCEEDED

    job.status = JobStatus.RUNNING
    broker.job_ran(job, RuntimeError('Error'))
    assert job.status is JobStatus.FAILED

    job.status = JobStatus.RUNNING
    job.max_retries = 10
    broker.job_ran(job, RuntimeError('Error'))
    assert job.status is JobStatus.WAITING
    assert job.at > now


def test_serialization(job):
    job.status = JobStatus.QUEUED
    job.retries = 2
    job_json = job.serialize()
    assert Job.deserialize(job_json) == job


def test_wait_for_events_with_future_job(broker, patch_now, delta, timeout):
    broker.enqueue_jobs([Job('foo_task', 'foo_queue', get_now() + delta, 0)])
    with patch.object(broker, '_something_happened') as mock_sh:
        broker.wait_for_event()
    mock_sh.wait.assert_called_once_with(timeout=timeout)