def test_job_retry(self):
    """Test job.retry() works properly"""
    queue = Queue(connection=self.testconn)
    retry = Retry(max=3, interval=5)
    job = queue.enqueue(div_by_zero, retry=retry)

    with self.testconn.pipeline() as pipeline:
        job.retry(queue, pipeline)
        pipeline.execute()

    self.assertEqual(job.retries_left, 2)
    # Status should be SCHEDULED since the retry has a 5 second interval
    self.assertEqual(job.get_status(), JobStatus.SCHEDULED)

    retry = Retry(max=3)
    job = queue.enqueue(div_by_zero, retry=retry)

    with self.testconn.pipeline() as pipeline:
        job.retry(queue, pipeline)
        pipeline.execute()

    self.assertEqual(job.retries_left, 2)
    # Without an interval, the job goes straight back to QUEUED
    self.assertEqual(job.get_status(), JobStatus.QUEUED)
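# A minimal sketch of the rule test_job_retry asserts above: a retry configured
# with an interval leaves the job SCHEDULED, one without an interval leaves it
# QUEUED, and either way retries_left drops by one. Illustration of the asserted
# behavior only, not RQ's Job.retry() implementation; JobStatus is rq.job.JobStatus
# as used in the test.
def status_after_retry(retries_left, interval_seconds):
    """Return (new_retries_left, expected_status) after a single retry."""
    if interval_seconds:
        return retries_left - 1, JobStatus.SCHEDULED  # run again after the interval
    return retries_left - 1, JobStatus.QUEUED  # re-enqueued immediately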
def test_handle_retry(self):
    """handle_job_failure() handles retry properly"""
    connection = self.testconn
    queue = Queue(connection=connection)
    retry = Retry(max=2)
    job = queue.enqueue(div_by_zero, retry=retry)
    registry = FailedJobRegistry(queue=queue)

    worker = Worker([queue])

    # If the job is configured to retry, it's put back in the queue
    # rather than in the FailedJobRegistry.
    # This is the original execution
    queue.empty()
    worker.handle_job_failure(job, queue)
    job.refresh()
    self.assertEqual(job.retries_left, 1)
    self.assertEqual([job.id], queue.job_ids)
    self.assertFalse(job in registry)

    # First retry
    queue.empty()
    worker.handle_job_failure(job, queue)
    job.refresh()
    self.assertEqual(job.retries_left, 0)
    self.assertEqual([job.id], queue.job_ids)

    # Second retry
    queue.empty()
    worker.handle_job_failure(job, queue)
    job.refresh()
    self.assertEqual(job.retries_left, 0)
    self.assertEqual([], queue.job_ids)
    # Once a job has no retries left, it's put in the FailedJobRegistry
    self.assertTrue(job in registry)
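# Sketch of the routing that test_handle_retry verifies: a failed job with
# retries remaining goes back on the queue, and only once retries_left reaches
# zero does it land in the FailedJobRegistry. This mirrors the assertions above,
# not Worker.handle_job_failure() itself.
def destination_after_failure(retries_left):
    """Where a just-failed job ends up, given retries_left before handling."""
    if retries_left and retries_left > 0:
        return 'queue'  # requeued (or scheduled), retries_left decremented
    return 'failed_job_registry'  # no retries left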
def test_enqueue_in_with_retry(self):
    """Ensure that the retry parameter is passed to the enqueue_at function from enqueue_in."""
    queue = Queue(connection=self.testconn)
    job = queue.enqueue_in(timedelta(seconds=30), say_hello, retry=Retry(3, [2]))
    self.assertEqual(job.retries_left, 3)
    self.assertEqual(job.retry_intervals, [2])
def test_retry(self):
    """Retry parses `max` and `interval` correctly"""
    retry = Retry(max=1)
    self.assertEqual(retry.max, 1)
    self.assertEqual(retry.intervals, [0])
    self.assertRaises(ValueError, Retry, max=0)

    retry = Retry(max=2, interval=5)
    self.assertEqual(retry.max, 2)
    self.assertEqual(retry.intervals, [5])

    retry = Retry(max=3, interval=[5, 10])
    self.assertEqual(retry.max, 3)
    self.assertEqual(retry.intervals, [5, 10])

    # interval can't be negative
    self.assertRaises(ValueError, Retry, max=1, interval=-5)
    self.assertRaises(ValueError, Retry, max=1, interval=[1, -5])
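# Validation sketch mirroring what test_retry asserts about Retry's arguments:
# max must be at least 1, a scalar interval is normalized to a one-element list,
# and negative intervals raise ValueError. A hypothetical helper, not rq's
# Retry class.
def normalize_retry_args(max_retries, interval=0):
    if max_retries < 1:
        raise ValueError('max must be at least 1')
    intervals = [interval] if isinstance(interval, int) else list(interval)
    if any(i < 0 for i in intervals):
        raise ValueError('intervals may not be negative')
    return max_retries, intervals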
def queue_messages(targets, context, forwarder, client=None):
    """Enqueue a send_message job for each target, retrying up to twice
    with intervals of 35 and 45 seconds."""
    for index, target in enumerate(targets):
        queue.enqueue(
            send_message,
            user_id=forwarder.user_id,
            bot_id=context.bot.id,
            target=target,
            index=index,
            targets_len=len(targets),
            telethon_text=forwarder.telethon_text,
            retry=Retry(max=2, interval=[35, 45]),
        )
def test_decorator_custom_retry(self):
    """Ensure that passing in retry to the decorator sets the retry on the job"""
    # Ensure default
    result = decorated_job.delay(1, 2)
    self.assertEqual(result.retries_left, None)
    self.assertEqual(result.retry_intervals, None)

    @job('default', retry=Retry(3, [2]))
    def hello():
        return 'Why hello'

    result = hello.delay()
    self.assertEqual(result.retries_left, 3)
    self.assertEqual(result.retry_intervals, [2])
def test_cleanup_handles_retries(self):
    """Expired jobs should also be retried"""
    queue = Queue(connection=self.testconn)
    registry = StartedJobRegistry(connection=self.testconn)
    failed_job_registry = FailedJobRegistry(connection=self.testconn)
    job = queue.enqueue(say_hello, retry=Retry(max=1))

    # Add job to StartedJobRegistry with past expiration time
    self.testconn.zadd(registry.key, {job.id: 2})

    registry.cleanup()
    self.assertEqual(len(queue), 2)
    self.assertEqual(job.get_status(), JobStatus.QUEUED)
    self.assertNotIn(job, failed_job_registry)

    self.testconn.zadd(registry.key, {job.id: 2})

    # Job goes to FailedJobRegistry because it's only retried once
    registry.cleanup()
    self.assertEqual(len(queue), 2)
    self.assertEqual(job.get_status(), JobStatus.FAILED)
    self.assertIn(job, failed_job_registry)
def enqueue_job(item):
    """Add a job to the queue."""
    logger.debug("job enqueued with worker req type: {}".format(item.req_type))
    sq = Queue(item.name, connection=CONN)
    job = sq.enqueue(
        worker,
        kwargs={
            "data": item.data,
            "method": item.req_type,
            "url": item.url,
            "headers": item.headers,
        },
        result_ttl=item.ttl if item.ttl else 18600,  # default result TTL in seconds
        retry=Retry(max=3),
    )
    return {
        "id": job.get_id(),
        "status": job.get_status(),
        "queue": item.name,
        "position": job.get_position(),
    }
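# Hypothetical usage of enqueue_job() above. `QueueItem` is a stand-in for
# whatever request model the caller actually passes in; only the attributes
# read by enqueue_job are modeled here, and the field values are examples.
from dataclasses import dataclass, field

@dataclass
class QueueItem:
    name: str
    req_type: str
    url: str
    data: dict = field(default_factory=dict)
    headers: dict = field(default_factory=dict)
    ttl: int = 0

response = enqueue_job(QueueItem(
    name="webhooks",
    req_type="POST",
    url="https://example.com/hook",
    data={"event": "ping"},
    headers={"Content-Type": "application/json"},
))
# `response` is the dict built above: job id, current status, queue name and
# the job's position in the "webhooks" queue.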
def test_retry_interval(self):
    """Retries with intervals are scheduled"""
    connection = self.testconn
    queue = Queue(connection=connection)
    retry = Retry(max=1, interval=5)
    job = queue.enqueue(div_by_zero, retry=retry)

    worker = Worker([queue])
    registry = queue.scheduled_job_registry
    # If the job is configured to retry with an interval, it's scheduled
    # rather than put straight back in the queue
    queue.empty()
    worker.handle_job_failure(job, queue)
    job.refresh()
    self.assertEqual(job.get_status(), JobStatus.SCHEDULED)
    self.assertEqual(job.retries_left, 0)
    self.assertEqual(len(registry), 1)
    self.assertEqual(queue.job_ids, [])

    # Scheduled time is roughly 5 seconds from now
    scheduled_time = registry.get_scheduled_time(job)
    now = datetime.now(timezone.utc)
    self.assertTrue(now + timedelta(seconds=4) < scheduled_time < now + timedelta(seconds=6))