def test_default_failure_ttl(self):
    """Job TTL defaults to DEFAULT_FAILURE_TTL"""
    queue = Queue(connection=self.testconn)
    job = queue.enqueue(say_hello)

    registry = FailedJobRegistry(connection=self.testconn)
    key = registry.key

    timestamp = current_timestamp()
    registry.add(job)
    score = self.testconn.zscore(key, job.id)
    self.assertLess(score, timestamp + DEFAULT_FAILURE_TTL + 2)
    self.assertGreater(score, timestamp + DEFAULT_FAILURE_TTL - 2)

    # Job key will also expire
    job_ttl = self.testconn.ttl(job.key)
    self.assertLess(job_ttl, DEFAULT_FAILURE_TTL + 2)
    self.assertGreater(job_ttl, DEFAULT_FAILURE_TTL - 2)

    timestamp = current_timestamp()
    ttl = 5
    registry.add(job, ttl=ttl)
    score = self.testconn.zscore(key, job.id)
    self.assertLess(score, timestamp + ttl + 2)
    self.assertGreater(score, timestamp + ttl - 2)

    job_ttl = self.testconn.ttl(job.key)
    self.assertLess(job_ttl, ttl + 2)
    self.assertGreater(job_ttl, ttl - 2)
def test_default_failure_ttl(self):
    """Job TTL defaults to DEFAULT_FAILURE_TTL"""
    queue = Queue(connection=self.testconn)
    job = queue.enqueue(say_hello)

    registry = FailedJobRegistry(connection=self.testconn)
    key = registry.key

    timestamp = current_timestamp()
    registry.add(job)
    self.assertLess(
        self.testconn.zscore(key, job.id),
        timestamp + DEFAULT_FAILURE_TTL + 2
    )
    self.assertGreater(
        self.testconn.zscore(key, job.id),
        timestamp + DEFAULT_FAILURE_TTL - 2
    )

    timestamp = current_timestamp()
    ttl = 5
    registry.add(job, ttl=ttl)
    self.assertLess(
        self.testconn.zscore(key, job.id),
        timestamp + ttl + 2
    )
    self.assertGreater(
        self.testconn.zscore(key, job.id),
        timestamp + ttl - 2
    )
def test_job_delete_removes_itself_from_registries(self):
    """job.delete() should remove itself from job registries"""
    job = Job.create(func=fixtures.say_hello, status=JobStatus.FAILED,
                     connection=self.testconn, origin='default')
    job.save()
    registry = FailedJobRegistry(connection=self.testconn)
    registry.add(job, 500)

    job.delete()
    self.assertFalse(job in registry)

    job = Job.create(func=fixtures.say_hello, status=JobStatus.FINISHED,
                     connection=self.testconn, origin='default')
    job.save()

    registry = FinishedJobRegistry(connection=self.testconn)
    registry.add(job, 500)

    job.delete()
    self.assertFalse(job in registry)

    job = Job.create(func=fixtures.say_hello, status=JobStatus.STARTED,
                     connection=self.testconn, origin='default')
    job.save()

    registry = StartedJobRegistry(connection=self.testconn)
    registry.add(job, 500)

    job.delete()
    self.assertFalse(job in registry)

    job = Job.create(func=fixtures.say_hello, status=JobStatus.DEFERRED,
                     connection=self.testconn, origin='default')
    job.save()

    registry = DeferredJobRegistry(connection=self.testconn)
    registry.add(job, 500)

    job.delete()
    self.assertFalse(job in registry)

    job = Job.create(func=fixtures.say_hello, status=JobStatus.SCHEDULED,
                     connection=self.testconn, origin='default')
    job.save()

    registry = ScheduledJobRegistry(connection=self.testconn)
    registry.add(job, 500)

    job.delete()
    self.assertFalse(job in registry)
def fail_dependents(self, job, pipeline=None): """Fails all jobs in the given job's dependents set and clears it. When called without a pipeline, this method uses WATCH/MULTI/EXEC. If you pass a pipeline, only MULTI is called. The rest is up to the caller. """ from .registry import DeferredJobRegistry, FailedJobRegistry pipe = pipeline if pipeline is not None else self.connection.pipeline() dependents_key = job.dependents_key while True: try: # if a pipeline is passed, the caller is responsible for calling WATCH # to ensure all jobs are enqueued if pipeline is None: pipe.watch(dependents_key) dependent_job_ids = [as_text(_id) for _id in pipe.smembers(dependents_key)] jobs_to_fail = self.job_class.fetch_many( dependent_job_ids, connection=self.connection ) pipe.multi() for dependent in jobs_to_fail: deferred_job_registry = DeferredJobRegistry(dependent.origin, self.connection, job_class=self.job_class) deferred_job_registry.remove(dependent, pipeline=pipe) dependent.set_status(JobStatus.FAILED, pipeline=pipe) failed_job_registry = FailedJobRegistry(dependent.origin, dependent.connection, job_class=self.job_class) failed_job_registry.add(dependent, ttl=dependent.failure_ttl, exc_string="Dependency has failed!", pipeline=pipe) self.fail_dependents(job=dependent) pipe.delete(dependents_key) if pipeline is None: pipe.execute() break except WatchError: if pipeline is None: continue else: # if the pipeline comes from the caller, we re-raise the # exception as it it the responsibility of the caller to # handle it raise return len(dependent_job_ids)
def test_failed_jobs(self):
    """Ensure that failed jobs page works properly."""
    queue = get_queue('django_rq_test')
    queue_index = get_queue_index('django_rq_test')

    # Test that page doesn't fail when FailedJobRegistry is empty
    response = self.client.get(
        reverse('rq_failed_jobs', args=[queue_index]))
    self.assertEqual(response.status_code, 200)

    job = queue.enqueue(access_self)
    registry = FailedJobRegistry(queue.name, queue.connection)
    registry.add(job, 2)
    response = self.client.get(
        reverse('rq_failed_jobs', args=[queue_index]))
    self.assertEqual(response.context['jobs'], [job])
def _retry_handler(job: Job, *exc_info: Tuple[Union[str, bytes], ...]) -> bool:
    retries = job.meta.get("remaining_retries", 2)
    if retries > 0:
        retries -= 1
        job.meta["remaining_retries"] = retries
        job.set_status(JobStatus.QUEUED)
        job.exc_info = exc_info
        job.save()
        q = Queue(name=job.origin, connection=job.connection)
        q.enqueue_job(job)
        logger.info(f"Retrying job {job.id}")
    else:
        logger.error(f"Failing job {job.id}")
        q = Queue(name=job.origin, connection=job.connection)
        failed_queue = FailedJobRegistry(queue=q)
        # exc_info is a (type, value, traceback) triple; format it into a
        # string, since exc_string is stored in the job's Redis hash.
        exc_string = "".join(traceback.format_exception(*exc_info))
        failed_queue.add(job, exc_string=exc_string)
    return False
def retry_handler(job, *exc_info):
    retries = job.meta.get('remaining_retries', 2)
    if retries > 0:
        retries -= 1
        job.meta['remaining_retries'] = retries
        job.set_status(JobStatus.QUEUED)
        job.exc_info = exc_info
        job.save()
        q = Queue(name=job.origin, connection=job.connection)
        q.enqueue_job(job)
        logger.info(f'Retrying job {job.id}')
    else:
        logger.error(f'Failing job {job.id}')
        # Format the (type, value, traceback) triple for storage; the
        # original commented-out line hinted at the same via
        # Worker._get_safe_exception_string(traceback.format_exception(*exc_info))
        exc_string = ''.join(traceback.format_exception(*exc_info))
        q = Queue(name=job.origin, connection=job.connection)
        failed_queue = FailedJobRegistry(queue=q)
        failed_queue.add(job, exc_string=exc_string)
    return False
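# Hedged sketch of how a handler like the two above is typically installed:
# RQ workers accept custom handlers via the `exception_handlers` argument.
# Returning False stops the handler chain, so RQ's built-in handler (which
# would itself move the job to the FailedJobRegistry) never runs.
from redis import Redis
from rq import Queue, Worker

redis_conn = Redis()
queue = Queue('default', connection=redis_conn)
worker = Worker([queue], connection=redis_conn,
                exception_handlers=[retry_handler])
worker.work()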
def requeue_started(c, queue=None):
    """Requeue started jobs from the given queue."""
    if queue is None:
        raise ValueError("Please specify queue")
    q = Queue(
        queue,
        connection=Redis(host=REDIS_HOST, port=REDIS_PORT,
                         password=REDIS_PASSWORD, db=REDIS_DB),
    )
    registry_started = StartedJobRegistry(queue=q)
    registry_failed = FailedJobRegistry(queue=q)
    for job_id in registry_started.get_job_ids():
        job = q.fetch_job(job_id)
        if job is not None and job.is_started:
            registry_started.remove(job)
            registry_failed.add(job, ttl=job.failure_ttl,
                                exc_string="Started job moved for requeuing")
            job.requeue()
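# For comparison, a minimal sketch using FailedJobRegistry's own requeue()
# (available in recent RQ releases) rather than the manual move above;
# requeue() removes the job from the registry and puts it back on its
# origin queue.
from redis import Redis
from rq import Queue
from rq.registry import FailedJobRegistry

q = Queue('default', connection=Redis())
registry = FailedJobRegistry(queue=q)
for job_id in registry.get_job_ids():
    registry.requeue(job_id)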
def test_job_delete_removes_itself_from_registries(self):
    """job.delete() should remove itself from job registries"""
    connection = self.testconn
    job = Job.create(func=fixtures.say_hello, status=JobStatus.FAILED,
                     connection=self.testconn, origin='default')
    job.save()
    registry = FailedJobRegistry(connection=self.testconn)
    registry.add(job, 500)

    job.delete()
    self.assertFalse(job in registry)

    job = Job.create(func=fixtures.say_hello, status=JobStatus.FINISHED,
                     connection=self.testconn, origin='default')
    job.save()

    registry = FinishedJobRegistry(connection=self.testconn)
    registry.add(job, 500)

    job.delete()
    self.assertFalse(job in registry)

    job = Job.create(func=fixtures.say_hello, status=JobStatus.STARTED,
                     connection=self.testconn, origin='default')
    job.save()

    registry = StartedJobRegistry(connection=self.testconn)
    registry.add(job, 500)

    job.delete()
    self.assertFalse(job in registry)

    job = Job.create(func=fixtures.say_hello, status=JobStatus.DEFERRED,
                     connection=self.testconn, origin='default')
    job.save()

    registry = DeferredJobRegistry(connection=self.testconn)
    registry.add(job, 500)

    job.delete()
    self.assertFalse(job in registry)
def enqueue_call(self, func, args=None, kwargs=None, timeout=None,
                 result_ttl=None, ttl=None, failure_ttl=None,
                 description=None, depends_on=None, job_id=None,
                 at_front=False, meta=None, retry=None):
    """Creates a job to represent the delayed function call and enqueues it.

    It is much like `.enqueue()`, except that it takes the function's args
    and kwargs as explicit arguments. Any kwargs passed to this function
    contain options for RQ itself.
    """
    from .registry import FailedJobRegistry

    job = self.create_job(
        func, args=args, kwargs=kwargs, result_ttl=result_ttl, ttl=ttl,
        failure_ttl=failure_ttl, description=description,
        depends_on=depends_on, job_id=job_id, meta=meta,
        status=JobStatus.QUEUED, timeout=timeout, retry=retry
    )

    # If a _dependent_ job depends on any unfinished job, register all the
    # _dependent_ job's dependencies instead of enqueueing it.
    #
    # `Job#fetch_dependencies` sets WATCH on all dependencies. If
    # WatchError is raised when the pipeline is executed, that means
    # something else has modified either the set of dependencies or the
    # status of one of them. In this case, we simply retry.
    if depends_on is not None:
        with self.connection.pipeline() as pipe:
            while True:
                try:
                    pipe.watch(job.dependencies_key)

                    dependencies = job.fetch_dependencies(
                        watch=True,
                        pipeline=pipe
                    )

                    pipe.multi()

                    for dependency in dependencies:
                        if dependency.get_status(refresh=False) == JobStatus.FAILED:
                            job.set_status(JobStatus.FAILED, pipeline=pipe)
                            failed_job_registry = FailedJobRegistry(
                                job.origin, job.connection,
                                job_class=self.job_class)
                            failed_job_registry.add(
                                job, ttl=job.failure_ttl,
                                exc_string="Dependency has failed!",
                                pipeline=pipe)
                            pipe.execute()
                            return job

                    for dependency in dependencies:
                        if dependency.get_status(refresh=False) != JobStatus.FINISHED:
                            job.set_status(JobStatus.DEFERRED, pipeline=pipe)
                            job.register_dependency(pipeline=pipe)
                            job.save(pipeline=pipe)
                            job.cleanup(ttl=job.ttl, pipeline=pipe)
                            pipe.execute()
                            return job

                    break
                except WatchError:
                    continue

    job = self.enqueue_job(job, at_front=at_front)
    return job
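# Usage sketch for the dependency branch above, assuming a Queue class that
# exposes this enqueue_call. `fetch` and `report` are hypothetical task
# functions: `report` stays DEFERRED until `fetch` finishes, and is failed
# up front (and added to the FailedJobRegistry) if `fetch` has already FAILED.
from redis import Redis

queue = Queue('default', connection=Redis())
fetch_job = queue.enqueue_call(func=fetch)
report_job = queue.enqueue_call(func=report, depends_on=fetch_job)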