def enqueue_job(request, queue_index, job_id):
    """ Enqueue deferred jobs

    GET renders the confirmation template; POST enqueues the job onto its
    queue, removes it from the registry it was parked in, and redirects back
    to the job detail page.
    """
    queue_index = int(queue_index)
    queue = get_queue_by_index(queue_index)
    job = Job.fetch(job_id, connection=queue.connection)

    if request.method == 'POST':
        # Capture the status *before* enqueueing: queue.enqueue_job() sets the
        # job's status to QUEUED, so checking afterwards would never match the
        # DEFERRED/FINISHED branches and the job would be left behind in its
        # old registry.
        status = job.get_status()

        # Remove job from correct registry if needed
        if status == JobStatus.DEFERRED:
            registry = DeferredJobRegistry(queue.name, queue.connection)
            registry.remove(job)
        elif status == JobStatus.FINISHED:
            registry = FinishedJobRegistry(queue.name, queue.connection)
            registry.remove(job)

        queue.enqueue_job(job)
        messages.info(request, 'You have successfully enqueued %s' % job.id)
        return redirect('rq_job_detail', queue_index, job_id)

    context_data = {
        'queue_index': queue_index,
        'job': job,
        'queue': queue,
    }
    return render(request, 'django_rq/delete_job.html', context_data)
def enqueue_dependents(self, job, pipeline=None):
    """Enqueues all jobs in the given job's dependents set and clears it.

    When called without a pipeline, this method uses WATCH/MULTI/EXEC.
    If you pass a pipeline, only MULTI is called. The rest is up to the
    caller.
    """
    from .registry import DeferredJobRegistry

    pipe = pipeline if pipeline is not None else self.connection.pipeline()
    dependents_key = job.dependents_key

    while True:
        try:
            # if a pipeline is passed, the caller is responsible for calling WATCH
            # to ensure all jobs are enqueued
            if pipeline is None:
                pipe.watch(dependents_key)

            dependent_job_ids = [
                as_text(_id) for _id in pipe.smembers(dependents_key)
            ]

            # fetch_many() yields None for ids whose job hash has expired or
            # been deleted; guard against that before dereferencing, then keep
            # only dependents whose remaining dependencies are satisfied.
            jobs_to_enqueue = [
                dependent_job for dependent_job in self.job_class.fetch_many(
                    dependent_job_ids,
                    connection=self.connection,
                    serializer=self.serializer,
                ) if dependent_job and dependent_job.dependencies_are_met(
                    exclude_job_id=job.id,
                    pipeline=pipe,
                )
            ]

            pipe.multi()

            for dependent in jobs_to_enqueue:
                registry = DeferredJobRegistry(dependent.origin,
                                               self.connection,
                                               job_class=self.job_class)
                registry.remove(dependent, pipeline=pipe)
                if dependent.origin == self.name:
                    self.enqueue_job(dependent, pipeline=pipe)
                else:
                    # The dependent belongs to a different queue; enqueue it
                    # there, reusing this queue's connection.
                    queue = self.__class__(name=dependent.origin,
                                           connection=self.connection)
                    queue.enqueue_job(dependent, pipeline=pipe)

            pipe.delete(dependents_key)

            if pipeline is None:
                pipe.execute()

            break
        except WatchError:
            if pipeline is None:
                # The watched key changed under us; retry the whole
                # read-check-enqueue cycle.
                continue
            else:
                # if the pipeline comes from the caller, we re-raise the
                # exception as it is the responsibility of the caller to
                # handle it
                raise
def fail_dependents(self, job, pipeline=None):
    """Fails all jobs in the given job's dependents set and clears it.

    When called without a pipeline, this method uses WATCH/MULTI/EXEC.
    If you pass a pipeline, only MULTI is called. The rest is up to the
    caller.

    Returns the number of dependent job ids that were processed.
    """
    from .registry import DeferredJobRegistry, FailedJobRegistry

    pipe = pipeline if pipeline is not None else self.connection.pipeline()
    dependents_key = job.dependents_key

    while True:
        try:
            # if a pipeline is passed, the caller is responsible for calling WATCH
            # to ensure all jobs are enqueued
            if pipeline is None:
                pipe.watch(dependents_key)

            dependent_job_ids = [as_text(_id)
                                 for _id in pipe.smembers(dependents_key)]

            # Pass the queue's serializer, consistent with enqueue_dependents();
            # fetch_many() yields None for expired/deleted job ids, so filter
            # those out before dereferencing attributes.
            jobs_to_fail = [
                dependent_job for dependent_job in self.job_class.fetch_many(
                    dependent_job_ids,
                    connection=self.connection,
                    serializer=self.serializer,
                ) if dependent_job is not None
            ]

            pipe.multi()

            for dependent in jobs_to_fail:
                deferred_job_registry = DeferredJobRegistry(
                    dependent.origin,
                    self.connection,
                    job_class=self.job_class,
                )
                deferred_job_registry.remove(dependent, pipeline=pipe)
                dependent.set_status(JobStatus.FAILED, pipeline=pipe)
                failed_job_registry = FailedJobRegistry(
                    dependent.origin,
                    dependent.connection,
                    job_class=self.job_class,
                )
                failed_job_registry.add(dependent,
                                        ttl=dependent.failure_ttl,
                                        exc_string="Dependency has failed!",
                                        pipeline=pipe)
                # Cascade: fail this dependent's own dependents. NOTE(review):
                # this runs in its own transaction (no pipeline passed), so it
                # executes immediately while the outer MULTI is still buffered
                # — confirm this ordering is intended.
                self.fail_dependents(job=dependent)

            pipe.delete(dependents_key)

            if pipeline is None:
                pipe.execute()

            break
        except WatchError:
            if pipeline is None:
                continue
            else:
                # if the pipeline comes from the caller, we re-raise the
                # exception as it is the responsibility of the caller to
                # handle it
                raise

    return len(dependent_job_ids)