def run(self, params):
    """Requeue jobs marked 'queued' in MongoDB but absent from their Redis queue.

    Walks every known queue small enough to inspect, compares the job ids
    present in Redis against MongoDB's queued jobs, and requeues the ones
    Redis appears to have lost.
    """

    # Above this size we give up on a queue: listing all of its ids to
    # diff them against MongoDB would be too expensive.
    threshold = params.get("max_queue_items", 1000)

    counters = {"fetched": 0, "requeued": 0}

    for name in Queue.all_known():
        q = Queue(name)
        size = q.size()

        # Raw queues keep their payloads only in Redis, so there is no
        # MongoDB record to requeue from.
        if q.is_raw:
            continue

        log.info("Checking queue %s" % name)

        if size > threshold:
            log.info("Stopping because queue %s has %s items" % (name, size))
            continue

        # Fetch one extra id so we can detect a size() report that was
        # inaccurate (the queue actually holding >= threshold items).
        redis_ids = set(q.list_job_ids(limit=threshold + 1))
        if len(redis_ids) >= threshold:
            log.info(
                "Stopping because queue %s actually had more than %s items" % (name, len(redis_ids)))
            continue

        cursor = connections.mongodb_jobs.mrq_jobs.find(
            {"queue": name, "status": "queued"},
            projection={"_id": 1}
        ).sort([["_id", 1]])

        for doc in cursor:
            counters["fetched"] += 1

            # As soon as we hit a job that IS still in Redis, the older
            # (lower _id) lost jobs have all been handled: stop this queue.
            if str(doc["_id"]) in redis_ids:
                log.info("Found job %s on queue %s. Stopping" % (doc["_id"], q.id))
                break

            # The job is not in Redis and the queue is small enough that the
            # listing above was exhaustive: safe to requeue.
            log.info("Requeueing %s on %s" % (doc["_id"], q.id))
            counters["requeued"] += 1

            Job(doc["_id"]).requeue(queue=name)

    return counters
def run(self, params):
    """Requeue jobs that MongoDB says are queued but that are missing from Redis.

    Params (dict):
        max_queue_items (int, default 1000): queues larger than this are
            skipped entirely — diffing them against MongoDB is too costly.

    Returns:
        dict with "fetched" (MongoDB queued jobs examined) and "requeued"
        (jobs pushed back onto their queue) counters.
    """
    # If there are more than this much items on the queue, we don't try to check if our mongodb
    # jobs are still queued.
    max_queue_items = params.get("max_queue_items", 1000)

    stats = {"fetched": 0, "requeued": 0}

    # This was only checking in Redis and wasn't resistant to a redis-wide flush.
    # Doing Queue.all() is slower but covers more edge cases.
    # all_queues = Queue.all_known()
    all_queues = Queue.all()

    log.info("Checking %s queues" % len(all_queues))

    for queue_name in all_queues:
        queue = Queue(queue_name)
        queue_size = queue.size()

        # If the queue is raw, the jobs were only stored in redis so they are lost for good.
        if queue.is_raw:
            continue

        log.info("Checking queue %s" % queue_name)

        if queue_size > max_queue_items:
            log.info("Stopping because queue %s has %s items" % (queue_name, queue_size))
            continue

        # Fetch one extra id so an undercounting size() is detected below.
        queue_jobs_ids = set(queue.list_job_ids(limit=max_queue_items + 1))
        if len(queue_jobs_ids) >= max_queue_items:
            log.info(
                "Stopping because queue %s actually had more than %s items" % (queue_name, len(queue_jobs_ids))
            )
            continue

        # Oldest first: once we meet a job still present in Redis, every
        # older lost job has already been handled and we can stop.
        for job_data in connections.mongodb_jobs.mrq_jobs.find(
            {"queue": queue_name, "status": "queued"}, projection={"_id": 1}
        ).sort([["_id", 1]]):
            stats["fetched"] += 1

            if str(job_data["_id"]) in queue_jobs_ids:
                log.info("Found job %s on queue %s. Stopping" % (job_data["_id"], queue.id))
                break

            # At this point, this job is not on the queue and we're sure
            # the queue is less than max_queue_items
            # We can safely requeue the job.
            log.info("Requeueing %s on %s" % (job_data["_id"], queue.id))
            stats["requeued"] += 1

            job = Job(job_data["_id"])
            job.requeue(queue=queue_name)

    return stats
def run(self, params):
    """Requeue queued MongoDB jobs that are missing from their Redis queue.

    Iterates MongoDB's queued jobs oldest-first; for each job, checks whether
    its id is still present on its Redis queue and requeues it if not. Stops
    at the first job found in Redis or the first queue too large to inspect.

    Params (dict):
        max_queue_items (int, default 1000): if a job's queue holds more
            items than this, abort the whole pass.

    Returns:
        dict with "fetched" and "requeued" counters.
    """
    collection = connections.mongodb_jobs.mrq_jobs

    # If there are more than this much items on the queue, we don't try to check if our mongodb
    # jobs are still queued.
    max_queue_items = params.get("max_queue_items", 1000)

    stats = {"fetched": 0, "requeued": 0}

    # Fix: pymongo 3 removed the `fields` keyword from find(); the
    # projection must be passed as `projection=` (as the other variants of
    # this task in the file already do).
    for job_data in collection.find({
        "status": "queued"
    }, projection={
        "_id": 1,
        "queue": 1
    }).sort([("_id", 1)]):
        stats["fetched"] += 1

        queue = Queue(job_data["queue"])
        queue_size = queue.size()
        if queue_size > max_queue_items:
            log.info("Stopping because queue %s has %s items" % (queue, queue_size))
            break

        # Fetch one extra id so an undercounting size() is detected here.
        queue_jobs_ids = set(queue.list_job_ids(limit=max_queue_items + 1))
        if len(queue_jobs_ids) >= max_queue_items:
            log.info(
                "Stopping because queue %s actually had more than %s items" %
                (queue, len(queue_jobs_ids)))
            break

        # The oldest job still in Redis marks the end of the lost range.
        if str(job_data["_id"]) in queue_jobs_ids:
            log.info("Stopping because we found job %s in redis" % job_data["_id"])
            break

        # At this point, this job is not on the queue and we're sure
        # the queue is less than max_queue_items
        # We can safely requeue the job.
        log.info("Requeueing %s on %s" % (job_data["_id"], queue.id))

        stats["requeued"] += 1

        job = Job(job_data["_id"])
        job.requeue(queue=job_data["queue"])

    return stats
def run(self, params):
    """Requeue queued MongoDB jobs that are missing from their Redis queue.

    Iterates MongoDB's queued jobs oldest-first and requeues each one whose
    id is absent from its Redis queue, stopping at the first job found in
    Redis or the first queue too large to inspect.

    Params (dict):
        max_queue_items (int, default 1000): if a job's queue holds more
            items than this, abort the whole pass.

    Returns:
        dict with "fetched" and "requeued" counters.
    """
    self.collection = connections.mongodb_jobs.mrq_jobs

    # If there are more than this much items on the queue, we don't try to check if our mongodb
    # jobs are still queued.
    max_queue_items = params.get("max_queue_items", 1000)

    stats = {
        "fetched": 0,
        "requeued": 0
    }

    # Fix: pymongo 3 removed the `fields` keyword from find(); the
    # projection must be passed as `projection=` (as the other variants of
    # this task in the file already do).
    for job_data in self.collection.find({
        "status": "queued"
    }, projection={"_id": 1, "queue": 1}).sort([("_id", 1)]):
        stats["fetched"] += 1

        queue = Queue(job_data["queue"])
        queue_size = queue.size()
        if queue_size > max_queue_items:
            log.info("Stopping because queue %s has %s items" % (queue, queue_size))
            break

        # Fetch one extra id so an undercounting size() is detected here.
        queue_jobs_ids = set(queue.list_job_ids(limit=max_queue_items + 1))
        if len(queue_jobs_ids) >= max_queue_items:
            log.info("Stopping because queue %s actually had more than %s items" % (queue, len(queue_jobs_ids)))
            break

        # The oldest job still in Redis marks the end of the lost range.
        if str(job_data["_id"]) in queue_jobs_ids:
            log.info("Stopping because we found job %s in redis" % job_data["_id"])
            break

        # At this point, this job is not on the queue and we're sure the queue is less than max_queue_items
        # We can safely requeue the job.
        log.info("Requeueing %s on %s" % (job_data["_id"], queue.id))

        stats["requeued"] += 1

        job = Job(job_data["_id"])
        job.requeue(queue=job_data["queue"])

    return stats