def run(self, params):
    """Requeue MongoDB jobs stuck in ``queued`` status that are missing from Redis.

    Scans every known queue and, for queues small enough to inspect fully,
    compares the job ids present on the Redis queue against MongoDB jobs
    whose status is still ``queued``. Jobs found in MongoDB but absent from
    Redis are requeued.

    :param params: dict of task parameters; honors ``max_queue_items``
                   (default 1000), the size above which a queue is skipped.
    :return: dict with ``fetched`` and ``requeued`` counters.
    """
    # If there are more than this much items on the queue, we don't try to check if our mongodb
    # jobs are still queued.
    max_queue_items = params.get("max_queue_items", 1000)

    stats = {"fetched": 0, "requeued": 0}

    # This was only checking in Redis and wasn't resistant to a redis-wide flush.
    # Doing Queue.all() is slower but covers more edge cases.
    # all_queues = Queue.all_known()
    all_queues = Queue.all()

    log.info("Checking %s queues" % len(all_queues))

    for queue_name in all_queues:

        queue = Queue(queue_name)
        queue_size = queue.size()

        # If the queue is raw, the jobs were only stored in redis so they are lost for good.
        if queue.is_raw:
            continue

        log.info("Checking queue %s" % queue_name)

        # Skip queues too large to enumerate; their jobs are presumably fine.
        if queue_size > max_queue_items:
            log.info("Stopping because queue %s has %s items" % (queue_name, queue_size))
            continue

        # Fetch one extra id so we can detect queues that exceed the limit
        # even when size() under-reported.
        queue_jobs_ids = set(queue.list_job_ids(limit=max_queue_items + 1))
        if len(queue_jobs_ids) >= max_queue_items:
            log.info(
                "Stopping because queue %s actually had more than %s items"
                % (queue_name, len(queue_jobs_ids))
            )
            continue

        # Walk queued MongoDB jobs oldest-first (_id ascending).
        for job_data in connections.mongodb_jobs.mrq_jobs.find(
            {"queue": queue_name, "status": "queued"}, projection={"_id": 1}
        ).sort([["_id", 1]]):

            stats["fetched"] += 1

            # Once we hit a job that IS on the Redis queue, we stop: jobs are
            # scanned in insertion order, so later ones are assumed present too.
            if str(job_data["_id"]) in queue_jobs_ids:
                log.info("Found job %s on queue %s. Stopping" % (job_data["_id"], queue.id))
                break

            # At this point, this job is not on the queue and we're sure
            # the queue is less than max_queue_items
            # We can safely requeue the job.
            log.info("Requeueing %s on %s" % (job_data["_id"], queue.id))

            stats["requeued"] += 1

            job = Job(job_data["_id"])
            job.requeue(queue=queue_name)

    return stats
def run(self, params):
    """Requeue jobs still marked ``queued`` in MongoDB but absent from their Redis queue.

    :param params: task parameters; ``max_queue_items`` (default 1000) caps
                   the size of queues we are willing to inspect.
    :return: dict with ``fetched`` and ``requeued`` counters.
    """
    # Queues larger than this are skipped: enumerating their ids would be
    # too expensive and their jobs are presumably still queued.
    threshold = params.get("max_queue_items", 1000)

    counters = {"fetched": 0, "requeued": 0}

    for name in Queue.all():
        q = Queue(name)
        size = q.size()

        # Raw queues keep their payloads only in Redis; nothing to recover.
        if q.is_raw:
            continue

        log.info("Checking queue %s" % name)

        if size > threshold:
            log.info("Stopping because queue %s has %s items" % (name, size))
            continue

        # Ask for one extra id so an under-reported size() still trips the guard below.
        known_ids = set(q.list_job_ids(limit=threshold + 1))
        if len(known_ids) >= threshold:
            log.info(
                "Stopping because queue %s actually had more than %s items"
                % (name, len(known_ids))
            )
            continue

        # Queued MongoDB jobs, oldest first.
        cursor = connections.mongodb_jobs.mrq_jobs.find(
            {"queue": name, "status": "queued"},
            projection={"_id": 1},
        ).sort([["_id", 1]])

        for doc in cursor:
            counters["fetched"] += 1
            oid = doc["_id"]

            # Jobs come back in insertion order: the first one still present
            # on the Redis queue means the rest are presumably fine too.
            if str(oid) in known_ids:
                log.info("Found job %s on queue %s. Stopping" % (oid, q.id))
                break

            # Missing from Redis and the queue is comfortably under the
            # threshold, so requeueing is safe.
            log.info("Requeueing %s on %s" % (oid, q.id))
            counters["requeued"] += 1
            Job(oid).requeue(queue=name)

    return counters
def api_datatables(unit):
    """Serve DataTables-formatted JSON for one dashboard unit.

    :param unit: one of ``"queues"``, ``"workers"``, ``"scheduled_jobs"``
                 or ``"jobs"``; selects which dataset is returned.
                 NOTE(review): any other value leaves ``data`` unassigned and
                 would raise at the ``data["sEcho"]`` line — presumably callers
                 only pass known units; confirm.
    :return: Flask JSON response with ``aaData``, ``iTotalDisplayRecords``
             and the echoed ``sEcho`` DataTables token.
    """
    # import time
    # time.sleep(5)

    collection = None
    sort = None
    # DataTables paging parameters.
    skip = int(request.args.get("iDisplayStart", 0))
    limit = int(request.args.get("iDisplayLength", 20))

    if unit == "queues":
        # Queues are assembled in Python (not from a Mongo collection).
        queues = []
        for name, jobs in Queue.all().items():
            queue = Queue(name)
            q = {
                "name": name,
                "jobs": jobs,  # MongoDB size
                "size": queue.size(),  # Redis size
                "is_sorted": queue.is_sorted,
                "is_timed": queue.is_timed,
                "is_raw": queue.is_raw,
                "is_set": queue.is_set
            }

            if queue.is_sorted:
                raw_config = cfg.get("raw_queues", {}).get(name, {})
                # Default graph window: +/- 7 days around now for timed queues,
                # a fixed 0-100 score range otherwise. The default is a lambda
                # so time.time() is evaluated per request, then called here.
                q["graph_config"] = raw_config.get("dashboard_graph", lambda: {
                    "start": time.time() - (7 * 24 * 3600),
                    "stop": time.time() + (7 * 24 * 3600),
                    "slices": 30
                } if queue.is_timed else {
                    "start": 0,
                    "stop": 100,
                    "slices": 30
                })()
                if q["graph_config"]:
                    q["graph"] = queue.get_sorted_graph(**q["graph_config"])

            if queue.is_timed:
                q["jobs_to_dequeue"] = queue.count_jobs_to_dequeue()

            queues.append(q)

        # Busiest queues first (combined Mongo + Redis counts).
        queues.sort(key=lambda x: -(x["jobs"] + x["size"]))

        data = {
            "aaData": queues,
            "iTotalDisplayRecords": len(queues)
        }

    elif unit == "workers":
        fields = None
        # Hide stopped workers unless explicitly requested.
        query = {"status": {"$nin": ["stop"]}}
        collection = connections.mongodb_jobs.mrq_workers
        sort = [("datestarted", -1)]

        if request.args.get("showstopped"):
            query = {}

    elif unit == "scheduled_jobs":
        collection = connections.mongodb_jobs.mrq_scheduled_jobs
        fields = None
        query = {}

    elif unit == "jobs":
        fields = None
        query = build_api_datatables_query(request)
        sort = [("_id", 1)]

        # We can't search easily params because we store it as decoded JSON in mongo :(
        # Add a string index?
        # if request.args.get("sSearch"):
        #     query.update(json.loads(request.args.get("sSearch")))

        collection = connections.mongodb_jobs.mrq_jobs

    # Mongo-backed units share this pagination path.
    if collection is not None:

        cursor = collection.find(query, projection=fields)

        if sort:
            cursor.sort(sort)

        if skip is not None:
            cursor.skip(skip)

        if limit is not None:
            cursor.limit(limit)

        data = {
            "aaData": list(cursor),
            # Second find() counts the unpaginated result set.
            "iTotalDisplayRecords": collection.find(query).count()
        }

    # Echo the DataTables draw token back to the client.
    data["sEcho"] = request.args["sEcho"]

    return jsonify(data)
def api_datatables(unit):
    """Serve DataTables-formatted JSON for one dashboard unit.

    :param unit: one of ``"queues"``, ``"workers"``, ``"scheduled_jobs"``
                 or ``"jobs"``; selects which dataset is returned.
                 NOTE(review): any other value leaves ``data`` unassigned and
                 would raise at the ``data["sEcho"]`` line — presumably callers
                 only pass known units; confirm.
    :return: Flask JSON response with ``aaData``, ``iTotalDisplayRecords``
             and the echoed ``sEcho`` DataTables token.
    """
    # import time
    # time.sleep(5)

    collection = None
    sort = None
    # DataTables paging parameters.
    skip = int(request.args.get("iDisplayStart", 0))
    limit = int(request.args.get("iDisplayLength", 20))

    if unit == "queues":
        # Queues are assembled in Python (not from a Mongo collection).
        queues = []
        for name, jobs in Queue.all().items():
            queue = Queue(name)
            q = {
                "name": name,
                "jobs": jobs,  # MongoDB size
                "size": queue.size(),  # Redis size
                "is_sorted": queue.is_sorted,
                "is_timed": queue.is_timed,
                "is_raw": queue.is_raw,
                "is_set": queue.is_set
            }

            if queue.is_sorted:
                raw_config = cfg.get("raw_queues", {}).get(name, {})
                # Default graph window: +/- 7 days around now for timed queues,
                # a fixed 0-100 score range otherwise. The default is a lambda
                # so time.time() is evaluated per request, then called here.
                q["graph_config"] = raw_config.get(
                    "dashboard_graph", lambda: {
                        "start": time.time() - (7 * 24 * 3600),
                        "stop": time.time() + (7 * 24 * 3600),
                        "slices": 30
                    } if queue.is_timed else {
                        "start": 0,
                        "stop": 100,
                        "slices": 30
                    })()
                if q["graph_config"]:
                    q["graph"] = queue.get_sorted_graph(**q["graph_config"])

            if queue.is_timed:
                q["jobs_to_dequeue"] = queue.count_jobs_to_dequeue()

            queues.append(q)

        # Busiest queues first (combined Mongo + Redis counts).
        queues.sort(key=lambda x: -(x["jobs"] + x["size"]))

        data = {"aaData": queues, "iTotalDisplayRecords": len(queues)}

    elif unit == "workers":
        fields = None
        # Hide stopped workers unless explicitly requested.
        query = {"status": {"$nin": ["stop"]}}
        collection = connections.mongodb_jobs.mrq_workers
        sort = [("datestarted", -1)]

        if request.args.get("showstopped"):
            query = {}

    elif unit == "scheduled_jobs":
        collection = connections.mongodb_jobs.mrq_scheduled_jobs
        fields = None
        query = {}

    elif unit == "jobs":
        fields = None
        query = build_api_datatables_query(request)
        sort = [("_id", 1)]

        # We can't search easily params because we store it as decoded JSON in mongo :(
        # Add a string index?
        # if request.args.get("sSearch"):
        #     query.update(json.loads(request.args.get("sSearch")))

        collection = connections.mongodb_jobs.mrq_jobs

    # Mongo-backed units share this pagination path.
    if collection is not None:

        # BUGFIX: was ``fields=fields`` — the PyMongo 2.x keyword, removed in
        # PyMongo 3 (raises TypeError). ``projection`` is the supported name
        # and matches the rest of this codebase.
        cursor = collection.find(query, projection=fields)

        if sort:
            cursor.sort(sort)

        if skip is not None:
            cursor.skip(skip)

        if limit is not None:
            cursor.limit(limit)

        data = {
            "aaData": list(cursor),
            # Second find() counts the unpaginated result set.
            # NOTE(review): Cursor.count() is deprecated/removed in recent
            # PyMongo; migrating to collection.count_documents(query) would be
            # the modern equivalent — left as-is to keep behavior identical.
            "iTotalDisplayRecords": collection.find(query).count()
        }

    # Echo the DataTables draw token back to the client.
    data["sEcho"] = request.args["sEcho"]

    return jsonify(data)