def navbar_stats():
    """
    Retrieve object counts used for the navbar
    """
    # Check cache first
    redis = get_redis_connection()
    result = redis.get("navbar_stats")
    if result:
        result = json.loads(result)
        return jsonify(result)

    queues = (
        get_queue(QueueType.DOWNLOAD_OBJECT),
        get_queue(QueueType.CREATE_SIP),
        get_queue(QueueType.SUBMIT_SIP),
        get_queue(QueueType.CONFIRM_SIP)
    )

    result = {"queues": {}}

    for queue in queues:
        result["queues"][queue.name] = {
            "pending": queue.count,
            "processing": StartedJobRegistry(queue=queue).count
        }

    # Add failed
    result["failed"] = sum(
        [FailedJobRegistry(queue=queue).count for queue in queues]
    )

    # Cache result for 2 seconds
    redis.set("navbar_stats", json.dumps(result), ex=2)

    return jsonify(result)
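
# Response shape sketch (derived from navbar_stats above; the counts are
# illustrative and the queue key names assume QueueType values such as
# "download_object"):
#
#     {
#         "queues": {
#             "download_object": {"pending": 4, "processing": 1},
#             "create_sip": {"pending": 2, "processing": 1},
#             "submit_sip": {"pending": 0, "processing": 0},
#             "confirm_sip": {"pending": 1, "processing": 0}
#         },
#         "failed": 2
#     }
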
def get_heartbeats():
    """
    Get a dict containing the last timestamps for all heartbeat sources.

    This can be used to determine which procedures are failing for
    whatever reason.
    """
    redis = get_redis_connection()

    with redis.pipeline() as pipe:
        for source in HeartbeatSource:
            pipe.get(f"heartbeat:{source.value}")

        values = pipe.execute()

    result = {}

    for source, value in zip(HeartbeatSource, values):
        if value is not None:
            value = int(value)
            value = datetime.datetime.fromtimestamp(
                value, datetime.timezone.utc
            )

        result[source] = value

    return result
def submit_heartbeat(source):
    """
    Submit heartbeat to indicate that a procedure has been run successfully
    """
    redis = get_redis_connection()
    source = HeartbeatSource(source)

    key = f"heartbeat:{source.value}"
    redis.set(key, int(time.time()))
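
# Usage sketch (assumption, not part of the original code): a monitoring
# check that flags heartbeat sources which have not reported within the
# last 24 hours. The threshold is arbitrary.
def example_find_stale_heartbeats():
    threshold = (
        datetime.datetime.now(datetime.timezone.utc)
        - datetime.timedelta(hours=24)
    )
    stale = []
    for source, timestamp in get_heartbeats().items():
        if timestamp is None or timestamp < threshold:
            stale.append(source)
    return stale
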
def get_queue(queue_type):
    """
    Get RQ queue according to its QueueType

    :param QueueType queue_type: Queue to return
    """
    con = get_redis_connection()
    queue_type = QueueType(queue_type)
    queue = WorkflowQueue(queue_type.value, connection=con)
    return queue
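
# Usage sketch (assumption, not part of the original code): inspecting one
# of the workflow queues through the standard RQ API.
def example_list_pending_downloads():
    queue = get_queue(QueueType.DOWNLOAD_OBJECT)
    return {"pending": queue.count, "job_ids": queue.job_ids}
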
def wrapper(*args, **kwargs):
    # Inner wrapper of a per-object locking decorator; `func` is the
    # decorated function provided by the enclosing decorator.
    try:
        # Object ID given in kwargs
        object_id = int(kwargs["object_id"])
    except KeyError:
        # Object ID given in args
        object_id = int(args[0])

    # Use lock named with the object ID to ensure mutual exclusion
    redis = get_redis_connection()
    lock = redis_lock.Lock(redis, f"lock-object-{object_id}")

    with lock:
        return func(*args, **kwargs)
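
# Usage sketch (assumption, not part of the original code): assuming the
# enclosing decorator is named `lock_object` (hypothetical), it would be
# applied to per-object workflow steps like this:
@lock_object
def example_process_object(object_id):
    # Only one worker at a time can run this for a given object ID
    ...
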
@contextmanager
def lock_queues():
    """
    Context manager to lock all queues. This lock should be acquired when
    the workflow is affected directly (eg. enqueueing new jobs) or
    indirectly (eg. updating the database in a way that changes whether an
    object qualifies to be enqueued)
    """
    redis = get_redis_connection()
    lock = redis_lock.Lock(redis, "workflow-lock", expire=900)
    lock.acquire(blocking=True)
    try:
        yield lock
    finally:
        lock.release()
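
# Usage sketch (assumption, not part of the original code): enqueueing a
# new job while holding the workflow lock, as the docstring above
# recommends. `download_object` is a hypothetical task function; the job
# ID follows the "<queue type value>_<object id>" pattern that
# delete_jobs_for_object_id() below relies on.
def example_enqueue_download(object_id):
    with lock_queues():
        queue = get_queue(QueueType.DOWNLOAD_OBJECT)
        return queue.enqueue(
            download_object,  # hypothetical task function
            kwargs={"object_id": object_id},
            job_id=f"{QueueType.DOWNLOAD_OBJECT.value}_{object_id}"
        )
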
def delete_jobs_for_object_id(object_id):
    """
    Delete all jobs for the given object ID
    """
    object_id = int(object_id)
    redis = get_redis_connection()

    cancelled_count = 0

    for queue_type in QueueType:
        job_id = f"{queue_type.value}_{object_id}"
        try:
            Job.fetch(job_id, connection=redis).delete()
            cancelled_count += 1
        except NoSuchJobError:
            pass

    return cancelled_count
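
# Usage sketch (assumption, not part of the original code): cancelling all
# queued jobs for an object, e.g. before freezing it, while holding the
# workflow lock since the workflow is affected directly.
def example_cancel_object_jobs(object_id):
    with lock_queues():
        cancelled = delete_jobs_for_object_id(object_id)
    return cancelled
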
def overview_stats():
    """
    Retrieve real-time statistics used in the 'Overview' page
    """
    # Check cache first
    redis = get_redis_connection()
    result = redis.get("overview_stats")
    if result:
        result = json.loads(result)
        return jsonify(result)

    queues = (
        get_queue(QueueType.DOWNLOAD_OBJECT),
        get_queue(QueueType.CREATE_SIP),
        get_queue(QueueType.SUBMIT_SIP),
        get_queue(QueueType.CONFIRM_SIP)
    )

    job_count = sum([queue.count for queue in queues])
    failed_count = sum(
        [FailedJobRegistry(queue=queue).count for queue in queues]
    )

    total_count = db.session.query(MuseumObject).count()
    frozen_count = (
        db.session.query(MuseumObject)
        .filter(MuseumObject.frozen)
        .count()
    )
    submitted_count = (
        db.session.query(MuseumObject)
        .join(
            MuseumPackage,
            MuseumObject.latest_package_id == MuseumPackage.id
        )
        .filter(
            and_(
                MuseumObject.latest_package,
                MuseumPackage.rejected == False,
                MuseumPackage.preserved == False,
                MuseumPackage.uploaded
            )
        )
        .count()
    )
    rejected_count = (
        db.session.query(MuseumObject)
        .join(
            MuseumPackage,
            MuseumObject.latest_package_id == MuseumPackage.id
        )
        .filter(
            and_(
                MuseumObject.latest_package,
                MuseumPackage.rejected
            )
        )
        .count()
    )
    preserved_count = (
        db.session.query(MuseumObject)
        .with_transformation(MuseumObject.exclude_preservation_pending)
        .filter(MuseumObject.preserved)
        .count()
    )

    result = {
        "steps": {
            "pending": {
                "count": int(
                    total_count - job_count - failed_count - frozen_count
                    - rejected_count - submitted_count - preserved_count
                )
            },
        },
        "total_count": total_count
    }

    # Add the individual queues
    for queue in queues:
        result["steps"][queue.name] = {"count": queue.count}

    # Add counts outside of queues
    other_steps = [
        ("preserved", preserved_count),
        ("rejected", rejected_count),
        ("submitted", submitted_count),
        ("frozen", frozen_count),
        ("failed", failed_count)
    ]
    for name, count in other_steps:
        result["steps"][name] = {"count": count}

    # Cache result for 2 seconds
    redis.set("overview_stats", json.dumps(result), ex=2)

    return jsonify(result)
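
# Response shape sketch (derived from overview_stats above; the counts are
# illustrative and the queue step names assume QueueType values such as
# "download_object"):
#
#     {
#         "steps": {
#             "pending": {"count": 120},
#             "download_object": {"count": 4},
#             "create_sip": {"count": 2},
#             "submit_sip": {"count": 1},
#             "confirm_sip": {"count": 0},
#             "preserved": {"count": 950},
#             "rejected": {"count": 3},
#             "submitted": {"count": 10},
#             "frozen": {"count": 7},
#             "failed": {"count": 1}
#         },
#         "total_count": 1098
#     }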