def check_stuck_analysis():
    """Force-fail analyses that have been running too long.

    In case an analysis is stuck for whatever reason, force its status to
    "failed" to avoid special exceptions; this function can run as a cron
    cleanup job.

    Returns:
        list: ids of the jobs that were detected as stuck and failed.
    """
    logger.info("started check_stuck_analysis")
    running_jobs = Job.objects.filter(status="running")
    logger.info("checking if %d jobs are stuck", len(running_jobs))

    # A job still "running" after this cutoff is considered stuck.
    # Hoisted out of the loop: the cutoff is the same for every job.
    stuck_cutoff = get_now() - datetime.timedelta(minutes=25)

    jobs_id_stuck = []
    for running_job in running_jobs:
        if stuck_cutoff > running_job.received_request_time:
            logger.error(
                "found stuck analysis, job_id:%s. "
                "Setting the job status to 'failed'",
                running_job.id,
            )
            jobs_id_stuck.append(running_job.id)
            general.set_job_status(running_job.id, "failed")
            # Record when we gave up on the job so reports show an end time.
            running_job.finished_analysis_time = get_now()
            running_job.save(update_fields=["finished_analysis_time"])

    logger.info("finished check_stuck_analysis")
    return jobs_id_stuck
def check_stuck_analysis():
    """Force-fail analyses that have been running too long.

    In case an analysis is stuck for whatever reason, force its status to
    "failed" to avoid special exceptions; this function can run as a cron
    cleanup job.

    Returns:
        list: ids of the jobs that were detected as stuck and failed.
    """
    logger.info("started check_stuck_analysis")
    running_jobs = Job.objects.filter(status="running")
    logger.info("checking if %d jobs are stuck", len(running_jobs))

    # A job still "running" after this cutoff is considered stuck.
    # Hoisted out of the loop: the cutoff is the same for every job.
    stuck_cutoff = get_now() - datetime.timedelta(minutes=25)

    jobs_id_stuck = []
    for running_job in running_jobs:
        if stuck_cutoff > running_job.received_request_time:
            logger.error(
                "found stuck analysis, job_id:%s. "
                "Setting the job to status 'failed'",
                running_job.id,
            )
            jobs_id_stuck.append(running_job.id)
            general.set_job_status(running_job.id, "failed", logger)

    logger.info("finished check_stuck_analysis")
    return jobs_id_stuck