def clear_all_redis_jobs():
    """Wipe every scheduled job and every redis queue on the app.

    First deletes each job tracked in the auto-jobs scheduled registry
    (removing the job data itself, not just the registry entry), then
    deletes every queue registered on ``current_app.redis_queues``.

    Returns:
        A JSON response wrapping a success payload under ``result``.
    """
    scheduled = ScheduledJobRegistry(queue=current_app.auto_jobs)
    # delete_job=True removes the job hash from redis, not just the registry entry
    for scheduled_id in scheduled.get_job_ids():
        scheduled.remove(scheduled_id, delete_job=True)

    for rq_queue in current_app.redis_queues:
        rq_queue.delete()

    return jsonify(result={
        'status': 'success',
        'msg': 'Cleared all redis jobs',
        'issues': []
    })
def scheduled_jobs(request, queue_index):
    """Render a paginated listing of a queue's scheduled-job registry.

    Args:
        request: The incoming HTTP request; ``?page=N`` selects the page
            (defaults to 1).
        queue_index: Index of the queue to inspect (string or int).

    Returns:
        The rendered ``django_rq/jobs.html`` template with queue, job,
        and pagination context.
    """
    queue_index = int(queue_index)
    queue = get_queue_by_index(queue_index)
    registry = ScheduledJobRegistry(queue.name, queue.connection)

    items_per_page = 100
    num_jobs = len(registry)
    page = int(request.GET.get('page', 1))
    jobs = []

    if num_jobs > 0:
        # Build the full page range and slice out the requested page of ids.
        last_page = int(ceil(num_jobs / items_per_page))
        page_range = range(1, last_page + 1)
        offset = items_per_page * (page - 1)
        job_ids = registry.get_job_ids(offset, offset + items_per_page - 1)

        jobs = Job.fetch_many(job_ids, connection=queue.connection)
        for job_id, job in zip(job_ids, jobs):
            if job is None:
                # Stale registry entry: the job data no longer exists.
                registry.remove(job_id)
            else:
                job.scheduled_at = registry.get_scheduled_time(job)
    else:
        page_range = []

    context_data = {
        'queue': queue,
        'queue_index': queue_index,
        'jobs': jobs,
        'num_jobs': num_jobs,
        'page': page,
        'page_range': page_range,
        'job_status': 'Scheduled',
    }
    return render(request, 'django_rq/jobs.html', context_data)
def schedual_jobs(repeat_in=30, initial_run=False):
    """(Re)install the repeating maintenance jobs on the auto-jobs queue.

    If the sentinel job id ``'schedule_job'`` is absent from the scheduled
    registry, the registry is cleared and the full set of periodic tasks is
    enqueued at staggered delays; the last enqueue re-schedules this
    function itself under the sentinel id, making the cycle self-sustaining.
    If the sentinel is present, the cycle is already running and nothing is
    re-enqueued.

    NOTE: the name keeps the historical misspelling ('schedual') because the
    function enqueues itself by reference and callers/jobs use this name.

    Args:
        repeat_in: Base delay in minutes before the first task; subsequent
            tasks are staggered 4 minutes apart after it.
        initial_run: When truthy, also enqueue two near-immediate status
            checks (1 and 2 minutes out) regardless of the sentinel.
    """
    registry = ScheduledJobRegistry(queue=current_app.auto_jobs)

    # get_job_ids() is directly iterable/testable; no need to wrap in list().
    if 'schedule_job' not in registry.get_job_ids():
        # Stale or missing cycle: drop whatever is scheduled and start fresh.
        for job_id in registry.get_job_ids():
            registry.remove(job_id)

        print('Setting repeat jobs..')
        current_app.auto_jobs.enqueue_in(
            timedelta(minutes=repeat_in), task_check_uniref_has_blast_source)
        current_app.auto_jobs.enqueue_in(
            timedelta(minutes=repeat_in + 4), check_random_uniref)
        current_app.auto_jobs.enqueue_in(
            timedelta(minutes=repeat_in + 8), task_check_blast_status)
        current_app.auto_jobs.enqueue_in(
            timedelta(minutes=repeat_in + 12), task_check_ssn_status)
        # Sentinel: re-enqueue this function so the cycle repeats.
        current_app.auto_jobs.enqueue_in(
            timedelta(minutes=repeat_in + 16), schedual_jobs,
            job_id='schedule_job')

    # Truthiness test instead of '== True' (PEP 8).
    if initial_run:
        current_app.auto_jobs.enqueue_in(
            timedelta(minutes=1), task_check_blast_status)
        current_app.auto_jobs.enqueue_in(
            timedelta(minutes=2), task_check_ssn_status)
from apscheduler.schedulers.background import BackgroundScheduler

# Flask app with permissive CORS; redis connection and default RQ queue.
app = Flask(__name__)
cors = CORS(app, resources={r"/*": {"origins": "*"}})
redis_conn = Redis(host='redis', port=6379)
q = Queue(connection=redis_conn)

# On startup, purge any leftover failed jobs (logging each id removed).
failed_registry = FailedJobRegistry(queue=q)
for job_id in failed_registry.get_job_ids():
    app.logger.error("fal del-> " + str(job_id))
    failed_registry.remove(job_id, delete_job=True)

# Likewise purge any leftover scheduled jobs.
sch_registry = ScheduledJobRegistry(queue=q)
for job_id in sch_registry.get_job_ids():
    app.logger.error("sch del-> " + str(job_id))
    sch_registry.remove(job_id, delete_job=True)

def job_function():
    # Poll pending PDF rows and collect results of finished RQ jobs.
    # NOTE(review): this definition appears truncated in the visible chunk —
    # `ablob` is read but never used here; confirm the remainder of the body.
    con = conDB.newCon()
    data = conDB.getPDFs(con).fetchall()
    # take the job-result timeout into account
    # check whether status is 0 or 1 before calling result
    for i in data:
        # presumably i[6] is the status column and i[5] the RQ job id —
        # TODO confirm against the conDB.getPDFs schema
        if i[6] == 0:
            # status 0: fetch the job to obtain its result
            job = Job.fetch(i[5], connection=redis_conn)
            # check whether it has already finished or not
            if job.get_status() == "finished":
                pdfName = job.result[1]  # full path
                with open(pdfName, 'rb') as input_file:
                    ablob = input_file.read()