def get(self):
    """Return job ids filtered by status and queue.

    Query args:
        job_status: one of Config.RQ_JOB_STATUS
            ('queued', 'started', 'finished', 'failed', 'deferred').
        queue_name: a queue listed in Config.RQ_QUEUES_ALL, or 'all'
            to aggregate across every queue in Config.RQ_QUEUES.

    Returns:
        dict with a StatesCode and, on success, the list of job ids.
    """
    args = request.args
    job_status = args.get('job_status')
    queue_name = args.get('queue_name')
    if job_status not in Config.RQ_JOB_STATUS:
        return {
            'code': StatesCode.JOB_STATUS_NO_EXIST,
            'message': '任务状态不存在!'
        }
    if queue_name not in Config.RQ_QUEUES_ALL:
        return {'code': StatesCode.QUEUE_NOT_EXIST, 'message': '任务队列不存在!'}

    # Non-queued statuses each map to an RQ registry class; 'queued' jobs
    # live on the queue object itself rather than in a registry.
    registry_by_status = {
        'started': StartedJobRegistry,
        'finished': FinishedJobRegistry,
        'failed': FailedJobRegistry,
        'deferred': DeferredJobRegistry,
    }

    def ids_for(name):
        # Job ids for a single queue in the requested status; statuses the
        # mapping does not know contribute nothing (matches the original
        # if/elif chain falling through to an empty list).
        if job_status == 'queued':
            return queue_dict[name].get_job_ids()
        registry_cls = registry_by_status.get(job_status)
        if registry_cls is None:
            return []
        return registry_cls(queue=queue_dict[name]).get_job_ids()

    job_list = []
    if queue_name == 'all':
        for name in Config.RQ_QUEUES:
            job_list += ids_for(name)
    else:
        job_list = ids_for(queue_name)
    return {'code': StatesCode.SUCCESS, 'data': job_list}
def get_tasks():
    """Return JSON describing the current user's queued/started/finished jobs.

    Jobs are filtered to those whose meta token matches the session's
    decoded auth token. Returns (json, 200) on success, (error, 500) on
    any failure.
    """
    try:
        with Connection(redis.from_url(current_app.config['REDIS_URL'])):
            q = Queue()
            started = StartedJobRegistry().get_job_ids()
            finished = FinishedJobRegistry().get_job_ids()
            jobs = started + q.get_job_ids() + finished
            # Decode once outside the loop: the token is loop-invariant.
            token = Authentication.decode_auth_token(
                current_app.config['SECRET_KEY'], session['auth_token'])
            objects = []
            for element in jobs:
                task = q.fetch_job(element)
                # fetch_job returns None for ids whose job hash expired; the
                # original read task.meta before its None check, which could
                # raise AttributeError and turn the whole response into a 500.
                if task is None:
                    objects.append({'status': 'error'})
                    continue
                if task.meta['token'] == token:
                    objects.append({
                        'status': 'success',
                        'data': {
                            'task_id': task.get_id(),
                            'task_status': task.get_status(),
                            'task_result': task.result,
                            'task_name': task.meta['task_name'],
                            'file_name': task.meta['file_name']
                        }
                    })
            return jsonify(objects), 200
    except Exception as e:
        return jsonify({'error': e.__str__()}), 500
def check_run_queue():
    """Sweep the run queue: record failed runs, then promote finished runs to upload.

    Side effects: updates external status records, removes per-run folders,
    deletes Redis job hashes, and enqueues one upload task per finished run.
    """
    # Failed jobs
    for id in failed_jobs_by_queue(RunQueue):
        # Mark the run failed first, then try to enrich with the traceback.
        update_status(id, "PRIMEval", "Run", 0, 0)
        if os.path.exists(run_folder + str(id)):
            shutil.rmtree(run_folder + str(id))
        try:
            error_message = Job.fetch(str(id), connection=redisClient).exc_info
            # Keep only the tail of the traceback and base64-encode it so it
            # is safe to store/transport.
            error_message = error_message[-1000:]
            error_message = base64.urlsafe_b64encode(
                error_message.encode()).decode()
            Job.fetch(str(id), connection=redisClient).delete()
            update_status(id, "PRIMEval", "Run", 0, 0, error_message)
        except:
            # NOTE(review): bare except that *returns* aborts the whole sweep,
            # abandoning the remaining failed AND all finished jobs until the
            # next invocation — presumably this runs on a schedule; confirm.
            return
    # Finished jobs
    for id in FinishedJobRegistry('RunQueue', connection=redisClient).get_job_ids():
        update_status(id, "PRIMEval", "Run", 0, 1)
        try:
            Job.fetch(str(id), connection=redisClient).delete()
        except:
            # NOTE(review): same early-return pattern as above — confirm that
            # skipping the rest of the finished jobs here is intentional.
            return
        # Hand the run off to the upload stage, reusing the run's job id.
        UploadQueue.enqueue(upload_results, args=(id, ), job_id=str(id), timeout=1200)
        update_status(id, "Upload", "Queued", 1, 0)
def get_task_list():
    """List started, finished and failed jobs on the 'default' queue.

    Returns:
        (json, 201) where json is {"tasks": [{"guid": ..., "status": ...}, ...]}.
    """
    tasks = []
    # The original repeated the same scan three times verbatim; one loop over
    # the registry classes keeps the ordering (started, finished, failed).
    for registry_cls in (StartedJobRegistry, FinishedJobRegistry, FailedJobRegistry):
        registry = registry_cls('default', connection=connection)
        for job_id in registry.get_job_ids():
            j = Job.fetch(job_id, connection=connection)
            tasks.append(dict(guid=j.id, status=j.get_status()))
    return jsonify({
        "tasks": tasks
    }), 201
def get_queue_registry_jobs_count(queue_name, registry_name, offset, per_page):
    """Return (total_count, serialized_jobs) for one registry of a queue.

    registry_name selects 'queued' (the queue itself) or one of the RQ
    registries; offset/per_page select the page of job ids to serialize.
    """
    queue = Queue(queue_name)
    registry_classes = {
        "failed": FailedJobRegistry,
        "deferred": DeferredJobRegistry,
        "started": StartedJobRegistry,
        "finished": FinishedJobRegistry,
    }
    if registry_name != "queued":
        # get_job_ids takes an inclusive end index, so translate the page
        # size into the index of the page's last element.
        if per_page >= 0:
            per_page = offset + (per_page - 1)
    registry_cls = registry_classes.get(registry_name)
    current_queue = queue if registry_cls is None else registry_cls(queue_name)

    total_items = current_queue.count
    job_ids = current_queue.get_job_ids(offset, per_page)
    fetched_jobs = (queue.fetch_job(job_id) for job_id in job_ids)
    # Expired/missing jobs come back as None and are skipped.
    jobs = [serialize_job(job) for job in fetched_jobs if job is not None]
    return (total_items, jobs)
def empty_queue(queue_name, registry_name):
    """Delete every job in one registry (or the queue itself) of *queue_name*."""
    registry_classes = {
        "failed": FailedJobRegistry,
        "deferred": DeferredJobRegistry,
        "started": StartedJobRegistry,
        "scheduled": ScheduledJobRegistry,
        "finished": FinishedJobRegistry,
    }
    if registry_name == "queued":
        # Plain queued jobs are dropped wholesale via Queue.empty().
        Queue(queue_name).empty()
    elif registry_name in registry_classes:
        # Registry entries are deleted one by one through the job-delete view.
        registry = registry_classes[registry_name](queue_name)
        for job_id in registry.get_job_ids():
            delete_job_view(job_id)
    return dict(status="OK")
def is_training(cls, project):
    """Report whether a training job is currently running for *project*.

    Returns a dict with 'is_training' plus backend and model-version details;
    'backend' is 'none' when no Redis queue is configured, 'redis' otherwise.
    """
    if not cls.has_active_model(project):
        return {'is_training': False}
    m = cls.get(project)
    if cls.without_redis():
        # No job queue available: trust the model object's own flag.
        return {
            'is_training': m.is_training,
            'backend': 'none',
            'model_version': m.model_version
        }
    else:
        started_jobs = StartedJobRegistry(cls._redis_queue.name, cls._redis_queue.connection).get_job_ids()
        finished_jobs = FinishedJobRegistry(cls._redis_queue.name, cls._redis_queue.connection).get_job_ids()
        failed_jobs = FailedJobRegistry(cls._redis_queue.name, cls._redis_queue.connection).get_job_ids()
        # Running = started but neither finished nor failed.
        running_jobs = list(set(started_jobs) - set(finished_jobs + failed_jobs))
        logger.debug('Running jobs: ' + str(running_jobs))
        for job_id in running_jobs:
            job = Job.fetch(job_id, connection=cls._redis)
            # Only a job tagged with this project's name counts as its run.
            if job.meta.get('project') == project:
                return {
                    'is_training': True,
                    'job_id': job_id,
                    'backend': 'redis',
                    'model_version': m.model_version,
                }
        return {
            'is_training': False,
            'backend': 'redis',
            'model_version': m.model_version
        }
def serialize_queues(instance_number, queues):
    """Serialize each queue with per-registry job counts and overview URLs."""

    def overview_url(queue_name, registry_name):
        # Every link shares the same pagination defaults: 8 items, page 1.
        return url_for(
            ".jobs_overview",
            instance_number=instance_number,
            queue_name=queue_name,
            registry_name=registry_name,
            per_page="8",
            page="1",
        )

    serialized = []
    for q in queues:
        serialized.append(
            dict(
                name=q.name,
                count=q.count,
                queued_url=overview_url(q.name, "queued"),
                failed_job_registry_count=FailedJobRegistry(q.name).count,
                failed_url=overview_url(q.name, "failed"),
                started_job_registry_count=StartedJobRegistry(q.name).count,
                started_url=overview_url(q.name, "started"),
                scheduled_job_registry_count=ScheduledJobRegistry(q.name).count,
                scheduled_url=overview_url(q.name, "scheduled"),
                deferred_job_registry_count=DeferredJobRegistry(q.name).count,
                deferred_url=overview_url(q.name, "deferred"),
                finished_job_registry_count=FinishedJobRegistry(q.name).count,
                finished_url=overview_url(q.name, "finished"),
            )
        )
    return serialized
def __init__(self, name, connection):
    """Set up the 'vivarium' work queue and its started/finished registries.

    Args:
        name: label used only to bind the logging context.
        connection: Redis connection shared by the queue and both registries.
    """
    self._name = name
    self._logger = logger.bind(queue=name)
    self._queue = rq.Queue('vivarium', connection=connection)
    # Registries must share the queue's job class so fetched jobs
    # deserialize consistently with those the queue enqueues.
    self._wip = StartedJobRegistry('vivarium', connection=connection, job_class=self._queue.job_class)
    self._finished = FinishedJobRegistry('vivarium', connection=connection, job_class=self._queue.job_class)
    # Aggregate progress counters; 'done' is a float fraction (0. .. 1.).
    self._status = {
        'total': 0,
        'pending': 0,
        'running': 0,
        'failed': 0,
        'finished': 0,
        'done': 0.,
        'workers': 0
    }
    self._failed = False
    self._completed = False
    # Retry/backoff policy comes from class-level QueueManager settings.
    self._retries = QueueManager.retries_before_fail
    self._backoff = QueueManager.backoff
def __init__(self, name: str, connection: redis.Redis):
    """Set up the 'vivarium' work queue and its started/finished registries.

    Args:
        name: label used only to bind the logging context.
        connection: Redis connection shared by the queue and both registries.
    """
    self._name = name
    self._logger = logger.bind(queue=name)
    self._queue = rq.Queue("vivarium", connection=connection)
    # Registries must share the queue's job class so fetched jobs
    # deserialize consistently with those the queue enqueues.
    self._wip = StartedJobRegistry(
        "vivarium", connection=connection, job_class=self._queue.job_class
    )
    self._finished = FinishedJobRegistry(
        "vivarium", connection=connection, job_class=self._queue.job_class
    )
    # Aggregate progress counters; 'done' is a float fraction (0.0 .. 1.0).
    self._status = {
        "total": 0,
        "pending": 0,
        "running": 0,
        "failed": 0,
        "finished": 0,
        "done": 0.0,
        "workers": 0,
    }
    self._failed = False
    self._completed = False
    # Retry/backoff policy comes from class-level QueueManager settings.
    self._retries = QueueManager.retries_before_fail
    self._backoff = QueueManager.backoff
def finished_jobs(request, queue_index):
    """Django view: render a paginated listing of a queue's finished jobs.

    Pagination is driven by the 'page' GET parameter (1-based), 100 jobs
    per page.
    """
    queue_index = int(queue_index)
    queue = get_queue_by_index(queue_index)
    registry = FinishedJobRegistry(queue.name, queue.connection)

    items_per_page = 100
    num_jobs = len(registry)
    page = int(request.GET.get('page', 1))
    jobs = []

    if num_jobs > 0:
        last_page = int(ceil(num_jobs / items_per_page))
        page_range = range(1, last_page + 1)
        offset = items_per_page * (page - 1)
        # get_job_ids uses an inclusive end index, hence the -1.
        job_ids = registry.get_job_ids(offset, offset + items_per_page - 1)
        jobs = get_jobs(queue, job_ids, registry)
    else:
        page_range = []

    context_data = {
        'queue': queue,
        'queue_index': queue_index,
        'jobs': jobs,
        'num_jobs': num_jobs,
        'page': page,
        'page_range': page_range,
        'job_status': 'Finished',
    }
    return render(request, 'django_rq/jobs.html', context_data)
def getfinishedjobs(self, q):
    """Return the finished-job ids of queue *q*.

    On failure the exception object itself is returned (not raised) —
    preserved from the original contract.
    """
    try:
        registry = FinishedJobRegistry(q, connection=self.base_connection)
        return registry.get_job_ids()
    except Exception as e:
        return e
def enqueue_job(request, queue_index, job_id):
    """ Enqueue deferred jobs """
    # GET renders a confirmation page; POST actually (re-)enqueues the job.
    queue_index = int(queue_index)
    queue = get_queue_by_index(queue_index)
    job = Job.fetch(job_id, connection=queue.connection)

    if request.method == 'POST':
        queue.enqueue_job(job)

        # Remove job from correct registry if needed
        if job.get_status() == JobStatus.DEFERRED:
            registry = DeferredJobRegistry(queue.name, queue.connection)
            registry.remove(job)
        elif job.get_status() == JobStatus.FINISHED:
            registry = FinishedJobRegistry(queue.name, queue.connection)
            registry.remove(job)

        messages.info(request, 'You have successfully enqueued %s' % job.id)
        return redirect('rq_job_detail', queue_index, job_id)

    context_data = {
        'queue_index': queue_index,
        'job': job,
        'queue': queue,
    }
    # NOTE(review): renders the delete_job template as the confirmation page —
    # looks reused deliberately; confirm it is the intended template.
    return render(request, 'django_rq/delete_job.html', context_data)
def get_statistics():
    """Collect per-queue statistics (job counts, workers, oldest job time).

    Returns:
        {'queues': [...]} with one dict per configured queue.
    """
    queues = []
    workers_collections = collect_workers_by_connection(QUEUES_LIST)
    for index, config in enumerate(QUEUES_LIST):
        queue = get_queue_by_index(index)
        connection = queue.connection
        connection_kwargs = connection.connection_pool.connection_kwargs

        # Raw access to the first item from left of the redis list.
        # This might not be accurate since new job can be added from the left
        # with `at_front` parameters.
        # Ideally rq should supports Queue.oldest_job
        last_job_id = connection.lindex(queue.key, 0)
        last_job = queue.fetch_job(
            last_job_id.decode('utf-8')) if last_job_id else None
        if last_job:
            oldest_job_timestamp = to_localtime(last_job.enqueued_at)\
                .strftime('%Y-%m-%d, %H:%M:%S')
        else:
            oldest_job_timestamp = "-"

        # parse_class is not needed and not JSON serializable
        try:
            del (connection_kwargs['parser_class'])
        except KeyError:
            pass

        queue_data = {
            'name': queue.name,
            'jobs': queue.count,
            'oldest_job_timestamp': oldest_job_timestamp,
            'index': index,
            'connection_kwargs': connection_kwargs
        }

        if queue.name == 'failed':
            # The legacy 'failed' pseudo-queue has no workers or registries.
            queue_data['workers'] = '-'
            queue_data['finished_jobs'] = '-'
            queue_data['started_jobs'] = '-'
            queue_data['deferred_jobs'] = '-'
        else:
            connection = get_connection(queue.name)
            all_workers = get_all_workers_by_configuration(
                config['connection_config'], workers_collections)
            # Count only workers actually listening on this queue.
            queue_workers = [
                worker for worker in all_workers if queue in worker.queues
            ]
            queue_data['workers'] = len(queue_workers)

            finished_job_registry = FinishedJobRegistry(queue.name, connection)
            started_job_registry = StartedJobRegistry(queue.name, connection)
            deferred_job_registry = DeferredJobRegistry(queue.name, connection)
            queue_data['finished_jobs'] = len(finished_job_registry)
            queue_data['started_jobs'] = len(started_job_registry)
            queue_data['deferred_jobs'] = len(deferred_job_registry)
        queues.append(queue_data)
    return {'queues': queues}
def get_finished_tasks(request):
    """Return up to 10 finished jobs of the queue named by the 'queue' GET arg.

    Response: JSON list of {job_id, func_name, ended_at, enqueued_at, args}.
    """
    current_queue = request.GET.get('queue')
    queue = django_rq.get_queue(current_queue)
    registry = FinishedJobRegistry(queue.name, queue.connection)
    items_per_page = 10
    num_jobs = len(registry)
    jobs = []
    if num_jobs > 0:
        offset = 0
        # get_job_ids takes an INCLUSIVE end index; the original passed
        # items_per_page directly and so returned 11 jobs for a 10-item page.
        job_ids = registry.get_job_ids(offset, offset + items_per_page - 1)
        for job_id in job_ids:
            try:
                jobs.append(Job.fetch(job_id, connection=queue.connection))
            except NoSuchJobError:
                # Job data expired between listing and fetching; skip it.
                pass
    jobdata = list()
    for job in jobs:
        job_dict = {
            'job_id': job.id,
            'func_name': job.func_name,
            'ended_at': job.ended_at.strftime("%a, %d %b %Y %H:%M:%S +0000"),
            'enqueued_at': job.enqueued_at.strftime("%a, %d %b %Y %H:%M:%S +0000"),
            'args': job.args}
        jobdata.append(job_dict)
    data = json.dumps(jobdata)
    return HttpResponse(data, content_type='application/json')
def serialize_queues(queues):
    """Serialize each queue with per-registry job counts and overview URLs."""

    def overview_url(queue_name, registry_name=None):
        # The 'queued' link omits registry_name, matching the original call.
        if registry_name is None:
            return url_for('.overview', content_name='jobs', queue_name=queue_name)
        return url_for('.overview', content_name='jobs', queue_name=queue_name,
                       registry_name=registry_name)

    serialized = []
    for q in queues:
        serialized.append(
            dict(
                name=q.name,
                count=q.count,
                queued_url=overview_url(q.name),
                failed_job_registry_count=FailedJobRegistry(q.name).count,
                failed_url=overview_url(q.name, 'failed'),
                started_job_registry_count=StartedJobRegistry(q.name).count,
                started_url=overview_url(q.name, 'started'),
                deferred_job_registry_count=DeferredJobRegistry(q.name).count,
                deferred_url=overview_url(q.name, 'deferred'),
                finished_job_registry_count=FinishedJobRegistry(q.name).count,
                finished_url=overview_url(q.name, 'finished'),
            )
        )
    return serialized
def test_reenqueue_object_success(reenqueue_object, session, redis, museum_object, museum_package):
    """Re-enqueuing a processed object resets its DB state, enqueues a fresh
    download job, and clears the stale finished CONFIRM_SIP job."""
    # Create fake DB entries
    museum_package.downloaded = True
    museum_package.packaged = True
    museum_package.uploaded = True
    museum_package.rejected = True
    session.commit()

    # Create a job that was completed prior to re-enqueuing
    queue = get_queue(QueueType.CONFIRM_SIP)
    queue.enqueue(successful_job, job_id="confirm_sip_123456")
    # Burst worker drains the queue synchronously so the job lands in the
    # finished registry before re-enqueueing runs.
    SimpleWorker([queue], connection=queue.connection).work(burst=True)

    finished_registry = FinishedJobRegistry(queue=queue)
    assert finished_registry.get_job_ids() == ["confirm_sip_123456"]

    result = reenqueue_object(["123456"])
    assert "Object 123456 re-enqueued" in result.stdout

    # New RQ task was enqueued
    queue = get_queue(QueueType.DOWNLOAD_OBJECT)
    assert "download_object_123456" in queue.job_ids

    # Database was updated
    db_museum_object = session.query(MuseumObject).filter_by(id=123456).one()
    assert len(db_museum_object.packages) == 1
    assert not db_museum_object.latest_package

    # Prior finished job was removed
    assert finished_registry.get_job_ids() == []
def test_job_delete_removes_itself_from_registries(self):
    """job.delete() should remove itself from job registries"""
    # One (status, registry class) pair per registry kind, exercised in the
    # same order as the original copy-pasted stanzas.
    cases = [
        (JobStatus.FAILED, FailedJobRegistry),
        (JobStatus.FINISHED, FinishedJobRegistry),
        (JobStatus.STARTED, StartedJobRegistry),
        (JobStatus.DEFERRED, DeferredJobRegistry),
        (JobStatus.SCHEDULED, ScheduledJobRegistry),
    ]
    for status, registry_class in cases:
        job = Job.create(func=fixtures.say_hello, status=status,
                         connection=self.testconn, origin='default')
        job.save()
        registry = registry_class(connection=self.testconn)
        registry.add(job, 500)
        job.delete()
        self.assertFalse(job in registry)
def get_all_finished_tasks_from(queue_name, app_info):
    """Build task-info entries for every finished job on *queue_name*."""
    with Connection(redis.from_url(app_info['REDIS_URL'])):
        finished_ids = FinishedJobRegistry(queue_name).get_job_ids()
        # Generate the task info while the connection context is active.
        return generate_task_info(queue_name, finished_ids, "finished")
def stats(self):
    """Aggregate job and worker counts for the default queue.

    Returns:
        dict with finished/started/deferred/failed/queued/active job counts
        and the number of workers.
    """
    queue_data = {}
    workers = self.workers()
    queued = self.queued_jobs()
    with Connection(redis.from_url(self.url)):
        # Registries bind to the 'default' queue under this connection.
        # (The original also built an unused Queue and Worker and evaluated
        # `q.connection` as a no-op expression; that dead code is removed.)
        finished_job_registry = FinishedJobRegistry()
        started_jobs_registry = StartedJobRegistry()
        deferred_jobs_registry = DeferredJobRegistry()
        failed_jobs_registry = FailedJobRegistry()
        queue_data['finished_jobs'] = len(finished_job_registry)
        queue_data['started_jobs'] = len(started_jobs_registry)
        queue_data['deferred_jobs'] = len(deferred_jobs_registry)
        queue_data['failed_jobs'] = len(failed_jobs_registry)
        queue_data['workers'] = len(workers)
        queue_data['queued_jobs'] = len(queued)
        # 'active' = currently running plus still waiting.
        queue_data['active_jobs'] = queue_data[
            'started_jobs'] + queue_data['queued_jobs']
    return queue_data
def test_getting_registries_with_serializer(self):
    """Getting job registries from queue object (with custom serializer)"""
    queue = Queue('example', serializer=JSONSerializer)
    registry_attrs = [
        ('scheduled_job_registry', ScheduledJobRegistry),
        ('started_job_registry', StartedJobRegistry),
        ('failed_job_registry', FailedJobRegistry),
        ('deferred_job_registry', DeferredJobRegistry),
        ('finished_job_registry', FinishedJobRegistry),
        ('canceled_job_registry', CanceledJobRegistry),
    ]
    for attr, registry_class in registry_attrs:
        registry = getattr(queue, attr)
        self.assertEqual(registry, registry_class(queue=queue))
        # Make sure we don't use default when queue has custom
        self.assertEqual(registry.serializer, JSONSerializer)
def test_run_scheduled_access_self(self):
    """Schedule a job that schedules a job, then run the worker as subprocess"""
    q = Queue()
    job = q.enqueue(schedule_access_self)
    # Burst worker (-b) drains the queue, including the job the first job
    # schedules, and then exits.
    subprocess.check_call(['rqworker', '-u', self.redis_url, '-b'])
    registry = FinishedJobRegistry(queue=q)
    self.assertTrue(job in registry)
    # Both the original and the self-scheduled job must have been consumed.
    assert q.count == 0
def test_getting_registries(self):
    """Getting job registries from queue object"""
    queue = Queue('example')
    # Each queue property must yield a registry equal to one constructed
    # directly for that queue.
    for attr, registry_class in [
        ('scheduled_job_registry', ScheduledJobRegistry),
        ('started_job_registry', StartedJobRegistry),
        ('failed_job_registry', FailedJobRegistry),
        ('deferred_job_registry', DeferredJobRegistry),
        ('finished_job_registry', FinishedJobRegistry),
    ]:
        self.assertEqual(getattr(queue, attr), registry_class(queue=queue))
def get_previous_job_from_registry(index=-1):
    """Fetch a job from the 'high' queue's finished registry.

    Args:
        index: position within the registry's id list (default -1, i.e. the
            last id the registry returns).
    """
    high_queue = Queue('high', connection=conn)
    finished_ids = FinishedJobRegistry(queue=high_queue).get_job_ids()
    return high_queue.fetch_job(finished_ids[index])
def getfinishedjobs(self, q):
    """returns list of finished redis jobs"""
    log.info(f"getting finished jobs: {q}")
    try:
        registry = FinishedJobRegistry(q, connection=self.base_connection)
        return registry.get_job_ids()
    except Exception as e:
        # Preserved behavior: the exception object is returned, not raised.
        return e
def _remove_jobs(self, project):
    """Delete every started/finished job whose meta 'project' equals *project*."""
    queue_name = self._redis_queue.name
    queue_conn = self._redis_queue.connection
    candidate_ids = (
        StartedJobRegistry(queue_name, queue_conn).get_job_ids()
        + FinishedJobRegistry(queue_name, queue_conn).get_job_ids()
    )
    for job_id in candidate_ids:
        job = Job.fetch(job_id, connection=self._redis)
        # Only jobs tagged with this project are removed.
        if job.meta.get('project') == project:
            logger.info(f'Deleting job_id {job_id}')
            job.delete()
def clean(queue_name=DEFAULT_QUEUE_NAME, connection=None):
    """Empty *queue_name* and purge its finished/failed registries."""
    from redis import Redis
    from rq import Queue
    from rq.registry import FinishedJobRegistry, FailedJobRegistry

    # Default to a local Redis connection when none is supplied.
    queue = Queue(queue_name, connection=connection or Redis())
    queue.empty()
    registries = (FinishedJobRegistry(queue=queue), FailedJobRegistry(queue=queue))
    for registry in registries:
        for job_id in registry.get_job_ids():
            registry.remove(job_id)
def test_finished_jobs(self):
    """Ensure that finished jobs page works properly."""
    queue = get_queue('django_rq_test')
    queue_index = get_queue_index('django_rq_test')

    job = queue.enqueue(access_self)
    # Add the job to the finished registry by hand (TTL 2s) instead of
    # running a worker.
    registry = FinishedJobRegistry(queue.name, queue.connection)
    registry.add(job, 2)
    response = self.client.get(
        reverse('rq_finished_jobs', args=[queue_index]))
    self.assertEqual(response.context['jobs'], [job])
def show_results():
    """Render the overview page with all finished jobs whose result is a ResultField."""
    registry = FinishedJobRegistry(queue=q)
    ids = registry.get_job_ids()
    print("Got finished jobs:", len(ids))
    res = []
    for job_id in ids:
        job = q.fetch_job(job_id)
        # Jobs whose data expired between listing and fetching come back None;
        # the original would have raised AttributeError on them.
        if job is None:
            continue
        # The original also tested `job.result is not None`, which is implied
        # by the truthiness check; the redundant clause is removed.
        if job.result and isinstance(job.result, ResultField):
            print("Result:", job.result.name, job.result.power, job.result.duration)
            res.append(job.result)
    return render_template('overview.html', rows=res)
def jobs():
    """Return jobs grouped by state: queued, started, finished, failed.

    Each value is a list of Job objects fetched through the app queue.
    """
    from rq.registry import StartedJobRegistry, FinishedJobRegistry, FailedJobRegistry

    def fetch_all(job_ids):
        # Resolve registry ids into Job objects via the app queue.
        return [app.queue.fetch_job(job_id) for job_id in job_ids]

    # The original monkey-patched BaseRegistry.get_jobs on every call,
    # mutating a library class globally; a local helper produces the same
    # lists without that side effect.
    return {
        'queued': app.queue.get_jobs(),
        'started': fetch_all(StartedJobRegistry(queue=app.queue).get_job_ids()),
        'finished': fetch_all(FinishedJobRegistry(queue=app.queue).get_job_ids()),
        'failed': fetch_all(FailedJobRegistry(queue=app.queue).get_job_ids()),
    }