def cancel_optimization(self, request, public_id=None):
    transport_network_obj = self.get_object()
    if transport_network_obj.optimization_status in [
        TransportNetwork.STATUS_ERROR,
        TransportNetwork.STATUS_FINISHED,
    ]:
        raise ValidationError('Optimization is not running or queued')

    redis_conn = get_connection()
    workers = Worker.all(redis_conn)
    for worker in workers:
        if worker.state == WorkerStatus.BUSY and \
                worker.get_current_job_id() == str(transport_network_obj.job_id):
            send_kill_horse_command(redis_conn, worker.name)

    # remove from queue
    cancel_job(str(transport_network_obj.job_id), connection=redis_conn)

    transport_network_obj.optimization_status = None
    transport_network_obj.optimization_ran_at = None
    transport_network_obj.optimization_error_message = None
    transport_network_obj.save()

    return Response(TransportNetworkSerializer(transport_network_obj).data,
                    status.HTTP_200_OK)

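# Hedged wiring sketch: cancel_optimization() reads like a Django REST
# Framework ViewSet action, so it would typically be exposed with the
# @action decorator. The ViewSet name and lookup field are assumptions,
# not taken from the snippet above.
from rest_framework import viewsets
from rest_framework.decorators import action

class TransportNetworkViewSet(viewsets.ModelViewSet):  # hypothetical ViewSet
    lookup_field = 'public_id'

    @action(detail=True, methods=['post'])
    def cancel_optimization(self, request, public_id=None):
        ...  # body as defined above
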
def list_workers_api():
    workers_list = Worker.all()
    rq_workers = []
    for worker in workers_list:
        host_ip_using_name = "N/A"
        try:
            host_ip_using_name = socket.gethostbyname(worker.hostname)
        except socket.gaierror:
            # hostname could not be resolved; keep the "N/A" placeholder
            pass
        rq_workers.append({
            'worker_name': worker.name,
            'listening_on': ', '.join(queue.name for queue in worker.queues),
            'status': worker.get_state() if not is_suspended(get_current_connection()) else "suspended",
            'host_ip': host_ip_using_name,
            'current_job_id': worker.get_current_job_id(),
            'failed_jobs': worker.failed_job_count,
        })
    return {
        'data': rq_workers,
    }

def rq_workers(queue=None):
    # type: (Queue) -> List[Worker]
    """Returns the list of current rq ``Worker``s."""
    return Worker.all(connection=Redis(), queue=queue)

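# A minimal usage sketch for rq_workers() above, assuming a local Redis at
# the default address and a queue named "default" (both assumptions).
from redis import Redis
from rq import Queue

for w in rq_workers(queue=Queue('default', connection=Redis())):
    print(w.name, w.get_state())
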
def workers(): """Show information on salactus workers. (slow)""" counter = Counter() for w in Worker.all(connection=worker.connection): for q in w.queues: counter[q.name] += 1 import pprint pprint.pprint(dict(counter))
def get_worker_statistics(self, worker_name):
    workers = Worker.all(queue=self.default_queue)
    for worker in workers:
        if worker.name == worker_name:
            return dict(total=worker.total_working_time,
                        successful=worker.successful_job_count,
                        failed=worker.failed_job_count)
    return dict(error='Worker not found')

def num_connected_workers(self):
    return len([
        w for w in Worker.all(queue=self.queue) if w.state in (
            WorkerStatus.STARTED,
            WorkerStatus.SUSPENDED,
            WorkerStatus.BUSY,
            WorkerStatus.IDLE,
        )
    ])

def get_all_workers_statistics(self):
    workers = Worker.all(queue=self.default_queue)
    res = []
    for worker in workers:
        data = dict(total=worker.total_working_time,
                    successful=worker.successful_job_count,
                    failed=worker.failed_job_count)
        res.append(dict(name=worker.name, stat=data))
    return res

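# Hypothetical caller for the two statistics helpers above; `manager` stands
# in for whichever object defines them, since the snippets only show the
# methods themselves.
for entry in manager.get_all_workers_statistics():
    stats = entry['stat']
    print('{0}: {1} ok, {2} failed, {3:.1f}s worked'.format(
        entry['name'], stats['successful'], stats['failed'], stats['total']))
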
def workers(cls):
    cls.connect()
    remove_ghost_workers()
    return [{
        'name': w.name,
        'key': w.key,
        'pid': w.pid,
        'state': w.state,
        'stopped': w.stopped,
        'queues': w.queue_names(),
    } for w in Worker.all()]

def there_are_busy_workers(worker_name_regex):
    queue_index = 0
    queue = get_queue_by_index(queue_index)
    workers = Worker.all(queue.connection)
    if worker_name_regex is not None:
        workers = [worker for worker in workers
                   if re.match(worker_name_regex, worker.name)]
    busy_workers = [worker.name for worker in workers
                    if worker.get_state() == 'busy']
    print('busy_workers: ', busy_workers)
    return len(busy_workers) > 0

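# Sketch of the usual role of there_are_busy_workers(): poll until matching
# workers drain before stopping them. The 5-second interval and the name
# pattern are illustrative assumptions.
import time

while there_are_busy_workers(r'worker-.*'):
    time.sleep(5)
print('no busy workers left; safe to stop them')
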
def get_current_worker():
    """
    Get the rq worker assigned to the current job

    Returns:
        class:`rq.worker.Worker`: The worker assigned to the current job
    """
    current_job = get_current_job()
    if current_job is None:  # not running inside a job
        return None
    for worker in Worker.all():
        if worker.get_current_job() == current_job:
            return worker
    return None

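# Sketch of calling get_current_worker() from inside a task so the job can
# report which worker ran it. Assumes the function lives in an importable
# module (rq cannot enqueue functions defined in __main__); the name is
# illustrative.
def report_worker():
    worker = get_current_worker()
    return worker.name if worker is not None else 'unknown'

# q.enqueue('tasks.report_worker')  # enqueue by dotted path from elsewhere
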
def remove_ghost_workers():
    if not OPTIONS.get('remove_ghost_workers', False):
        return
    if not redis_runs_on_same_machine():
        logger.warning('Cannot remove Ghost Workers, because the configured Redis Server is not running on localhost!')
        return
    setup_rq_connection()
    for w in Worker.all():
        if not worker_running(w):
            w.register_death()

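# remove_ghost_workers() relies on a worker_running() helper the snippet does
# not show. One plausible implementation, assuming (as the guard above
# enforces) that workers run on the same host as Redis:
import os

def worker_running(w):
    """Best-effort check that the worker's pid is alive on this host."""
    if w.pid is None:
        return False
    try:
        os.kill(w.pid, 0)  # signal 0 only checks that the pid exists
    except ProcessLookupError:
        return False
    except PermissionError:
        return True  # pid exists but belongs to another user
    return True
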
def workers(request, queue_index):
    queue_index = int(queue_index)
    queue = get_queue_by_index(queue_index)
    all_workers = Worker.all(queue.connection)
    workers = [worker for worker in all_workers
               if queue.name in worker.queue_names()]
    context_data = {
        'queue': queue,
        'queue_index': queue_index,
        'workers': workers,
    }
    return render(request, 'django_rq/workers.html', context_data)

def quantity(self):
    """
    Returns the aggregated number of tasks of the proc queues.
    """
    count = sum(client.count for client in self.clients)
    # Add any workers which are currently working jobs
    all_workers = Worker.all(connection=self.connection)
    for worker in all_workers:
        if worker.get_current_job():
            count += 1
    return count

def delete_job(job_id):
    redis_conn = get_connection()
    workers = Worker.all(redis_conn)
    for worker in workers:
        if worker.state == WorkerStatus.BUSY and \
                worker.get_current_job_id() == str(job_id):
            send_kill_horse_command(redis_conn, worker.name)
    try:
        # remove from queue
        cancel_job(str(job_id), connection=redis_conn)
    except NoSuchJobError:
        pass

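# Hedged usage sketch for delete_job(): enqueue something long-running, then
# cancel it whether it is still queued or already being worked. The dotted
# task path and queue name are illustrative assumptions.
from redis import Redis
from rq import Queue

q = Queue('default', connection=Redis())
job = q.enqueue('tasks.long_running', 600)  # dotted path avoids local imports
delete_job(job.id)
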
def cancel_job(job_id):
    print("Attempting To Cancel...")
    job = q.fetch_job(job_id)
    if job is not None:
        if job.get_status() == "finished":
            return "Job has already completed"
        elif job.get_status() == "started":
            for worker in Worker.all(connection=redis):
                current = worker.get_current_job()  # None for idle workers
                if current is not None and current.id == job.id:
                    # worker.pid is read from the worker's redis hash, which is
                    # more robust than parsing the pid out of worker.key
                    os.kill(worker.pid, signal.SIGKILL)
                    print("OS KILL: {}".format(worker.pid))
        else:
            job.delete()
        return "Cancelled"
    else:
        return "Job Id does not exist"

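# Note: SIGKILL-ing the process (as above) terminates the whole worker and
# bypasses rq's own bookkeeping. Newer rq releases offer a cleaner remote
# command for this, send_kill_horse_command() from rq.command, which the
# cancel_optimization/delete_job snippets above use.
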
def get(self):
    """Get the list of workers."""
    worker_list = []
    total = Worker.count(connection=rq2.connection)
    workers = Worker.all(connection=rq2.connection)
    for worker in workers:
        worker_list.append({
            "queue_names": worker.queue_names(),
            "current_job": worker.get_current_job(),
            # last_heartbeat reads the timestamp; heartbeat() would *set* it
            "heartbeat": worker.last_heartbeat,
            "name": worker.name,
            "state": worker.get_state(),
        })
    return {
        "code": StatesCode.SUCCESS,
        "total": total,
        "data": worker_list,
    }

def delete_workers_api():
    worker_names = []
    if request.method == 'POST':
        worker_id = request.form.get('worker_id', None)
        delete_all = request.form.get('delete_all')
        if worker_id is None and (delete_all == "false" or delete_all is None):
            raise RQMonitorException('Worker ID not received', status_code=400)
        if delete_all == "true":
            for worker_instance in Worker.all():
                worker_names.append(worker_instance.name)
            delete_workers(worker_names)
        else:
            worker_names.append(worker_id)
            delete_workers([worker_id])
    return {
        'message': 'Successfully deleted worker/s {0}'.format(", ".join(worker_names))
    }

def list_workers_api():
    workers_list = Worker.all()
    rq_workers = []
    for worker in workers_list:
        rq_workers.append({
            'worker_name': worker.name,
            'listening_on': ', '.join(queue.name for queue in worker.queues),
            'status': worker.get_state() if not is_suspended(get_current_connection()) else "suspended",
            'current_job_id': worker.get_current_job_id(),
            'success_jobs': worker.successful_job_count,
            'failed_jobs': worker.failed_job_count,
        })
    return {
        'data': rq_workers,
    }

def delete_workers_api():
    worker_names = []
    if request.method == 'POST':
        worker_id = request.form.get('worker_id', None)
        delete_all = request.form.get('delete_all')
        # string values must be compared with ==, not the identity operator `is`
        if worker_id is None and (delete_all == "false" or delete_all is None):
            raise RQMonitorException('Worker ID not received', status_code=400)
        try:
            if delete_all == "true":
                for worker_instance in Worker.all():
                    worker_names.append(worker_instance.name)
                delete_workers(worker_names)
            else:
                worker_names.append(worker_id)
                delete_workers([worker_id])
        except ActionFailed:
            raise RQMonitorException('Unable to delete worker/s', status_code=500)
        return {
            'message': 'Successfully deleted worker/s {0}'.format(", ".join(worker_names))
        }
    raise RQMonitorException('Invalid HTTP Request type', status_code=400)

def _all_workers(rq):
    from rq.worker import Worker
    return Worker.all(connection=rq.connection)

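# _all_workers() only needs an object exposing a `connection` attribute, so a
# tiny stand-in is enough to exercise it outside the original codebase (the
# namedtuple here is purely illustrative).
from collections import namedtuple
from redis import Redis

FakeRQ = namedtuple('FakeRQ', ['connection'])
print(_all_workers(FakeRQ(connection=Redis())))
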
#!/usr/bin/env python3
#
# Created by Lukas Lüftinger on 14/01/2018.
#
from pprint import pprint

from redis import Redis
from rq import Queue
from rq.worker import Worker

PHENDB_QUEUE = "phenDB"

rconn = Redis()
workers = Worker.all(connection=rconn)
phendb_worker = workers[0]  # assumes at least one worker is registered
q = Queue(PHENDB_QUEUE, connection=rconn)

print("Successfully executed: {se}\nFailed: {fe}\nTotal time worked: {tt}\n".format(
    se=phendb_worker.successful_job_count,
    fe=phendb_worker.failed_job_count,
    tt=phendb_worker.total_working_time))
print("Currently active workers: ")
pprint([x.name for x in workers])
print("Currently enqueued jobs: ")
pprint(q.job_ids)
print("Failed jobs:")
fque = Queue('failed', connection=rconn)
pprint(fque.job_ids)

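# The script above reads failures from a literal "failed" queue, which is the
# pre-1.0 rq layout. On rq >= 1.0 the same information lives in the
# FailedJobRegistry; a sketch of the modern lookup:
from redis import Redis
from rq import Queue
from rq.registry import FailedJobRegistry

q = Queue("phenDB", connection=Redis())
print(FailedJobRegistry(queue=q).get_job_ids())
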
import os
import signal
import sys
import time

from redis import Redis
from rq import use_connection, Queue, get_current_job, Connection, cancel_job
from rq.worker import Worker

from helpers import cocktailSort, make_list

redis = Redis('127.0.0.1', 6379, password='')
q = Queue(connection=redis)
workers = Worker.all(connection=redis)
print(workers)


def print_all_jobs():
    print(q.job_ids)
    for job_id in q.job_ids:
        print(q.fetch_job(job_id).get_status())


def add_job(list_size, job_id):
    job = q.enqueue(cocktailSort, [make_list(list_size), [1, 2, 3, 4]], job_id=job_id)
    if job is not None:
        print("New Job Added, Status: {}".format(job.get_status()))
    else:
        print("Job not Queued")

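# Hypothetical driver for the helpers above: queue one sorting job and dump
# job states. The list size and job id are arbitrary.
if __name__ == '__main__':
    add_job(100, 'cocktail-sort-demo')
    print_all_jobs()
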
def GetWorksByQueueName(name, host=None, port=None):
    from rq.worker import Worker
    Q = GetQueue(name, host, port)
    # Worker.all is a classmethod; filter by the queue instead of
    # instantiating a Worker just to call it
    return Worker.all(queue=Q, connection=default_redis)
