def info(cli_config, interval, raw, only_queues, only_workers, by_queue, queues, **options):
    """RQ command-line monitor."""
    # Choose the display routine from the mutually exclusive flags.
    if only_queues:
        display = show_queues
    elif only_workers:
        display = show_workers
    else:
        display = show_both

    try:
        with Connection(cli_config.connection):
            # Monitor the explicitly named queues, or every known queue.
            if queues:
                monitored = [cli_config.queue_class(name) for name in queues]
            else:
                monitored = cli_config.queue_class.all()

            # Purge stale entries before the first screen is drawn.
            for q in monitored:
                clean_registries(q)
                clean_worker_registry(q)

            refresh(interval, display, monitored, raw, by_queue,
                    cli_config.queue_class, cli_config.worker_class)
    except ConnectionError as err:
        click.echo(err)
        sys.exit(1)
    except KeyboardInterrupt:
        # Emit a newline so the shell prompt starts on a clean line after Ctrl-C.
        click.echo()
        sys.exit(0)
def get_statistics(run_maintenance_tasks=False):
    """Collect per-queue statistics for every configured queue.

    Args:
        run_maintenance_tasks: when True, clean each queue's job and
            worker registries before gathering counts.

    Returns:
        A dict with a single ``'queues'`` key mapping to a list of
        per-queue stat dicts (job counts, registry sizes, worker count,
        oldest-job timestamp and sanitized connection kwargs).
    """
    queues = []
    for index, config in enumerate(QUEUES_LIST):
        queue = get_queue_by_index(index)
        connection = queue.connection
        # Copy the dict: the pops below must not mutate the connection
        # pool's live kwargs, which are shared process-wide state.
        connection_kwargs = dict(connection.connection_pool.connection_kwargs)

        if run_maintenance_tasks:
            clean_registries(queue)
            clean_worker_registry(queue)

        # Raw access to the first item from left of the redis list.
        # This might not be accurate since a new job can be added from the left
        # with the `at_front` parameter.
        # Ideally rq should support Queue.oldest_job
        last_job_id = connection.lindex(queue.key, 0)
        last_job = queue.fetch_job(
            last_job_id.decode('utf-8')) if last_job_id else None
        if last_job:
            oldest_job_timestamp = to_localtime(last_job.enqueued_at)\
                .strftime('%Y-%m-%d, %H:%M:%S')
        else:
            oldest_job_timestamp = "-"

        # parser_class and connection_pool are not needed and not JSON serializable
        connection_kwargs.pop('parser_class', None)
        connection_kwargs.pop('connection_pool', None)

        queue_data = {
            'name': queue.name,
            'jobs': queue.count,
            'oldest_job_timestamp': oldest_job_timestamp,
            'index': index,
            'connection_kwargs': connection_kwargs,
        }

        connection = get_connection(queue.name)
        queue_data['workers'] = Worker.count(queue=queue)

        finished_job_registry = FinishedJobRegistry(queue.name, connection)
        started_job_registry = StartedJobRegistry(queue.name, connection)
        deferred_job_registry = DeferredJobRegistry(queue.name, connection)
        failed_job_registry = FailedJobRegistry(queue.name, connection)
        scheduled_job_registry = ScheduledJobRegistry(queue.name, connection)
        queue_data['finished_jobs'] = len(finished_job_registry)
        queue_data['started_jobs'] = len(started_job_registry)
        queue_data['deferred_jobs'] = len(deferred_job_registry)
        queue_data['failed_jobs'] = len(failed_job_registry)
        queue_data['scheduled_jobs'] = len(scheduled_job_registry)

        queues.append(queue_data)
    return {'queues': queues}
def test_clean_registries(self):
    """clean_registries() cleans Started and Finished job registries."""
    queue = Queue(connection=self.testconn)

    finished_job_registry = FinishedJobRegistry(connection=self.testconn)
    # redis-py >= 3.0 requires a {member: score} mapping for zadd
    # (positional `zadd(key, 1, 'foo')` is the removed 2.x API); this
    # also matches the sibling serializer test.
    self.testconn.zadd(finished_job_registry.key, {'foo': 1})

    started_job_registry = StartedJobRegistry(connection=self.testconn)
    self.testconn.zadd(started_job_registry.key, {'foo': 1})

    clean_registries(queue)
    self.assertEqual(self.testconn.zcard(finished_job_registry.key), 0)
    self.assertEqual(self.testconn.zcard(started_job_registry.key), 0)
def test_clean_registries_with_serializer(self):
    """clean_registries() cleans Started and Finished job registries (with serializer)."""
    queue = Queue(connection=self.testconn, serializer=JSONSerializer)

    # Seed each registry's sorted set with one stale member.
    registries = (
        FinishedJobRegistry(connection=self.testconn, serializer=JSONSerializer),
        StartedJobRegistry(connection=self.testconn, serializer=JSONSerializer),
        FailedJobRegistry(connection=self.testconn, serializer=JSONSerializer),
    )
    for registry in registries:
        self.testconn.zadd(registry.key, {'foo': 1})

    clean_registries(queue)

    # Every registry must have been emptied by the cleanup.
    for registry in registries:
        self.assertEqual(self.testconn.zcard(registry.key), 0)