def info(cli_config, interval, raw, only_queues, only_workers, by_queue, queues, **options):
    """RQ command-line monitor."""
    if only_queues:
        func = show_queues
    elif only_workers:
        func = show_workers
    else:
        func = show_both

    try:
        with Connection(cli_config.connection):
            if queues:
                qs = list(map(cli_config.queue_class, queues))
            else:
                qs = cli_config.queue_class.all()

            for queue in qs:
                clean_registries(queue)
                clean_worker_registry(queue)

            refresh(interval, func, qs, raw, by_queue,
                    cli_config.queue_class, cli_config.worker_class)
    except ConnectionError as e:
        click.echo(e)
        sys.exit(1)
    except KeyboardInterrupt:
        click.echo()
        sys.exit(0)
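# info hands the selected display function to refresh, which redraws the screen
# at the requested interval. A minimal sketch of such a polling loop follows;
# this is illustrative only, and the actual helper shipped in rq.cli may differ.
import sys
import time

import click


def refresh(interval, func, *args):
    """Call func(*args) once, or redraw every `interval` seconds until interrupted."""
    while True:
        if interval:
            click.clear()
        func(*args)
        if interval:
            time.sleep(interval)
        else:
            break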
def test_clean_large_registry(self):
    """
    clean_registry() splits invalid_keys into multiple lists for set removal
    to avoid sending more than redis can receive
    """
    MAX_WORKERS = 41
    MAX_KEYS = 37
    # srem is called twice per invalid key batch:
    # once for WORKERS_BY_QUEUE_KEY, once for REDIS_WORKER_KEYS
    SREM_CALL_COUNT = 2

    queue = Queue(name='foo')
    for i in range(MAX_WORKERS):
        worker = Worker([queue])
        register(worker)

    with patch('rq.worker_registration.MAX_KEYS', MAX_KEYS), \
            patch.object(queue.connection, 'pipeline',
                         wraps=queue.connection.pipeline) as pipeline_mock:
        # clean_worker_registry creates a pipeline with a context manager.
        # Configure the mock using the context manager entry method __enter__
        pipeline_mock.return_value.__enter__.return_value.srem.return_value = None
        pipeline_mock.return_value.__enter__.return_value.execute.return_value = [0] * MAX_WORKERS
        clean_worker_registry(queue)

        expected_call_count = ceildiv(MAX_WORKERS, MAX_KEYS) * SREM_CALL_COUNT
        self.assertEqual(
            pipeline_mock.return_value.__enter__.return_value.srem.call_count,
            expected_call_count)
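# The expected call count above rounds the batch count up with a ceildiv helper.
# A minimal sketch, assuming it is defined alongside the tests:
def ceildiv(a, b):
    """Ceiling division: how many size-b batches are needed to cover a items."""
    return -(-a // b)  # e.g. ceildiv(41, 37) == 2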
def get_statistics(run_maintenance_tasks=False):
    queues = []
    for index, config in enumerate(QUEUES_LIST):
        queue = get_queue_by_index(index)
        connection = queue.connection
        connection_kwargs = connection.connection_pool.connection_kwargs

        if run_maintenance_tasks:
            clean_registries(queue)
            clean_worker_registry(queue)

        # Raw access to the first item from the left of the redis list.
        # This might not be accurate, since new jobs can be added from the left
        # with the `at_front` parameter.
        # Ideally rq should support Queue.oldest_job
        last_job_id = connection.lindex(queue.key, 0)
        last_job = queue.fetch_job(last_job_id.decode('utf-8')) if last_job_id else None
        if last_job:
            oldest_job_timestamp = to_localtime(last_job.enqueued_at)\
                .strftime('%Y-%m-%d, %H:%M:%S')
        else:
            oldest_job_timestamp = "-"

        # parser_class and connection_pool are not needed and not JSON serializable
        connection_kwargs.pop('parser_class', None)
        connection_kwargs.pop('connection_pool', None)

        queue_data = {
            'name': queue.name,
            'jobs': queue.count,
            'oldest_job_timestamp': oldest_job_timestamp,
            'index': index,
            'connection_kwargs': connection_kwargs,
        }

        connection = get_connection(queue.name)
        queue_data['workers'] = Worker.count(queue=queue)

        finished_job_registry = FinishedJobRegistry(queue.name, connection)
        started_job_registry = StartedJobRegistry(queue.name, connection)
        deferred_job_registry = DeferredJobRegistry(queue.name, connection)
        failed_job_registry = FailedJobRegistry(queue.name, connection)
        scheduled_job_registry = ScheduledJobRegistry(queue.name, connection)
        queue_data['finished_jobs'] = len(finished_job_registry)
        queue_data['started_jobs'] = len(started_job_registry)
        queue_data['deferred_jobs'] = len(deferred_job_registry)
        queue_data['failed_jobs'] = len(failed_job_registry)
        queue_data['scheduled_jobs'] = len(scheduled_job_registry)

        queues.append(queue_data)

    return {'queues': queues}
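# Because get_statistics pops the non-serializable connection kwargs, its return
# value can be handed straight to a JSON encoder. A minimal usage sketch in a
# Django view; the view name and route are illustrative, not part of django-rq.
from django.http import JsonResponse


def queue_stats_view(request):
    # run_maintenance_tasks=True would also prune stale registries on every
    # request, which is usually too aggressive for a frequently polled endpoint.
    return JsonResponse(get_statistics(run_maintenance_tasks=False))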
def test_clean_registry(self):
    """clean_registry removes worker keys that don't exist in Redis"""
    queue = Queue(name='foo')
    worker = Worker([queue])
    register(worker)

    redis = worker.connection

    self.assertTrue(redis.sismember(worker.redis_workers_keys, worker.key))
    self.assertTrue(redis.sismember(REDIS_WORKER_KEYS, worker.key))

    clean_worker_registry(queue)
    self.assertFalse(redis.sismember(worker.redis_workers_keys, worker.key))
    self.assertFalse(redis.sismember(REDIS_WORKER_KEYS, worker.key))
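# Both tests target the key-batching behaviour described in
# test_clean_large_registry's docstring. A minimal sketch of that pattern,
# assuming rq.worker_registration exposes get_keys and the two set keys;
# the shipped implementation of clean_worker_registry may differ in detail.
from rq.worker_registration import (REDIS_WORKER_KEYS, WORKERS_BY_QUEUE_KEY,
                                    get_keys)

MAX_KEYS = 1000  # assumed batch-size limit; the test above patches it down to 37


def clean_worker_registry_sketch(queue):
    """Drop registry entries whose worker key no longer exists in Redis."""
    keys = list(get_keys(queue))

    with queue.connection.pipeline() as pipeline:
        for key in keys:
            pipeline.exists(key)
        results = pipeline.execute()

        invalid_keys = [keys[i] for i, exists in enumerate(results) if not exists]
        if invalid_keys:
            # Remove in MAX_KEYS-sized chunks so no single SREM carries more
            # members than the server should receive in one command.
            for start in range(0, len(invalid_keys), MAX_KEYS):
                batch = invalid_keys[start:start + MAX_KEYS]
                pipeline.srem(WORKERS_BY_QUEUE_KEY % queue.name, *batch)
                pipeline.srem(REDIS_WORKER_KEYS, *batch)
            pipeline.execute()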