Example #1
def status():
    """Return a dictionary with the status of the Celery task processing system."""
    inspect = app.control.inspect()

    # query worker statistics via the control.inspect API and extract the relevant data;
    # each inspect call returns None when no workers reply, hence the `or {}` fallbacks
    stats = inspect.stats() or {}
    active = inspect.active() or {}
    reserved = inspect.reserved() or {}
    active_queues = inspect.active_queues() or {}
    workers = [{
        'name': worker_name,
        'queues': [q['name'] for q in active_queues.get(worker_name, [])],
        'tasks_processed': sum(worker_stats['total'].values()),
        'tasks_active': len(active.get(worker_name, [])),
        'tasks_reserved': len(reserved.get(worker_name, [])),
        'prefetch_count': worker_stats['prefetch_count'],
        'concurrency': worker_stats['pool']['max-concurrency'],
    } for worker_name, worker_stats in stats.items()]

    if 'redis://' in app.conf.broker_url:
        queue_names = [q.name for q in WORKER_QUEUE_CONFIGURATION['default']]

        # use flower to avoid reinventing the wheel when querying queue statistics
        broker = flower.utils.broker.Broker(app.conf.broker_url, broker_options=app.conf.broker_transport_options)
        queue_stats = broker.queues(queue_names).result()

        queues = [{'name': x['name'], 'tasks_pending': x['messages']} for x in queue_stats]
    else:
        raise NotImplementedError('Currently only Redis is supported!')

    alerts = []
    if not workers:
        alerts.append('No active workers!')
    if len(workers) > 9000:
        alerts.append('Number of workers is OVER 9000!!!!1111')

    return {
        'alerts': alerts,
        'workers': workers,
        'queues': queues
    }
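
Both examples reference module-level names that are never defined in the snippets themselves: the Celery app, the flower import, the logger and the queue-name mappings. The sketch below is a hedged guess at that assumed context; the broker URL and the kombu.Queue entries are illustrative placeholders, not values from the original project.

# Minimal module-level context assumed by the examples (illustrative only).
import logging

import flower.utils.broker
from celery import Celery
from kombu import Queue

log = logging.getLogger(__name__)

# placeholder broker configuration; a real project points this at its own Redis instance
app = Celery(broker='redis://localhost:6379/0')
app.conf.broker_transport_options = {}

# placeholder queue layouts; the real mappings hold the project's kombu.Queue objects
WORKER_QUEUE_CONFIGURATION = {'default': [Queue('default'), Queue('priority')]}
QUEUES_MATCHING_ROLES = {'queuemonitor': [Queue('default'), Queue('priority')]}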
Example #2
def status():
    """Return a dictionary with the status of the Celery task processing system."""
    inspect = app.control.inspect()

    # query worker statistics via the control.inspect API and extract the relevant data;
    # each inspect call returns None when no workers reply, hence the `or {}` fallbacks
    stats = inspect.stats() or {}
    active = inspect.active() or {}
    reserved = inspect.reserved() or {}
    active_queues = inspect.active_queues() or {}
    workers = [{
        'name': worker_name,
        'queues': [q['name'] for q in active_queues.get(worker_name, [])],
        'tasks_processed': sum(worker_stats['total'].values()),
        'tasks_active': len(active.get(worker_name, [])),
        'tasks_reserved': len(reserved.get(worker_name, [])),
        'prefetch_count': worker_stats['prefetch_count'],
        'concurrency': worker_stats['pool']['max-concurrency'],
    } for worker_name, worker_stats in stats.items()]

    workers = sorted(workers, key=lambda worker: worker['name'])

    if 'redis://' in app.conf.broker_url:
        queue_names = [q.name for q in QUEUES_MATCHING_ROLES['queuemonitor']]

        # On localhost and on remote workers there is no current event loop in this thread,
        # which makes flower's @gen.coroutine based Broker raise:
        #     RuntimeError: There is no current event loop in thread 'Thread-3'.
        # Inspired by https://github.com/tornadoweb/tornado/issues/2352 and
        # https://github.com/tornadoweb/tornado/issues/2308, installing an event loop up front
        # lets the code below complete. Why there is no event loop in these cases is unclear;
        # flower itself just uses @gen.coroutine and is not to blame:
        # https://github.com/mher/flower/blob/master/flower/utils/broker.py
        import asyncio
        try:
            asyncio.get_event_loop()
        except RuntimeError:
            # no event loop in this thread yet: create and install one
            asyncio.set_event_loop(asyncio.new_event_loop())

        # use flower to avoid reinventing the wheel when querying queue statistics
        queue_stats = []
        try:
            broker = flower.utils.broker.Broker(
                app.conf.broker_url,
                broker_options=app.conf.broker_transport_options)
            queue_stats = broker.queues(queue_names).result()
        except RuntimeError as e:
            log.error("Could not retrieve queue statistics from the broker via flower.")
            log.exception(e)

        queues = [{'name': x['name'], 'tasks_pending': x['messages']} for x in queue_stats]
    else:
        raise NotImplementedError('Currently only Redis is supported!')

    queues = sorted(queues, key=lambda queue: queue['name'])

    alerts = []
    if not workers:
        alerts.append('No active workers!')
    if len(workers) > 9000:
        alerts.append('Number of workers is OVER 9000!!!!1111')

    return {'alerts': alerts, 'workers': workers, 'queues': queues}
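
For reference, a hypothetical consumer of the report both examples produce; status_view is an invented name used only for illustration, and it relies solely on the keys the examples actually return ('alerts', 'workers' and 'queues').

# Hypothetical caller: log any alerts and serialise the full report as JSON.
import json

def status_view():
    report = status()
    for alert in report['alerts']:
        log.warning(alert)
    return json.dumps(report, indent=2)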