from pyrabbit.api import Client


def get_queue_depths(host, username, password, vhost):
    """Return a mapping of queue name -> message count for every queue in the given vhost."""
    cl = Client(host, username, password)
    depths = {}
    queues = [q['name'] for q in cl.get_queues(vhost=vhost)]
    for queue in queues:
        depths[queue] = cl.get_queue_depth(vhost, queue)
    return depths
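A minimal usage sketch for the helper above; the host string, credentials and vhost are placeholder assumptions, and note that pyrabbit's Client takes the management API address (typically port 15672), not the AMQP port:

if __name__ == '__main__':
    # placeholder connection details, adjust for your broker
    depths = get_queue_depths('localhost:15672', 'guest', 'guest', '/')
    for name, depth in depths.items():
        print('{}: {} messages'.format(name, depth))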
Example #2
def countMessagesQueue(self, vhost, queue):
    # Method of a wrapper class: self.host, self.web_port, self.user and
    # self.password are expected to be set on the instance.
    qtd = 0
    cl = Client('{}:{}'.format(self.host, self.web_port), self.user,
                self.password)
    try:
        qtd = cl.get_queue_depth(vhost, queue)
    except HTTPError as e:
        logger.error(e)
        raise e
    return qtd
Example #3
def get_queue_depths(host, username, password, vhost):
    cl = Client(host, username, password)
    if not cl.is_alive():
        raise Exception("Failed to connect to rabbitmq")
    depths = {}
    queues = [q["name"] for q in cl.get_queues(vhost=vhost)]
    for queue in queues:
        if queue == "aliveness-test":
            continue
        depths[queue] = cl.get_queue_depth(vhost, queue)
    return depths
Example #4
def get_rabbitmq_queue_length(q):
    """Fetch queue length from RabbitMQ for a given queue.

    Used periodically to decide if we want to queue more functions or not.

    Uses the Management HTTP API of RabbitMQ, since the Celery client does not have access to these counts.
    """

    from pyrabbit.api import Client

    cl = Client(settings.SNOOP_RABBITMQ_HTTP_URL, 'guest', 'guest')
    return cl.get_queue_depth('/', q)
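As a hedged illustration of the docstring's "decide if we want to queue more functions or not", the sketch below keeps dispatching work only while the backlog stays under a threshold; dispatch_batch, the queue name and the threshold are hypothetical and not part of the original project:

def dispatch_until_backlogged(dispatch_batch, queue='default', max_backlog=1000):
    """Keep dispatching batches while the RabbitMQ backlog stays small (illustrative only)."""
    while get_rabbitmq_queue_length(queue) < max_backlog:
        if not dispatch_batch():  # hypothetical callable; returns False when no work is left
            break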
Example #5
async def monitor(request):
    # On the first call, data is loaded live from RabbitMQ; later calls read it back from the database.
    global refresh
    db = request.app['db']
    await db.collection.drop()
    
    if refresh == 0:

        # Client creation
        try:
            client = Client('localhost:15672', 'guest', 'guest')
        except Exception:
            return {"result": "Client not created"}

        # get all the bindings
        try:
            bindings = client.get_bindings()
        except Exception:
            return {"result": "binding error"}

        # final list of all the rows
        result_list = []
        for binding in bindings:
            # dict for storing values for each binding
            try:
                dict_each = {}
                vhost_name = binding['vhost']
                dict_each['vhost_name'] = vhost_name
                dict_each['exchange_name'] = binding['source']
                queue_name = binding['destination']
                dict_each['queue_name'] = queue_name
                dict_each['queue_size'] = client.get_queue_depth(vhost=vhost_name, name=queue_name)
                result_list.append(dict_each)
            except Exception:
                return {"result": "data not found"}

        # insert data in the db
        try:
            await db.collection.insert_many(result_list)
        except Exception:
            return {'result': "data not stored in the db"}
        refresh += 1
        return {'result_list': result_list}

    # data retrieved from the database after refresh
    if refresh > 0:
        result_list = []
        async for document in db.collection.find():
            result_list.append(document)
        return {"result_list": result_list}
Example #6
def get_queue_depths(host, username, password, vhost):
    cl = Client(host, username, password)
    if not cl.is_alive():
        raise Exception("Failed to connect to rabbitmq")
    depths = {}
    queues = [q['name'] for q in cl.get_queues(vhost=vhost)]
    for queue in queues:
        if queue == "aliveness-test": #pyrabbit
            continue
        elif queue.endswith('.pidbox') or queue.startswith('celeryev.'): #celery
            continue
        depths[queue] = cl.get_queue_depth(vhost, queue)
    return depths
Example #7
def get_queue_depths(host, username, password, vhost):
    """ Fetches queue depths from rabbitmq instance."""
    cl = Client(host, username, password)
    if not cl.is_alive():
        raise Exception("Failed to connect to rabbitmq")
    depths = {}
    queues = [q['name'] for q in cl.get_queues(vhost=vhost)]
    for queue in queues:
        if queue == "aliveness-test":  #pyrabbit
            continue
        elif queue.startswith('amq.gen-'):  #Anonymous queues
            continue
        depths[queue] = cl.get_queue_depth(vhost, queue)
    return depths
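The two variants above differ only in which system queues they skip (Celery pidbox/event queues versus anonymous amq.gen- queues). A parameterized sketch, with assumed defaults rather than anything from the original repositories, makes that filtering explicit:

def get_queue_depths_filtered(host, username, password, vhost,
                              skip_names=('aliveness-test',),
                              skip_prefixes=('amq.gen-', 'celeryev.'),
                              skip_suffixes=('.pidbox',)):
    """Like the helpers above, but the queues to ignore are passed in explicitly."""
    cl = Client(host, username, password)
    if not cl.is_alive():
        raise Exception("Failed to connect to rabbitmq")
    depths = {}
    for q in cl.get_queues(vhost=vhost):
        name = q['name']
        if name in skip_names or name.startswith(skip_prefixes) or name.endswith(skip_suffixes):
            continue
        depths[name] = cl.get_queue_depth(vhost, name)
    return depths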
Example #8
def get_queue_depths(host, username, password, vhost):
    cl = Client(host, username, password)
    if not cl.is_alive():
        raise Exception("Failed to connect to rabbitmq")
    depths = {}
    queues = [q['name'] for q in cl.get_queues(vhost=vhost)]
    for queue in queues:
        if queue == "aliveness-test":  #pyrabbit
            continue
        elif queue.endswith('.pidbox') or queue.startswith('celeryev.'):  # celery
            continue
        depths[queue] = cl.get_queue_depth(vhost, queue)
    return depths
Example #9
def rebuild_collection(collection_name):
    """
    Will grab all recs from the database and send them to solr
    """
    # first, fail if we cannot monitor queue length before we queue anything
    u = urlparse(app.conf['OUTPUT_CELERY_BROKER'])
    # the management HTTP API is assumed to listen on the broker port + 10000 (e.g. 5672 -> 15672)
    rabbitmq = PyRabbitClient(u.hostname + ':' + str(u.port + 10000),
                              u.username, u.password)
    if not rabbitmq.is_alive('master_pipeline'):
        logger.error(
            'failed to connect to rabbitmq with PyRabbit to monitor queue')
        sys.exit(1)

    now = get_date()
    if collection_name.startswith('http'):
        solr_urls = [collection_name]
    else:
        solr_urls = collection_to_urls(collection_name)

    logger.info('Sending all records to: %s', ';'.join(solr_urls))
    sent = 0

    batch = []
    _tasks = []
    with app.session_scope() as session:
        # master db only contains valid documents, indexing task will make sure that incomplete docs are rejected
        for rec in session.query(Records) \
            .options(load_only(Records.bibcode, Records.updated, Records.processed)) \
            .yield_per(1000):

            sent += 1
            if sent % 1000 == 0:
                logger.debug('Sending %s records', sent)

            batch.append(rec.bibcode)
            if len(batch) > 1000:
                t = tasks.task_rebuild_index.delay(batch,
                                                   force=True,
                                                   update_solr=True,
                                                   update_metrics=False,
                                                   update_links=False,
                                                   ignore_checksums=True,
                                                   solr_targets=solr_urls,
                                                   update_timestamps=False)
                _tasks.append(t)
                batch = []

    if len(batch) > 0:
        t = tasks.task_rebuild_index.delay(batch,
                                           force=True,
                                           update_solr=True,
                                           update_metrics=False,
                                           update_links=False,
                                           ignore_checksums=True,
                                           solr_targets=solr_urls,
                                           update_timestamps=False)
        _tasks.append(t)

    logger.info('Done queueing bibcodes for rebuilding collection %s',
                collection_name)
    # now wait for queue to empty
    queue_length = 1
    while queue_length > 0:
        queue_length = rabbitmq.get_queue_depth('master_pipeline',
                                                'rebuild-index')
        stime = queue_length * 0.1
        logger.info(
            'Waiting %s for rebuild-collection tasks to finish, queue_length %s, sent %s'
            % (stime, queue_length, sent))
        time.sleep(stime)

    logger.info('Done rebuilding collection %s, sent %s records',
                collection_name, sent)
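The drain loop above sleeps in proportion to the backlog but never gives up. A hedged variant with a timeout (rabbitmq, vhost and queue mirror the names used in the example; the timeout and poll factor are assumptions, and the time module is assumed to be imported as in the example) could look like this:

def wait_for_queue_drain(rabbitmq, vhost, queue, timeout=3600, poll_factor=0.1):
    """Poll a queue until it is empty or the timeout (in seconds) expires; illustrative only."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        depth = rabbitmq.get_queue_depth(vhost, queue)
        if depth == 0:
            return True
        time.sleep(max(depth * poll_factor, 1))
    return False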
Example #10
rabbit_queue = os.environ["RABBIT_QUEUE"]
marathon_url = os.environ["MARATHON_URL"]
marathon_port = os.environ["MARATHON_PORT"]
marathon_app = os.environ["MARATHON_APP"]

# marathon connection string
url = "http://" + marathon_url + ":" + marathon_port + "/v2/apps" + marathon_app
headers = {'content-type': "application/json", 'cache-control': "no-cache"}

# connect to rabbit
rabbit_connection = Client(rabbit_host + ":" + rabbit_api_port, rabbit_user,
                           rabbit_password)
print "connected to rabbit"

# get rabbit queue size and figure out amount of tasks needed
rabbit_size = rabbit_connection.get_queue_depth(rabbit_vhost, rabbit_queue)
workers_needed = int(int(rabbit_size) / int(scale_every_x_waiting_messages))
if workers_needed < int(min_task_size):
    workers_needed = int(min_task_size)
elif workers_needed > int(max_task_size):
    workers_needed = int(max_task_size)

# get current number of tasks
response = requests.request("GET", url, headers=headers)
app_data = response.json()

# if the current number of tasks differs from the required number, change it
if int(app_data["app"]["instances"]) != int(workers_needed):
    print("scaling from " + str(app_data["app"]["instances"]) + " to " +
          str(workers_needed) + " workers")
    payload = "{\"instances\": " + str(workers_needed) + "}"
Example #11
def getQueueMessagesCount(rabbit_ip, rabbit_port, rabbit_user, rabbit_password, rabbit_host, queue_name):
    # ':1' prepended to the broker port targets the management API (e.g. 5672 -> 15672)
    cl = AdminClient(rabbit_ip + ':1' + rabbit_port, rabbit_user, rabbit_password)
    messages_cnt = cl.get_queue_depth(vhost=rabbit_host.replace("/", ""), name=queue_name)
    return messages_cnt
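A hedged call example for this last helper; the values are placeholders, and the ':1' + port trick assumes the management plugin listens on 1<broker port> (e.g. 5672 -> 15672):

count = getQueueMessagesCount('localhost', '5672', 'guest', 'guest', '/', 'work')
print('work queue currently holds {} messages'.format(count))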