def Main():
    cgiEnv = lib_common.CgiEnv()
    configNam = cgiEnv.m_entity_id_dict["Url"]
    namVHost = cgiEnv.m_entity_id_dict["VHost"]

    nodeManager = survol_rabbitmq_manager.MakeUri(configNam)

    creds = lib_credentials.GetCredentials("RabbitMQ", configNam)

    # cl = Client('localhost:12345', 'guest', 'guest')
    cl = Client(configNam, creds[0], creds[1])

    grph = cgiEnv.GetGraph()

    nodVHost = survol_rabbitmq_vhost.MakeUri(configNam, namVHost)
    grph.add((nodeManager, lib_common.MakeProp("virtual host node"), nodVHost))

    for quList in cl.get_queues(namVHost):
        namQueue = quList["name"]
        DEBUG("q=%s", namQueue)

        nodeQueue = survol_rabbitmq_queue.MakeUri(configNam, namVHost, namQueue)

        managementUrl = rabbitmq.ManagementUrlPrefix(configNam, "queues", namVHost, namQueue)
        grph.add((nodeQueue, lib_common.MakeProp("Management"), lib_common.NodeUrl(managementUrl)))

        grph.add((nodVHost, lib_common.MakeProp("Queue"), nodeQueue))

    cgiEnv.OutCgiRdf()
def main():
    rabbit = None
    outcome = None
    try:
        # Ensure there are no parallel runs of this script
        lock.acquire(timeout=5)

        # Connect to RabbitMQ
        nullwrite = NullWriter()
        oldstdout = sys.stdout
        sys.stdout = nullwrite  # disable output

        rabbit = RabbitClient('%s:%s' % (rabbitmq['host'], rabbitmq['port']),
                              rabbitmq['username'], rabbitmq['password'])
        if not rabbit.is_alive():
            raise Exception('Cannot connect to RabbitMQ')
        queues = rabbit.get_queues(rabbitmq['vhost'])

        sys.stdout = oldstdout  # enable output

        # Build outcome
        if args.d == 'queues':
            outcome = {'data': []}
            for queue in queues:
                outcome['data'].append({'{#QUEUE}': queue['name']})
    except LockTimeout:
        print('Lock not acquired, exiting')
    except AlreadyLocked:
        print('Already locked, exiting')
    except Exception as e:
        print(type(e))
        print('Error: %s' % e)
def connect(options):
    server = environ.get("server", "localhost:15672")
    client = Client(server,
                    environ.get("username", "guest"),
                    environ.get("password", "guest"))
    client.is_admin = True
    return client
def queues(self):
    cl = Client('{}:15672'.format(self.config["rabbitmq"]["server"]),
                self.config["rabbitmq"]["user"],
                self.config["rabbitmq"]["password"])
    queues = [q['name'] for q in cl.get_queues()]
    queues.remove("scan-result")
    return queues
def Main():
    cgiEnv = lib_common.CgiEnv()
    configNam = cgiEnv.m_entity_id_dict["Url"]
    namVHost = cgiEnv.m_entity_id_dict["VHost"]

    nodeManager = survol_rabbitmq_manager.MakeUri(configNam)

    creds = lib_credentials.GetCredentials("RabbitMQ", configNam)

    # cl = Client('localhost:12345', 'guest', 'guest')
    cl = Client(configNam, creds[0], creds[1])

    grph = cgiEnv.GetGraph()

    nodVHost = survol_rabbitmq_vhost.MakeUri(configNam, namVHost)
    grph.add((nodeManager, lib_common.MakeProp("virtual host node"), nodVHost))

    for objExchange in cl.get_exchanges(namVHost):
        namExchange = objExchange["name"]
        sys.stderr.write("namExchange=%s\n" % namExchange)

        nodeExchange = survol_rabbitmq_exchange.MakeUri(configNam, namVHost, namExchange)

        managementUrl = rabbitmq.ManagementUrlPrefix(configNam, "exchanges", namVHost, namExchange)
        grph.add((nodeExchange, lib_common.MakeProp("Management"), lib_common.NodeUrl(managementUrl)))

        grph.add((nodVHost, lib_common.MakeProp("Exchange"), nodeExchange))

    cgiEnv.OutCgiRdf()
class rabbitmonitor(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        self.scheduler = sched.scheduler(time.time, time.sleep)
        self.cl = Client("localhost:55672", "guest", "guest")

    def rabbit_stats(self):
        rabbit_data = {}
        try:
            exchanges = self.cl.get_exchanges()
            queues = self.cl.get_queues()
            binding = self.cl.get_bindings()
            rabbit_data['time'] = time.time()
            rabbit_data['exchanges'] = exchanges
            rabbit_data['queues'] = queues
            for q in rabbit_data['queues']:
                # Delete a temporary queue with no consumer: a zombie queue.
                if "amq" in q['name'] and q['consumers'] == 0:
                    vhost = q['vhost']
                    self.cl.delete_queue(vhost, q['name'])
                    print('deleting queue')
        except Exception:
            print('error')

    def run(self):
        while True:
            self.scheduler.enter(400, 1, self.rabbit_stats, ())
            self.scheduler.run()
class MqSmokeTest(unittest.TestCase):
    def setUp(self):
        self.cl = Client("mq:15672",
                         os.environ.get("SFM_RABBITMQ_USER"),
                         os.environ.get("SFM_RABBITMQ_PASSWORD"))
        self.assertTrue(self.cl.is_alive())

    def test_exchange(self):
        exchanges = self.cl.get_exchanges()
        for exchange in exchanges:
            if exchange["name"] == "sfm_exchange":
                break
        else:
            self.assertTrue(False, "Exchange not found.")

    def test_queues(self):
        queues = self.cl.get_queues()
        queues_names = {queue["name"] for queue in queues}
        # Add additional queue names as new components are added.
        self.assertTrue(queues_names.issuperset(set([
            "flickr_harvester",
            "flickr_exporter",
            "sfm_ui",
            "twitter_harvester",
            "twitter_rest_harvester",
            "twitter_rest_harvester_priority",
            "twitter_rest_exporter",
            "twitter_stream_exporter",
            "tumblr_harvester",
            "tumblr_exporter"
        ])))
def Main():
    cgiEnv = lib_common.ScriptEnvironment()
    configNam = cgiEnv.GetId()

    nodeManager = survol_rabbitmq_manager.MakeUri(configNam)

    creds = lib_credentials.GetCredentials("RabbitMQ", configNam)

    # cl = Client('localhost:12345', 'guest', 'guest')
    cl = Client(configNam, creds[0], creds[1])

    grph = cgiEnv.GetGraph()

    try:
        lstOverview = cl.get_overview()
    except Exception as exc:
        lib_common.ErrorMessageHtml("Caught:" + str(exc))

    for keyOverview in lstOverview:
        valOverview = lstOverview[keyOverview]

        # Otherwise it does not work, as these characters should be escaped.
        # TODO: Nice display for Python lists and dicts.
        valClean = str(valOverview).replace("{", "").replace("}", "")

        grph.add((nodeManager, lib_common.MakeProp(keyOverview), lib_util.NodeLiteral(valClean)))

    cgiEnv.OutCgiRdf()
def Main():
    cgiEnv = lib_common.ScriptEnvironment()
    config_nam = cgiEnv.m_entity_id_dict["Url"]
    nam_connection = cgiEnv.m_entity_id_dict["Connection"]

    node_manager = survol_rabbitmq_manager.MakeUri(config_nam)

    creds = lib_credentials.GetCredentials("RabbitMQ", config_nam)
    cl = Client(config_nam, creds[0], creds[1])

    grph = cgiEnv.GetGraph()

    logging.debug("nam_connection=%s", nam_connection)

    nod_connection = survol_rabbitmq_connection.MakeUri(config_nam, nam_connection)
    grph.add((node_manager, lib_common.MakeProp("Connection"), nod_connection))

    try:
        connect_list = cl.get_connection(nam_connection)
    except Exception as exc:
        lib_common.ErrorMessageHtml("Caught:" + str(exc))

    for connect_key in connect_list:
        connect_val = connect_list[connect_key]
        if connect_key == "vhost":
            nod_v_host = survol_rabbitmq_vhost.MakeUri(config_nam, connect_val)
            grph.add((nod_connection, lib_common.MakeProp("Virtual host"), nod_v_host))
        elif connect_key == "user":
            nod_user = survol_rabbitmq_user.MakeUri(config_nam, connect_val)
            grph.add((nod_connection, lib_common.MakeProp("User"), nod_user))
        elif connect_key == "host":
            nod_host = lib_uris.gUriGen.HostnameUri(connect_val)
            grph.add((nod_connection, lib_common.MakeProp("Host"), nod_host))
        elif connect_key in ["name", "peer_host", "peer_port"]:
            pass
        else:
            if isinstance(connect_val, six.string_types):
                connect_val = connect_val.replace(">", "@")
                logging.debug("connect_key=%s connect_val=%s", connect_key, connect_val)
            logging.debug("Literal=%s", lib_util.NodeLiteral(connect_val))
            grph.add((nod_connection, lib_common.MakeProp(connect_key), lib_util.NodeLiteral(connect_val)))

    survol_rabbitmq_connection.AddSockets(grph, nod_connection, nam_connection)

    cgiEnv.OutCgiRdf()
def Main():
    cgiEnv = lib_common.ScriptEnvironment()
    config_nam = cgiEnv.m_entity_id_dict["Url"]
    nam_v_host = cgiEnv.m_entity_id_dict["VHost"]

    node_manager = survol_rabbitmq_manager.MakeUri(config_nam)

    creds = lib_credentials.GetCredentials("RabbitMQ", config_nam)

    # cl = Client('localhost:12345', 'guest', '*****')
    cl = Client(config_nam, creds[0], creds[1])

    grph = cgiEnv.GetGraph()

    nod_v_host = survol_rabbitmq_vhost.MakeUri(config_nam, nam_v_host)
    grph.add((node_manager, lib_common.MakeProp("virtual host node"), nod_v_host))

    for obj_exchange in cl.get_exchanges(nam_v_host):
        nam_exchange = obj_exchange["name"]
        logging.debug("nam_exchange=%s", nam_exchange)

        node_exchange = survol_rabbitmq_exchange.MakeUri(config_nam, nam_v_host, nam_exchange)

        management_url = rabbitmq.ManagementUrlPrefix(config_nam, "exchanges", nam_v_host, nam_exchange)
        grph.add((node_exchange, lib_common.MakeProp("Management"), lib_common.NodeUrl(management_url)))

        grph.add((nod_v_host, lib_common.MakeProp("Exchange"), node_exchange))

    cgiEnv.OutCgiRdf()
def health_iogt(request):
    status_code = status.HTTP_200_OK
    if settings.RABBITMQ_MANAGEMENT_INTERFACE:
        rx = re.compile(r"amqp://(?P<username>[^:]+).*?:(?P<password>[^@]+)"
                        r".*/(?P<vhost>[^&]+)")
        match = rx.search(settings.BROKER_URL)
        username = match.groupdict()['username']
        password = match.groupdict()['password']
        vhost = match.groupdict()['vhost']
        base_url = settings.RABBITMQ_MANAGEMENT_INTERFACE
        mq_client = Client(base_url, username, password)
        queue_data = mq_client.get_queue(vhost, 'celery')
        messages = queue_data['messages']
        rate = queue_data['messages_details']['rate']
        if messages > 0 and rate == 0:
            status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
    app_id = environ.get('MARATHON_APP_ID', None)
    ver = environ.get('MARATHON_APP_VERSION', None)
    return JsonResponse({'id': app_id, 'version': ver}, status=status_code)
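A quick sanity check of the BROKER_URL regex used above, against a made-up broker URL (the URL and credentials are illustrative, not from any real settings):

import re

rx = re.compile(r"amqp://(?P<username>[^:]+).*?:(?P<password>[^@]+)"
                r".*/(?P<vhost>[^&]+)")
# Hypothetical URL, for illustration only.
m = rx.search("amqp://guest:guest@localhost:5672/myvhost")
print(m.groupdict())
# {'username': 'guest', 'password': 'guest', 'vhost': 'myvhost'}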
def get_queue_depths(host, username, password, vhost):
    cl = Client(host, username, password)
    depths = {}
    queues = [q['name'] for q in cl.get_queues(vhost=vhost)]
    for queue in queues:
        depths[queue] = cl.get_queue_depth(vhost, queue)
    return depths
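A minimal usage sketch for the get_queue_depths() helper above; the host, credentials and vhost are assumed placeholder values, not taken from the snippet:

if __name__ == '__main__':
    # Assumes a local broker with the management plugin on its default port.
    depths = get_queue_depths('localhost:15672', 'guest', 'guest', '/')
    for name, depth in sorted(depths.items()):
        print('%s: %s messages' % (name, depth))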
def Main():
    cgiEnv = lib_common.ScriptEnvironment()
    configNam = cgiEnv.GetId()

    nodeManager = survol_rabbitmq_manager.MakeUri(configNam)

    creds = lib_credentials.GetCredentials("RabbitMQ", configNam)

    # cl = Client('localhost:12345', 'guest', 'guest')
    cl = Client(configNam, creds[0], creds[1])

    grph = cgiEnv.GetGraph()

    # >>> cl.get_all_vhosts()
    # http://localhost:12345/api/vhosts
    # [{u'name': u'/', u'tracing': False, u'messages_details': {u'rate': 0.0}, u'messages': 0,
    #   u'message_stats': {u'deliver_no_ack': 0, u'publish_out': 0, u'get_no_ack': 13,
    #   u'return_unroutable': 0, u'confirm': 0, u'deliver_get': 13, u'publish': 13,
    #   u'confirm_details': {u'rate': 0.0}, u'ack_details': {u'rate': 0.0}, u'get': 0,
    #   u'deliver': 0, u'publish_out_details': {u'rate': 0.0}, u'redeliver_details': {u'rate': 0.0},
    #   u'deliver_details': {u'rate': 0.0}, u'deliver_get_details': {u'rate': 0.0},
    #   u'publish_details': {u'rate': 0.0}, u'publish_in_details': {u'rate': 0.0}, u'ack': 0,
    #   u'publish_in': 0, u'return_unroutable_details': {u'rate': 0.0}, u'get_details': {u'rate': 0.0},
    #   u'get_no_ack_details': {u'rate': 0.0}, u'deliver_no_ack_details': {u'rate': 0.0},
    #   u'redeliver': 0}, u'messages_unacknowledged_details': {u'rate': 0.0},
    #   u'messages_ready_details': {u'rate': 0.0}, u'messages_unacknowledged': 0, u'messages_ready': 0}]
    try:
        listVHosts = cl.get_all_vhosts()
    except Exception as exc:
        lib_common.ErrorMessageHtml("Caught:" + str(exc))

    for objVHost in listVHosts:
        namVHost = objVHost["name"]
        logging.debug("q=%s", namVHost)

        nodeVHost = survol_rabbitmq_vhost.MakeUri(configNam, namVHost)

        try:
            grph.add((nodeVHost, lib_common.MakeProp("tracing"), lib_util.NodeLiteral(objVHost["tracing"])))
        except KeyError:
            pass

        try:
            grph.add((nodeVHost, lib_common.MakeProp("messages"), lib_util.NodeLiteral(objVHost["messages"])))
        except KeyError:
            pass

        # http://127.0.0.1:12345/#/vhosts//
        managementUrl = rabbitmq.ManagementUrlPrefix(configNam, "vhosts", namVHost)
        grph.add((nodeVHost, lib_common.MakeProp("Management"), lib_common.NodeUrl(managementUrl)))

        grph.add((nodeManager, lib_common.MakeProp("Virtual host"), nodeVHost))

    cgiEnv.OutCgiRdf()
def get_host_action(self, vhost):
    for host in self.rabbit_config.hosts:
        cl = Client(f'{host}:15672',
                    self.rabbit_config.username,
                    self.rabbit_config.password)
        try:
            cl.is_alive(vhost)
            return host
        except APIError:
            pass
def check_queue(mpc, min_consumers, max_consumers, rabbitmq_server,
                rabbitmq_vhost, rabbitmq_queue, instance_group):
    cl = Client(rabbitmq_server, 'admin', 'rabbit4ever')
    try:
        messages = cl.get_queue(rabbitmq_vhost, rabbitmq_queue).get('messages')
    except Exception:
        bash_delete_auto_resize = ("sed -i '/" + rabbitmq_queue + " " +
                                   instance_group + " " + mpc + " /d' " + file_path)
        os.system(bash_delete_auto_resize)
    else:
        bash_instance_group_info = ("gcloud compute instance-groups managed describe " +
                                    instance_group + " --zone=us-central1-a")
        instance_group_info = subprocess.check_output(bash_instance_group_info, shell=True)
        cur_size = re.findall("targetSize:.*", instance_group_info, re.MULTILINE)
        if not cur_size:
            print("Instance group %s does not exist" % instance_group)
            bash_delete_auto_resize = ("sed -i '/" + rabbitmq_queue + " " +
                                       instance_group + " " + mpc + " /d' " + file_path)
            os.system(bash_delete_auto_resize)
        else:
            cur_max_replicas = re.findall("maxNumReplicas:.*", instance_group_info, re.MULTILINE)
            if max_consumers == 0:
                if not cur_max_replicas:
                    print("Auto scaling for instance group %s is already disabled" % instance_group)
                else:
                    resize_instance_group(instance_group, 0, 0)
                bash_delete_auto_resize = ("sed -i '/" + rabbitmq_queue + " " +
                                           instance_group + " " + mpc + " /d' " + file_path)
                os.system(bash_delete_auto_resize)
            else:
                if messages == 0:
                    if not cur_max_replicas:
                        print("Auto scaling for instance group %s is already disabled" % instance_group)
                    else:
                        resize_instance_group(instance_group, 0, 0)
                else:
                    consumers = cl.get_queue(rabbitmq_vhost, rabbitmq_queue).get('consumers')
                    if (messages / mpc) != consumers:
                        if not cur_max_replicas:
                            print("Auto scaling for instance group %s is disabled, "
                                  "going to resize instance group" % instance_group)
                            resize_instance_group(instance_group, min_consumers, max_consumers)
                        else:
                            cur_max_replicas = int(cur_max_replicas[0].split(": ")[1])
                            max_replicas = max(cur_max_replicas, max_consumers)
                            min_replicas = min(max(messages / mpc, min_consumers), max_replicas)
                            resize_instance_group(instance_group, min_replicas, max_replicas)
def countMessagesQueue(self, vhost, queue):
    qtd = 0
    cl = Client('{}:{}'.format(self.host, self.web_port), self.user, self.password)
    try:
        qtd = cl.get_queue_depth(vhost, queue)
    except HTTPError as e:
        logger.error(e)
        raise e
    return qtd
def getRabbitmqQueueDetails(queueName, host='/'):
    cluster = constant.config['cluster']
    url = constant.rabbitmqCred[cluster]
    user = constant.rabbitmqCred['username']
    passwd = constant.rabbitmqCred['passwd']
    obj = Client(url, user, passwd)
    try:
        return obj.get_queue(vhost=host, name=queueName)
    except pyrabbit.http.HTTPError:
        return {}
def get_bindings(self):
    client = Client("localhost:15672", "guest", "guest")
    bindings = client.get_bindings()
    bindings_result = []
    for b in bindings:
        if b["source"] == "exchange_baby_monitor":
            bindings_result.append(b)
    return bindings_result
def Main():
    cgiEnv = lib_common.CgiEnv()
    configNam = cgiEnv.GetId()

    nodeManager = survol_rabbitmq_manager.MakeUri(configNam)

    creds = lib_credentials.GetCredentials("RabbitMQ", configNam)

    # cl = Client('localhost:12345', 'guest', 'guest')
    cl = Client(configNam, creds[0], creds[1])

    grph = cgiEnv.GetGraph()

    # cl.is_alive()
    # >>> cl.get_users()
    # [{u'hashing_algorithm': u'rabbit_password_hashing_sha256', u'name': u'guest',
    #   u'tags': u'administrator', u'password_hash': u'xxxxxx'}]
    try:
        listUsers = cl.get_users()
    except:
        exc = sys.exc_info()[1]
        lib_common.ErrorMessageHtml("Caught:" + str(exc))

    for objUser in listUsers:
        namUser = objUser["name"]
        DEBUG("q=%s", namUser)

        nodeUser = survol_rabbitmq_user.MakeUri(configNam, namUser)

        try:
            grph.add((nodeUser, lib_common.MakeProp("Tags"), lib_common.NodeLiteral(objUser["tags"])))
        except KeyError:
            pass

        try:
            grph.add((nodeUser, lib_common.MakeProp("Hashing algorithm"),
                      lib_common.NodeLiteral(objUser["hashing_algorithm"])))
        except KeyError:
            pass

        # http://127.0.0.1:12345/#/users/guest
        managementUrl = rabbitmq.ManagementUrlPrefix(configNam, "users", namUser)
        grph.add((nodeUser, lib_common.MakeProp("Management"), lib_common.NodeUrl(managementUrl)))

        grph.add((nodeManager, lib_common.MakeProp("User"), nodeUser))

    cgiEnv.OutCgiRdf()
def main():
    rabbit = None
    try:
        # Ensure there are no parallel runs of this script
        lock.acquire(timeout=5)

        # Connect to RabbitMQ
        nullwrite = NullWriter()
        oldstdout = sys.stdout
        sys.stdout = nullwrite  # disable output

        rabbit = RabbitClient('%s:%s' % (rabbitmq['host'], rabbitmq['port']),
                              rabbitmq['username'], rabbitmq['password'])
        if not rabbit.is_alive():
            raise Exception('Cannot connect to RabbitMQ')
        vhost = rabbit.get_vhost(rabbitmq['vhost'])
        queues = rabbit.get_queues(rabbitmq['vhost'])

        sys.stdout = oldstdout  # enable output

        # Build outcome
        metrics = []
        for key in keys:
            if type(key) == dict and 'vhost' in key:
                for subkey in key['vhost']:
                    if subkey in vhost:
                        metrics.append(Metric(rabbitmq['host'],
                                              'rabbitmq.%s.%s' % ('vhost', subkey),
                                              vhost[subkey]))
            elif type(key) == dict and 'vhost.message_stats' in key:
                for subkey in key['vhost.message_stats']:
                    if subkey in vhost['message_stats']:
                        metrics.append(Metric(rabbitmq['host'],
                                              'rabbitmq.%s.%s' % ('vhost.message_stats', subkey),
                                              vhost['message_stats'][subkey]))
            elif type(key) == dict and 'queues' in key:
                for queue in queues:
                    for subkey in key['queues']:
                        if subkey in queue:
                            metrics.append(Metric(rabbitmq['host'],
                                                  'rabbitmq.%s.%s[%s]' % ('queue', subkey, queue['name']),
                                                  queue[subkey]))

        # Send packet to zabbix
        send_to_zabbix(metrics, zabbix_host, zabbix_port)
    except LockTimeout:
        print('Lock not acquired, exiting')
    except AlreadyLocked:
        print('Already locked, exiting')
    except Exception as e:
        print(type(e))
        print('Error: %s' % e)
def get_queue_depths(host, username, password, vhost):
    cl = Client(host, username, password)
    if not cl.is_alive():
        raise Exception("Failed to connect to rabbitmq")
    depths = {}
    queues = [q["name"] for q in cl.get_queues(vhost=vhost)]
    for queue in queues:
        if queue == "aliveness-test":
            continue
        depths[queue] = cl.get_queue_depth(vhost, queue)
    return depths
def get_rabbitmq_queue_length(q):
    """Fetch queue length from RabbitMQ for a given queue.

    Used periodically to decide if we want to queue more functions or not.

    Uses the Management HTTP API of RabbitMQ, since the Celery client does
    not have access to these counts.
    """
    from pyrabbit.api import Client

    cl = Client(settings.SNOOP_RABBITMQ_HTTP_URL, 'guest', 'guest')
    return cl.get_queue_depth('/', q)
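A hypothetical sketch of the "queue more functions or not" decision the docstring above describes; the threshold and queue name are illustrative assumptions, not part of the original code:

MAX_BACKLOG = 1000  # assumed limit, not from the original code

def should_dispatch_more(queue_name='celery'):
    # Only enqueue new tasks while the broker backlog stays below the cap.
    return get_rabbitmq_queue_length(queue_name) < MAX_BACKLOG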
async def monitor(request):
    # Data loaded for the first time from the function call.
    global refresh
    db = request.app['db']
    await db.collection.drop()
    if refresh == 0:
        # Client creation
        try:
            client = Client('localhost:15672', 'guest', 'guest')
        except Exception:
            return {"result": "Client not created"}

        # Get all the bindings.
        try:
            bindings = client.get_bindings()
            n = len(bindings)
        except Exception:
            return {"result": "binding error"}

        # Final list of all the rows.
        result_list = []
        for i in range(n):
            # Dict for storing values for each connection.
            try:
                dict_each = {}
                vhost_name = bindings[i]['vhost']
                dict_each['vhost_name'] = vhost_name
                dict_each['exchange_name'] = bindings[i]['source']
                queue_name = bindings[i]['destination']
                dict_each['queue_name'] = queue_name
                dict_each['queue_size'] = client.get_queue_depth(vhost=vhost_name, name=queue_name)
                result_list.append(dict_each)
            except Exception:
                return {"result": "data not found"}

        # Insert data in the db.
        try:
            await db.collection.insert_many(result_list)
        except Exception:
            return {'result': "data not stored in the db"}

        refresh += 1
        return {'result_list': result_list}

    # Data retrieved from the database after refresh.
    if refresh > 0:
        result_list = []
        async for document in db.collection.find():
            result_list.append(document)
        return {"result_list": result_list}
def get_queue_depths(host, username, password, vhost):
    cl = Client(host, username, password)
    if not cl.is_alive():
        raise Exception("Failed to connect to rabbitmq")
    depths = {}
    queues = [q['name'] for q in cl.get_queues(vhost=vhost)]
    for queue in queues:
        if queue == "aliveness-test":  # pyrabbit
            continue
        elif queue.endswith('.pidbox') or queue.startswith('celeryev.'):  # celery
            continue
        depths[queue] = cl.get_queue_depth(vhost, queue)
    return depths
def get_queue_depths(host, username, password, vhost):
    """Fetches queue depths from rabbitmq instance."""
    cl = Client(host, username, password)
    if not cl.is_alive():
        raise Exception("Failed to connect to rabbitmq")
    depths = {}
    queues = [q['name'] for q in cl.get_queues(vhost=vhost)]
    for queue in queues:
        if queue == "aliveness-test":  # pyrabbit
            continue
        elif queue.startswith('amq.gen-'):  # anonymous queues
            continue
        depths[queue] = cl.get_queue_depth(vhost, queue)
    return depths
def configure(configobj):
    global INTERVAL
    global cl
    global queues_to_count
    global vhost

    config = {c.key: c.values for c in configobj.children}
    INTERVAL = config['interval'][0]
    host = config['host'][0]
    port = int(config['port'][0])
    username = config['username'][0]
    password = config['password'][0]
    if 'vhost' in config:
        vhost = config['vhost'][0]
    else:
        vhost = '/'
    queues_to_count = []
    if 'message_count' in config:
        queues_to_count = config['message_count']

    collectd.info('rabbitmq_monitoring: Interval: {}'.format(INTERVAL))
    cl = Client('{}:{}'.format(host, port), username, password)
    collectd.info('rabbitmq_monitoring: Connecting to: {}:{} as user:{} password:{}'.format(
        host, port, username, password))
    collectd.info('rabbitmq_monitoring: Counting messages on: {}'.format(queues_to_count))
    collectd.register_read(read, INTERVAL)
def main():
    global rabbitClient

    # Load config file.
    configFilePath = args.config
    if os.path.isfile(configFilePath):
        logging.debug('Processing config file {0}'.format(configFilePath))
        with open(configFilePath) as configFile:
            conf = json.load(configFile)
            logging.debug('Graphite configuration: {0}'.format(conf["graphite_servers"]))
            logging.debug('RabbitMQ configuration: {0}'.format(conf["rabbitmq_clusters"]))
            for rabbitmq in conf["rabbitmq_clusters"]:
                logging.debug('Working on Rabbitmq cluster: {0}'.format(rabbitmq['cluster_name']))
                rabbitClient = Client('{0}:{1}'.format(rabbitmq['host'], rabbitmq['port']),
                                      rabbitmq['username'], rabbitmq['password'])
                for graphite in conf["graphite_servers"]:
                    process(rabbitmq, graphite)
    else:
        logging.error('You must pass existing configFilePath, actual is {0}'.format(configFilePath))
        sys.exit(1)
def Main():
    cgiEnv = lib_common.CgiEnv()
    configNam = cgiEnv.GetId()

    nodeManager = survol_rabbitmq_manager.MakeUri(configNam)

    creds = lib_credentials.GetCredentials("RabbitMQ", configNam)

    # cl = Client('localhost:12345', 'guest', 'guest')
    cl = Client(configNam, creds[0], creds[1])

    grph = cgiEnv.GetGraph()

    # cl.is_alive()
    try:
        listQueues = cl.get_queues()
    except:
        exc = sys.exc_info()[1]
        lib_common.ErrorMessageHtml("Caught:" + str(exc))

    for quList in listQueues:
        namQueue = quList["name"]
        DEBUG("q=%s", namQueue)

        namVHost = quList["vhost"]
        nodVHost = survol_rabbitmq_vhost.MakeUri(configNam, namVHost)

        nodeQueue = survol_rabbitmq_queue.MakeUri(configNam, namVHost, namQueue)
        grph.add((nodeQueue, lib_common.MakeProp("vhost"), lib_common.NodeLiteral(namVHost)))
        grph.add((nodeQueue, lib_common.MakeProp("vhost node"), nodVHost))

        managementUrl = rabbitmq.ManagementUrlPrefix(configNam, "queues", namVHost, namQueue)
        grph.add((nodeQueue, lib_common.MakeProp("Management"), lib_common.NodeUrl(managementUrl)))

        grph.add((nodeManager, lib_common.MakeProp("Queue"), nodeQueue))

    cgiEnv.OutCgiRdf()
def _get_rabbit_connection():
    """Helper giving us a rabbit connection from settings.BROKER_URL"""
    host_with_port = "{}:{}/api/".format(settings.RABBITMQ_HOST,
                                         settings.RABBITMQ_MANAGEMENT_PORT)
    if settings.BROKER_USE_SSL:
        scheme = 'https'
    else:
        scheme = 'http'
    return Client(host_with_port, settings.RABBITMQ_DEFAULT_USER,
                  settings.RABBITMQ_DEFAULT_PASS, scheme=scheme)
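A hypothetical caller for _get_rabbit_connection() above; the vhost and queue name are illustrative assumptions, while purge_queue() is pyrabbit's standard purge call:

def purge_celery_queue():
    client = _get_rabbit_connection()
    # Drop all ready messages from the assumed 'celery' queue on vhost '/'.
    client.purge_queue('/', 'celery')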
class rabbitmonitor(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        self.scheduler = sched.scheduler(time.time, time.sleep)
        self.cl = Client("localhost:55672", "guest", "guest")

    def rabbit_stats(self):
        rabbit_data = {}
        try:
            exchanges = self.cl.get_exchanges()
            queues = self.cl.get_queues()
            binding = self.cl.get_bindings()
            rabbit_data['time'] = time.time()
            rabbit_data['exchanges'] = exchanges
            rabbit_data['queues'] = queues
            print(rabbit_data)
        except Exception:
            pass

    def run(self):
        while True:
            self.scheduler.enter(2, 1, self.rabbit_stats, ())
            self.scheduler.run()
class MqSmokeTest(unittest.TestCase):
    def setUp(self):
        self.cl = Client("mq:15672",
                         os.environ.get("RABBITMQ_USER"),
                         os.environ.get("RABBITMQ_PASSWORD"))
        self.assertTrue(self.cl.is_alive())

    def test_exchange(self):
        exchanges = self.cl.get_exchanges()
        for exchange in exchanges:
            if exchange["name"] == "sfm_exchange":
                break
        else:
            self.assertTrue(False, "Exchange not found.")

    def test_queues(self):
        queues = self.cl.get_queues()
        queues_names = {queue["name"] for queue in queues}
        # Add additional queue names as new components are added.
        self.assertTrue(queues_names.issuperset(set([
            "flickr_harvester",
            "sfm_ui",
            "twitter_harvester",
            "twitter_rest_harvester"
        ])))
def bindAccountCheck(accounts, rabbit_client, rabbit_user, rabbit_password,
                     rabbit_ip, rabbit_port, rabbit_host, queue_name,
                     exchangeName, r_key):
    cl = AdminClient(rabbit_ip + ':1' + rabbit_port, rabbit_user, rabbit_password)
    bindedaccount = None
    # queues = [q[queue_name] for q in cl.get_queues()]
    bindings = [q['routing_key']
                for q in cl.get_queue_bindings(vhost=rabbit_host.replace("/", ""),
                                               qname=queue_name)]
    myaccounts = []
    for x in accounts:
        myaccounts.append(x[0])

    for routing_key in bindings:
        if routing_key != queue_name:
            if routing_key not in ('#', '*'):
                try:
                    bindedaccount = routing_key.lower().split('account.')[1].split('.')[0]
                    if bindedaccount is not None:
                        try:
                            if bindedaccount not in myaccounts:
                                unbindAccountFromQueue(bindedaccount, exchangeName,
                                                       queue_name, rabbit_client, r_key)
                                logger_debug("unbinded: %s %s %s %s" %
                                             (bindedaccount, exchangeName, queue_name, r_key))
                        except Exception:
                            logger_error("incorrect binding %s" % bindedaccount)
                except Exception:
                    logger_error("IndexError %s,%s" % (queue_name, routing_key))
                    bindedaccount = 999999999
            elif routing_key not in myaccounts:
                logger_debug("routing_key=%s" % routing_key)
                unbindAccountFromQueue(routing_key, exchangeName, queue_name,
                                       rabbit_client, r_key)

    try:
        if bindedaccount is not None:
            logger_debug("unbinded: %s %s %s %s" %
                         (bindedaccount, exchangeName, queue_name, r_key))
        else:
            logger_debug("All unbinded.")
    except Exception:
        pass
parser.add_argument("-rabbitmq_password", dest='rabbitmq_password', type=str, help="Password for RabbitMQ auth", default='guest') parser.add_argument("-username", dest='username', type=str, help="Username", default='test') parser.add_argument("-password", dest='password', type=str, help="Password", default='swordfish') parser.add_argument("-vhostname", dest='vhostname', type=str, help="Vhost name", default='test') args = parser.parse_args() rabbitmq_url = args.rabbitmq_url rabbitmq_user = args.rabbitmq_username rabbitmq_password = args.rabbitmq_password user = args.username password = args.password vhost = args.vhostname cl = Client(rabbitmq_url, rabbitmq_user, rabbitmq_password) assert cl.is_alive() for queue in cl.get_queues(): if queue['vhost'] == vhost: cl.purge_queue(vhost, queue['name']) cl.delete_queue(vhost, queue['name']) for vhost_ in cl.get_all_vhosts(): if vhost_['name'] == vhost: while True: try: cl.delete_vhost(vhost_['name']) break except Exception: pass
def getQueueMessagesCount(rabbit_ip, rabbit_port, rabbit_user, rabbit_password,
                          rabbit_host, queue_name):
    # Prefixing the AMQP port with '1' yields the management API port,
    # e.g. 5672 -> 15672.
    cl = AdminClient(rabbit_ip + ':1' + rabbit_port, rabbit_user, rabbit_password)
    messages_cnt = cl.get_queue_depth(vhost=rabbit_host.replace("/", ""),
                                      name=queue_name)
    return messages_cnt
parser.add_argument("-rabbitmq_url", dest='rabbitmq_url', type=str, help="URL of using RabbitMQ", default='localhost:55672') parser.add_argument("-rabbitmq_username", dest='rabbitmq_username', type=str, help="Username for RabbitMQ auth", default='guest') parser.add_argument("-rabbitmq_password", dest='rabbitmq_password', type=str, help="Password for RabbitMQ auth", default='guest') parser.add_argument("-username", dest='username', type=str, help="Username", default='test') parser.add_argument("-password", dest='password', type=str, help="Password", default='swordfish') parser.add_argument("-vhostname", dest='vhostname', type=str, help="Vhost name", default='test') args = parser.parse_args() rabbitmq_url = args.rabbitmq_url rabbitmq_user = args.rabbitmq_username rabbitmq_password = args.rabbitmq_password user = args.username password = args.password vhost = args.vhostname cl = Client(rabbitmq_url, rabbitmq_user, rabbitmq_password) assert cl.is_alive() for i in cl.get_all_vhosts(): if i['name'] == vhost: cl.delete_vhost(i['name']) for i in cl.get_users(): if i['name'] == user: cl.delete_user(i['name']) cl.create_vhost(vhost) cl.create_user(user, password, tags='administrator') cl.set_vhost_permissions(vhost, user, '.*', '.*', '.*')
class RabbitHelper(object):
    def __init__(self, mq_server=None, virtual_host=cfg.CB_CLUSTER_TAG):
        if mq_server is None:
            mq_server = cfg.RABBITMQ_IP

        self.connection = Connection(host=mq_server, userid="guest",
                                     password="******", virtual_host=virtual_host)
        self.manager = Client(mq_server + ":55672", "guest", "guest")

    def declare(self, queue=None, durable=True):
        res = None
        channel = self.connection.channel()

        if queue:
            if not isinstance(queue, str):
                queue = str(queue)
            res = channel.queue_declare(queue=queue, durable=durable, auto_delete=True)
        else:
            # tmp queue
            res = channel.queue_declare(exclusive=True)

        channel.close()
        return res

    def exchange_declare(self, exchange, type_='direct'):
        channel = self.connection.channel()
        channel.exchange_declare(exchange=exchange, type=type_)
        channel.close()

    def bind(self, exchange, queue):
        channel = self.connection.channel()
        channel.queue_bind(exchange=exchange, queue=queue)
        channel.close()

    def delete(self, queue):
        channel = self.connection.channel()
        if not isinstance(queue, str):
            queue = str(queue)
        channel.queue_delete(queue=queue)
        channel.close()

    def purge(self, queue):
        channel = self.connection.channel()
        if not isinstance(queue, str):
            queue = str(queue)
        channel.queue_purge(queue=queue)
        channel.close()

    def channel(self):
        return self.connection.channel(), self.connection

    def qsize(self, queue):
        size = 0
        if queue is not None:
            if not isinstance(queue, str):
                queue = str(queue)
            response = self.declare(queue=queue)
            size = response[1]
        return size

    def broadcastMsg(self, routing_key, body):
        channel = self.connection.channel()
        rc = channel.basic_publish(exchange='', routing_key=routing_key, body=body)
        channel.close()

    def getExchange(self, vhost, exchange):
        return self.manager.get_exchange(vhost, exchange)

    def numExchangeQueues(self, vhost, exchange):
        try:
            ex = self.getExchange(vhost, exchange)
            return len(ex['outgoing'])
        except Exception:
            # TODO: sometimes the broker doesn't return the expected response
            return 1

    def putMsg(self, routing_key, body, exchange=''):
        channel = self.connection.channel()
        if not isinstance(routing_key, str):
            routing_key = str(routing_key)
        rc = channel.basic_publish(exchange=exchange, routing_key=routing_key, body=body)
        channel.close()

    def getMsg(self, queue, no_ack=False, requeue=False):
        channel = self.connection.channel()
        message = channel.basic_get(queue=queue)
        body = None

        if message is not None:
            body = message.body
            # Handle data receipt acknowledgement.
            if no_ack == False:
                message.ack()

            if requeue:
                self.putMsg(queue, body)

        channel.close()
        return body

    def getJsonMsg(self, queue, no_ack=False, requeue=False):
        msg = self.getMsg(queue, no_ack, requeue)
        body = {}
        if msg is not None:
            try:
                body = json.loads(msg)
            except ValueError:
                pass
        return body

    def close(self):
        self.connection.close()

    def __del__(self):
        self.close()
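A minimal, hypothetical round-trip through RabbitHelper above; the queue name and payload are illustrative assumptions:

helper = RabbitHelper()
helper.declare("test_queue")  # durable, auto-delete queue
helper.putMsg("test_queue", json.dumps({"ping": 1}))
print(helper.qsize("test_queue"))       # expected: 1
print(helper.getJsonMsg("test_queue"))  # {'ping': 1}, message is acked
helper.delete("test_queue")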
parser.add_argument("--password", help="the password to login", required=True) parser.add_argument("--host", help="the host where rabbitmq runs on", required=True) parser.add_argument("--port", help="the the port where rabbitmq listens to", default=15672) parser.add_argument("--listVhosts", help="list available vhosts", action="store_true") parser.add_argument("--vhost", help="use given vhost") parser.add_argument("--listQueues", help="list available Queues", action="store_true") parser.add_argument("--queue", help="use given queue") parser.add_argument("--message", help="The message to send") parser.add_argument("--listen", help="wait for a message on that channel", action="store_true") args = parser.parse_args() client = Client(args.host + ":" + str(args.port), args.user, args.password) if args.listVhosts: pprint(getVhosts(client)) exit(0) if not args.vhost: print("vhost argument is needed") exit(1) elif not args.vhost in getVhosts(client): print("vhost not available") exit(2) if args.listQueues: pprint(getQueues(client, args.vhost))
def _list_amqp_queues(self):
    rabbit_client = Client('localhost:15672', 'guest', 'guest')
    queues = [q['name'] for q in rabbit_client.get_queues()]
    return queues
class Collector(object):
    def __init__(self, index):
        self.index = index
        self.cancelled = False
        self.transport = self.protocol = None
        self.messages = []
        self.block_size = 1000
        self.exchange = "logstash"
        self.current_date = None
        self.logger = getLogger('alco.collector.%s' % self.index.name)
        self.amqp = self.redis = self.conn = self.vhost = self.rabbit = None
        self.insert_thread = None
        self.query_queue = queue.Queue()
        self.result_queue = queue.Queue()
        self.result_queue.put(None)
        self.query = self.values_stub = None
        self.existing = self.included = self.indexed = self.filtered = None

    def cancel(self):
        self.cancelled = True

    def inserter_loop(self):
        self.conn = connections[ALCO_SETTINGS['SPHINX_DATABASE_NAME']]
        while not self.cancelled:
            try:
                query, args = self.query_queue.get(block=True, timeout=1)
            except queue.Empty:
                continue
            result = self.insert_data(query, args)
            self.result_queue.put(result)

    def connect(self):
        connections['default'].close()
        rabbitmq = ALCO_SETTINGS['RABBITMQ']
        self.amqp = Connection(**rabbitmq)
        self.redis = redis.Redis(**ALCO_SETTINGS['REDIS'])
        self.insert_thread = Thread(target=self.inserter_loop)
        self.insert_thread.start()
        hostname = '%s:%s' % (rabbitmq['host'], ALCO_SETTINGS['RABBITMQ_API_PORT'])
        self.rabbit = Client(hostname, rabbitmq['userid'], rabbitmq['password'])
        self.vhost = rabbitmq['virtual_host']

    # noinspection PyUnusedLocal
    def process_sigint(self, signum, frame):
        self.logger.info("Got signal %s" % signum)
        self.cancel()
        self.logger.info("Futures cancelled, wait for thread")
        self.insert_thread.join()
        self.logger.info("Thread done")

    def __call__(self):
        signal.signal(signal.SIGINT, self.process_sigint)
        signal.signal(signal.SIGTERM, self.process_sigint)
        try:
            self.logger.debug("Connecting to RabbitMQ")
            self.connect()
            self.declare_queue()
            self.cleanup_bindings()
            channel = self.amqp.channel()
            channel.basic_qos(0, 1000, False)
            channel.basic_consume(self.index.queue_name,
                                  callback=self.process_message, no_ack=True)
            start = time.time()
            self.logger.debug("Start processing messages")
            while not self.cancelled:
                try:
                    self.amqp.drain_events(timeout=1)
                except (socket.timeout, OSError):
                    pass
                if time.time() - start > 1:
                    self.push_messages()
                    start = time.time()
        except KeyboardInterrupt:
            self.logger.warning("Got SIGINT, exit(0)")
        finally:
            self.amqp.close()
            sys.exit(0)

    def process_message(self, msg):
        data = json.loads(six.binary_type(msg.body))
        ts = data.pop('@timestamp')
        data.pop("@version")
        msg = data.pop('message')
        seq = data.pop('seq', 0)
        dt = datetime.strptime(ts, "%Y-%m-%dT%H:%M:%S.%fZ")
        result = {
            'ts': time.mktime(dt.timetuple()),
            'ms': dt.microsecond,
            'seq': seq,
            'message': msg,
            'data': data
        }
        self.messages.append(result)
        d = dt.date()
        if not self.current_date:
            self.current_date = d
        if d != self.current_date:
            self.current_date = d
            self.push_messages()
        if len(self.messages) >= self.block_size:
            self.push_messages()

    def declare_queue(self):
        channel = self.amqp.channel()
        """:type channel: amqp.channel.Channel"""
        durable = self.index.durable
        channel.exchange_declare(exchange=self.exchange, type='topic',
                                 durable=durable, auto_delete=False)
        channel.queue_declare(self.index.queue_name, durable=durable,
                              auto_delete=False)
        for rk in self.get_routing_keys():
            channel.queue_bind(self.index.queue_name, exchange=self.exchange,
                               routing_key=rk)

    def get_routing_keys(self):
        # Return a list (not a lazy map) so callers can iterate repeatedly.
        return [x.strip() for x in self.index.routing_key.split(',')]

    def cleanup_bindings(self):
        self.logger.debug("Checking bindings")
        queue = self.index.queue_name
        exchange = self.exchange
        bindings = self.rabbit.get_queue_bindings(self.vhost, queue)
        bindings = [b for b in bindings if b['source'] == exchange]
        allowed = self.get_routing_keys()
        q = six.moves.urllib.parse.quote
        for b in bindings:
            rk = b['routing_key']
            if rk in allowed:
                continue
            self.logger.debug("Unbind %s with RK=%s" % (queue, rk))
            self.rabbit.delete_binding(self.vhost, exchange, q(queue), q(rk))

    def push_messages(self):
        try:
            request_started.send(None, environ=None)
            self._push_messages()
        except Exception as e:
            self.logger.exception(e)
            raise
        finally:
            request_finished.send(None)

    def _push_messages(self):
        messages, self.messages = self.messages, []
        if not messages:
            return
        message_count = len(messages)
        self.logger.info("Saving %s events" % message_count)
        columns = defaultdict(set)
        suffix = self.current_date.strftime("%Y%m%d")
        name = "%s_%s" % (self.index.name, suffix)
        args = []
        self.load_index_columns()
        self.prepare_query(name)

        pkeys = self.get_primary_keys(messages)
        seen = set()
        for pk, data in zip(pkeys, messages):
            # Saving seen columns to the LoggerColumn model, collecting unique
            # values for caching in redis.
            js = data['data']
            self.process_js_columns(js, columns, self.included, seen)
            js_str = json.dumps(js)
            values = tuple(js.get(c) or '' for c in self.indexed)
            args.extend((pk, js_str, data['message']) + values)
        query = self.query + ','.join([self.values_stub] * message_count)

        self.save_column_values(columns)
        self.save_new_columns(seen)
        if self.result_queue.empty():
            self.logger.debug("Insert still running, waiting")
            while not self.cancelled:
                try:
                    self.result_queue.get(block=True, timeout=1)
                except queue.Empty:
                    continue
        self.query_queue.put((query, args))

    def insert_data(self, query, args):
        self.logger.debug("Inserting logs to searchd")
        result = None
        for _ in 1, 2, 3:
            try:
                c = self.conn.cursor()
                result = c.execute(query, args)
                self.logger.debug("%s rows inserted" % c.rowcount)
                c.close()
            except ProgrammingError:
                self.logger.exception("Can't insert values to index: %s" % query)
            except DatabaseError as e:
                self.logger.exception("Sphinx connection error: %s" % e)
                try:
                    close_old_connections()
                except Exception as e:
                    self.logger.exception("Can't reconnect: %s" % e)
                    os.kill(os.getpid(), signal.SIGKILL)
            except Exception:
                self.logger.exception("Unhandled error in insert_data")
            else:
                return result
        self.logger.error("Can't insert data in 3 tries, exit process")
        os.kill(os.getpid(), signal.SIGKILL)

    def save_new_columns(self, seen):
        self.logger.debug("Check for new columns")
        for column in seen - set(self.existing):
            self.logger.debug("Register column %s" % column)
            self.index.loggercolumn_set.create(name=column)

    def save_column_values(self, columns):
        self.logger.debug("Saving values for filtered columns")
        ts = time.time()
        for column in self.filtered:
            values = columns.get(column)
            if not values:
                continue
            key = keys.KEY_COLUMN_VALUES.format(index=self.index.name,
                                                column=column)
            values = {v: ts for v in values}
            self.redis.zadd(key, **values)

    def prepare_query(self, name):
        if self.indexed:
            self.query = "REPLACE INTO %s (id, js, logline, %s) VALUES " % (
                name, ', '.join(self.indexed))
        else:
            self.query = "REPLACE INTO %s (id, js, logline) VALUES " % name
        sql_col_count = len(self.indexed) + 3  # + id, js, logline
        self.values_stub = "(%s)" % ", ".join(["%s"] * sql_col_count)

    def load_index_columns(self):
        # all defined columns
        all_columns = list(self.index.loggercolumn_set.all())
        included_columns = [c for c in all_columns if not c.excluded]
        filtered_columns = [c for c in included_columns if c.filtered]
        indexed_columns = [c for c in included_columns if c.indexed]
        self.existing = [c.name for c in all_columns]
        self.included = [c.name for c in included_columns]
        self.filtered = [c.name for c in filtered_columns]
        self.indexed = [c.name for c in indexed_columns]

    @staticmethod
    def process_js_columns(js, columns, included, seen):
        for key, value in list(js.items()):
            if key in ('pk', 'id', 'ts', 'ms', 'seq', 'model'):
                # escape fields reserved by Django and ALCO
                js['%s_x' % key] = js.pop(key)
                key = '%s_x' % key
            # save seen columns set
            if key not in seen:
                seen.add(key)
            if key not in included:
                # discard fields excluded from indexing
                js.pop(key)
                continue
            # save column values set
            if type(value) not in (bool, int, float, six.text_type):
                continue
            if value not in columns[key]:
                columns[key].add(value)

    def get_primary_keys(self, messages):
        """Generate PK sequence for a list of messages."""
        pkeys = []
        pk = None
        for msg in messages:
            # pk is [timestamp][microseconds][randint] as a base-10 integer
            pk = int((msg['ts'] * 10**6 + msg['ms']) * 1000) + randint(0, 1000)
            pkeys.append(pk)
        self.logger.debug("first pk is %s" % pk)
        return pkeys
from pyrabbit.api import Client

# pyrabbit expects "host:port" without a scheme; the URL form below is dead
# code, immediately overridden by the second assignment.
# client = Client('http://localhost:55672', 'guest', 'guest')
client = Client('localhost:15672', 'guest', 'guest')
print(client.is_alive())
messages = client.get_messages('/', 'celery')
print(messages)
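Note that get_messages() pulls through the management API and consumes messages by default; a sketch of a non-destructive peek, assuming the same broker and queue as above:

# requeue=True puts the message back on the queue after reading it.
peeked = client.get_messages('/', 'celery', count=1, requeue=True)
if peeked:
    print(peeked[0]['payload'])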
from pyrabbit.api import Client
import json

cl = Client('queue01.core.cmc.lan', 'guest', 'guest')
# cl.is_alive()

messages = cl.get_messages('/', 'jenkins_results', requeue=True)

for message in messages:
    payload = json.loads(message['payload'])
    # m = json.loads(message)
    # print('\n' + m['payload'])
    print('\nProject     : %s' % payload['project'])
    print('Build number: %s' % payload['number'])
    print('Status      : %s' % payload['status'])
    print()