def test_wildcard_query():
    """Smoke test: a '*:*' wildcard query returns the full MBean catalogue."""
    connection = JMXConnection(CONNECTION_URL)
    results = connection.query([JMXQuery("*:*")])
    printMetrics(results)
    # The target JVM is expected to expose at least this many metrics.
    assert_greater_equal(len(results), 4699)
def jmx_oper(terminate_url, terminate_port):
    """Query the ServiceAuth MBean of a remote JVM over JMX.

    :param terminate_url: host name / IP of the target JVM
    :param terminate_port: JMX RMI port (int) of the target JVM
    :return: list of metrics returned by the JMX query
    """
    jmx_url = "service:jmx:rmi:///jndi/rmi://%s:%d/jmxrmi" % (terminate_url, terminate_port)
    # Configure the matching JDK path for JMXConnection here if required.
    jmx_connection = JMXConnection(jmx_url)
    # Bug fix: the original did "ServiceAuthMBean:name=ServiceAuth".format(type_str)
    # (no placeholder, so .format() was a no-op) and then called
    # jmx_connection.query() without the mandatory query list (TypeError).
    bean_name_str = "ServiceAuthMBean:name=ServiceAuth"
    return jmx_connection.query([JMXQuery(bean_name_str)])
def get_output():
    """Collect Solr cache, searcher and update metrics over JMX.

    :return: dict of metrics from the last query; on failure a dict with
        ``status`` = 0 and ``msg`` set to the error text.
    """
    URL = "service:jmx:rmi:///jndi/rmi://" + HOST_NAME + ":" + PORT + "/jmxrmi"
    # Bug fix: initialize result_json up front so the except block cannot
    # raise NameError when JMXConnection(URL) itself fails.
    result_json = {}
    # (query suffix, metric prefix) pairs; the duplicated requestTimes
    # query from the original has been removed.
    queries = [
        ("category=CACHE,scope=searcher,name=filterCache", "filter_cache_"),
        ("category=CACHE,scope=searcher,name=fieldValueCache", "cache_"),
        ("category=CACHE,scope=searcher,name=queryResultCache", "result_cache_"),
        ("category=CACHE,scope=searcher,name=documentCache", "document_cache_"),
        ("category=SEARCHER,scope=searcher,name=maxDoc", "searcher_maxdoc"),
        ("category=SEARCHER,scope=searcher,name=numDocs", "searcher_numdocs"),
        ("category=SEARCHER,scope=searcher,name=warmupTime", "searcher_warmup"),
        ("category=UPDATE,scope=update,name=requestTimes", "request_times_"),
    ]
    try:
        jmxConnection = JMXConnection(URL)
        for suffix, prefix in queries:
            QUERY = "solr:dom1=core,dom2=" + DOMAIN + "," + suffix
            # NOTE(review): each call reassigns result_json, as in the
            # original — presumably get_metrics_from_jmx merges; confirm.
            result_json = get_metrics_from_jmx(jmxConnection, QUERY, prefix)
    except Exception as e:
        result_json["status"] = 0
        result_json["msg"] = str(e)
    return result_json
def __init__(self, jmx_info, mbenas_file): super(JMXMetrics, self).__init__() self.stop = False svc_url = 'service:jmx:rmi:///jndi/rmi://%s/jmxrmi' % jmx_info[1] self._conn = JMXConnection(svc_url) self._cnr_name = jmx_info[0] self._metrics = None with open(mbenas_file) as f: mbeans = json.load(f) self._query_obj = [] for mbean in mbeans: for val in mbean['Values']: self._query_obj.append( JMXQuery(mBeanName=mbean['ObjectName'], attribute=val['Attribute'], value_type=val['Type'], metric_name=val['InstancePrefix'], metric_labels={'type': val['Type']})) # Automatically start stats reading thread self.start()
def get_jmx(host, port, jmxQuery=None):
    """Run a JMX query against a server and log each webapp's uptime.

    :param host: host name / IP of the target JVM
    :param port: JMX RMI port of the target JVM
    :param jmxQuery: list of JMXQuery objects to run (or None)
    """
    jmxConnection = JMXConnection("service:jmx:rmi:///jndi/rmi://" + str(host) +
                                  ":" + str(port) + "/jmxrmi")
    metrics = jmxConnection.query(jmxQuery)
    # Raw string fixes the invalid "\/" escape sequences of the original
    # (DeprecationWarning today, SyntaxError in future Python); compiled
    # once outside the loop. Excludes the stock docs/manager webapps.
    pattern = re.compile(
        r"name=//(?P<hostname>.*)/(?!docs,|manager,)(?P<appname>[^,]+)")
    for metric in metrics:
        x = pattern.search(metric.mBeanName)
        if x is not None and x.group('appname') is not None:
            appname = x.group('appname')
            now = datetime.now()
            # NOTE(review): metric.value appears to be a start timestamp in
            # milliseconds — confirm against the queried MBean attribute.
            starttime = datetime.fromtimestamp(metric.value / 1e3)
            delta = now - starttime
            logger.info('Server: %s; appname: %s; uptime: %s',
                        host, appname, str(delta))
class JMXMetrics(threading.Thread):
    """Background thread that repeatedly queries a Kafka container's JMX
    endpoint and caches the latest metrics."""

    # Consecutive query failures tolerated before the thread gives up.
    _MAX_FAILURES = 3

    def __init__(self, jmx_info, mbenas_file):
        """Connect to the JMX endpoint, load the MBean spec and start the
        background stats-reading thread.

        :param jmx_info: sequence where [0] is the container name and
            [1] is the ``host:port`` of the JMX RMI endpoint
        :param mbenas_file: path to a JSON file describing MBeans to query
        """
        super(JMXMetrics, self).__init__()
        self.stop = False
        svc_url = 'service:jmx:rmi:///jndi/rmi://%s/jmxrmi' % jmx_info[1]
        self._conn = JMXConnection(svc_url)
        self._cnr_name = jmx_info[0]
        self._metrics = None
        with open(mbenas_file) as f:
            mbeans = json.load(f)
        self._query_obj = []
        for mbean in mbeans:
            for val in mbean['Values']:
                self._query_obj.append(
                    JMXQuery(mBeanName=mbean['ObjectName'],
                             attribute=val['Attribute'],
                             value_type=val['Type'],
                             metric_name=val['InstancePrefix'],
                             metric_labels={'type': val['Type']}))
        # Automatically start stats reading thread
        self.start()

    def run(self):
        """Query loop: refresh self._metrics until stopped or too many
        consecutive failures occur.

        NOTE(review): the loop has no sleep between queries, as in the
        original — confirm whether a polling interval is intended.
        """
        logger.info('start gathering metrics for Kafka container: %s'
                    % self._cnr_name)
        failures = 0
        while not self.stop:
            try:
                self._metrics = self._conn.query(self._query_obj)
            except Exception as ex:
                failures += 1
                # Include the exception so the failure is diagnosable.
                logger.error('failed to query metrics for kafka container %s: %s'
                             % (self._cnr_name, ex))
                if failures >= self._MAX_FAILURES:
                    self.stop = True
            else:
                # Bug fix: reset the counter on success so only CONSECUTIVE
                # failures stop the thread; previously 3 failures over the
                # whole lifetime killed it even with successes in between.
                failures = 0
        logger.info('stop gathering metrics for kafka container %s'
                    % self._cnr_name)

    @property
    def metrics(self):
        """Latest query results, or None until the first successful query."""
        if self._metrics:
            return self._metrics
        return None
def get_output():
    """Collect ActiveMQ broker and queue attributes over JMX.

    :return: dict of MBean attributes plus ``broker_name``; on failure a
        dict with ``status`` = 0 and ``msg`` set to the error text.
    """
    URL = "service:jmx:rmi:///jndi/rmi://" + HOST_NAME + ":" + PORT + "/jmxrmi"
    # Bug fix: initialize result_json up front so the except block cannot
    # raise NameError when JMXConnection(URL) itself fails.
    result_json = {}
    try:
        jmxConnection = JMXConnection(URL)
        # Local name fixed from the original's "OUERY" typo.
        QUERY = "org.apache.activemq:type=Broker,brokerName=" + BROKER_NAME
        result_json = mbean_attributes(jmxConnection, QUERY)
        QUERY = ("org.apache.activemq:type=Broker,brokerName=" + BROKER_NAME +
                 ",destinationType=Queue,destinationName=" + DESTINATION_NAME)
        result_json = mbean_attributes(jmxConnection, QUERY)
        result_json["broker_name"] = BROKER_NAME
    except Exception as e:
        result_json["status"] = 0
        result_json["msg"] = str(e)
    return result_json
def collect(self, _):
    """Scrape broker, producer and consumer metrics from a Kafka JVM via JMX.

    :return: Status.OK on success, Status.CRITICAL when the endpoint
        cannot be scraped.
    """
    try:
        host = self.get('ip', 'localhost')
        port = self.get('port', 9999)
        jmx_url = f'service:jmx:rmi:///jndi/rmi://{host}:{port}/jmxrmi'
        jmxConnection = JMXConnection(jmx_url)
        jmxQuery = [
            # UnderReplicatedPartitions
            JMXQuery("kafka.server:type=ReplicaManager,name=UnderReplicatedPartitions/Value",
                     metric_name="kafka_server_ReplicaManager_UnderReplicatedPartitions"),
            # OfflinePartitionsCount
            JMXQuery("kafka.controller:type=KafkaController,name=OfflinePartitionsCount/Value",
                     metric_name="kafka_controller_KafkaController_OfflinePartitionsCount"),
            # ActiveControllerCount
            JMXQuery("kafka.controller:type=KafkaController,name=ActiveControllerCount/Value",
                     metric_name="kafka_controller_KafkaController_ActiveControllerCount"),
            # MessagesInPerSec
            JMXQuery("kafka.server:type=BrokerTopicMetrics,name=MessagesInPerSec/Count",
                     metric_name="kafka_server_BrokerTopicMetrics_MessagesInPerSec_Count"),
            # BytesInPerSec
            JMXQuery("kafka.server:type=BrokerTopicMetrics,name=BytesInPerSec/Count",
                     metric_name="kafka_server_BrokerTopicMetrics_BytesInPerSec_Count"),
            # BytesOutPerSec
            JMXQuery("kafka.server:type=BrokerTopicMetrics,name=BytesOutPerSec/Count",
                     metric_name="kafka_server_BrokerTopicMetrics_BytesOutPerSec_Count"),
            # RequestsPerSec
            JMXQuery("kafka.network:type=RequestMetrics,name=RequestsPerSec,request=*/Count",
                     metric_name="kafka_network_RequestMetrics_RequestsPerSec_Count",
                     metric_labels={"request": "{request}"}),
            # TotalTimeMs
            JMXQuery("kafka.network:type=RequestMetrics,name=TotalTimeMs,request=*",
                     metric_name="kafka_network_RequestMetrics_TotalTimeMs_{attribute}",
                     metric_labels={"request": "{request}"}),
            # LeaderElectionsPerSec
            JMXQuery("kafka.controller:type=ControllerStats,name=LeaderElectionRateAndTimeMs/Count",
                     metric_name="kafka_cluster_ControllerStats_LeaderElectionRateAndTimeMs_Count"),
            # UncleanLeaderElectionsPerSec
            JMXQuery("kafka.controller:type=ControllerStats,name=UncleanLeaderElectionsPerSec/Count",
                     metric_name="kafka_cluster_ControllerStats_UncleanLeaderElectionsPerSec_Count"),
            # PartitionCount
            JMXQuery("kafka.server:type=ReplicaManager,name=PartitionCount/Value",
                     metric_name="kafka_server_ReplicaManager_PartitionCount"),
            # ISRShrinkRate
            JMXQuery("kafka.server:type=ReplicaManager,name=IsrShrinksPerSec",
                     metric_name="kafka_server_ReplicaManager_IsrShrinksPerSec_{attribute}"),
            # ISRExpandRate
            JMXQuery("kafka.server:type=ReplicaManager,name=IsrExpandsPerSec",
                     metric_name="kafka_server_ReplicaManager_IsrExpandsPerSec_{attribute}"),
            # NetworkProcessorAvgIdlePercent
            JMXQuery("kafka.network:type=SocketServer,name=NetworkProcessorAvgIdlePercent/Value",
                     metric_name="kafka_network_SocketServer_NetworkProcessorAvgIdlePercent"),
            # RequestHandlerAvgIdlePercent
            JMXQuery("kafka.server:type=KafkaRequestHandlerPool,name=RequestHandlerAvgIdlePercent",
                     metric_name="kafka_server_KafkaRequestHandlerPool_RequestHandlerAvgIdlePercent_{attribute}"),
            # ZooKeeperDisconnectsPerSec
            JMXQuery("kafka.server:type=SessionExpireListener,name=ZooKeeperDisconnectsPerSec",
                     metric_name="kafka_server_SessionExpireListener_ZooKeeperDisconnectsPerSec_{attribute}"),
            # ZooKeeperExpiresPerSec
            JMXQuery("kafka.server:type=SessionExpireListener,name=ZooKeeperExpiresPerSec",
                     metric_name="kafka_server_SessionExpireListener_ZooKeeperExpiresPerSec_{attribute}"),
            # LeaderCount
            JMXQuery("kafka.server:type=ReplicaManager,name=LeaderCount/Value",
                     metric_name="kafka_server_ReplicaManager_LeaderCount"),
            # MaxLag
            JMXQuery("kafka.server:type=ReplicaFetcherManager,name=MaxLag,clientId=Replica",
                     metric_name="kafka_server_ReplicaFetcherManager_MaxLag"),
            # OpenFileDescriptorCount
            JMXQuery("java.lang:type=OperatingSystem/OpenFileDescriptorCount",
                     metric_name="java_lang_OperatingSystem_OpenFileDescriptorCount"),
            # MaxFileDescriptorCount
            JMXQuery("java.lang:type=OperatingSystem/MaxFileDescriptorCount",
                     metric_name="java_lang_OperatingSystem_MaxFileDescriptorCount"),
            # Producer: connection-count
            JMXQuery("kafka.producer:type=producer-metrics,client-id=*/connection-count",
                     metric_name="kafka_producer_producer-metrics_connection-count"),
            # Producer: waiting-threads
            JMXQuery("kafka.producer:type=producer-metrics,client-id=*/waiting-threads",
                     metric_name="kafka_producer_producer-metrics_waiting-threads"),
            # Producer: record-send-total
            JMXQuery("kafka.producer:type=producer-metrics,client-id=*/record-send-total",
                     metric_name="kafka_producer_producer-metrics_record-send-total"),
            # Producer: request-rate
            JMXQuery("kafka.producer:type=producer-metrics,client-id=*/request-rate",
                     metric_name="kafka_producer_producer-metrics_request-rate"),
            # Producer: response-rate
            JMXQuery("kafka.producer:type=producer-metrics,client-id=*/response-rate",
                     metric_name="kafka_producer_producer-metrics_response-rate"),
            # Producer: outgoing-byte-rate
            JMXQuery("kafka.producer:type=producer-metrics,client-id=*/outgoing-byte-rate",
                     metric_name="kafka_producer_producer-metrics_outgoing-byte-rate"),
            # Producer: incoming-byte-rate
            JMXQuery("kafka.producer:type=producer-metrics,client-id=*/incoming-byte-rate",
                     metric_name="kafka_producer_producer-metrics_incoming-byte-rate"),
            # Producer: request-latency-avg
            JMXQuery("kafka.producer:type=producer-metrics,client-id=*/request-latency-avg",
                     metric_name="kafka_producer_producer-metrics_request-latency-avg"),
            # Producer: io-wait-time-ns-avg
            JMXQuery("kafka.producer:type=producer-metrics,client-id=*/io-wait-time-ns-avg",
                     metric_name="kafka_producer_producer-metrics_io-wait-time-ns-avg"),
            # Consumer: records-consumed-total
            JMXQuery("kafka.consumer:type=consumer-fetch-manager-metrics,client-id=*/records-consumed-total",
                     metric_name="kafka_consumer_consumer-fetch-manager-metrics_records-consumed-total"),
            # Consumer: records-consumed-rate
            JMXQuery("kafka.consumer:type=consumer-fetch-manager-metrics,client-id=*/records-consumed-rate",
                     metric_name="kafka_consumer_consumer-fetch-manager-metrics_records-consumed-rate"),
            # Consumer: records-lag-max
            JMXQuery("kafka.consumer:type=consumer-fetch-manager-metrics,client-id=*/records-lag-max",
                     metric_name="kafka_consumer_consumer-fetch-manager-metrics_records-lag-max"),
            # Consumer: bytes-consumed-rate
            JMXQuery("kafka.consumer:type=consumer-fetch-manager-metrics,client-id=*/bytes-consumed-rate",
                     metric_name="kafka_consumer_consumer-fetch-manager-metrics_bytes-consumed-rate"),
            # Consumer: fetch-rate
            JMXQuery("kafka.consumer:type=consumer-fetch-manager-metrics,client-id=*/fetch-rate",
                     metric_name="kafka_consumer_consumer-fetch-manager-metrics_fetch-rate"),
            # Consumer: fetch-latency-avg
            JMXQuery("kafka.consumer:type=consumer-fetch-manager-metrics,client-id=*/fetch-latency-avg",
                     metric_name="kafka_consumer_consumer-fetch-manager-metrics_fetch-latency-avg"),
            # Consumer: assigned-partitions
            JMXQuery("kafka.consumer:type=consumer-coordinator-metrics,client-id=*/assigned-partitions",
                     metric_name="kafka_consumer_consumer-coordinator-metrics_assigned-partitions"),
            # Consumer: commit-total
            JMXQuery("kafka.consumer:type=consumer-coordinator-metrics,client-id=*/commit-total",
                     metric_name="kafka_consumer_consumer-coordinator-metrics_commit-total"),
            # Consumer: join-total
            JMXQuery("kafka.consumer:type=consumer-coordinator-metrics,client-id=*/join-total",
                     metric_name="kafka_consumer_consumer-coordinator-metrics_join-total"),
            # Consumer: sync-total
            JMXQuery("kafka.consumer:type=consumer-coordinator-metrics,client-id=*/sync-total",
                     metric_name="kafka_consumer_consumer-coordinator-metrics_sync-total"),
            # Consumer: commit-rate
            JMXQuery("kafka.consumer:type=consumer-coordinator-metrics,client-id=*/commit-rate",
                     metric_name="kafka_consumer_consumer-coordinator-metrics_commit-rate"),
            # Consumer: commit-latency-avg
            JMXQuery("kafka.consumer:type=consumer-coordinator-metrics,client-id=*/commit-latency-avg",
                     metric_name="kafka_consumer_consumer-coordinator-metrics_commit-latency-avg"),
            # Consumer: join-rate
            JMXQuery("kafka.consumer:type=consumer-coordinator-metrics,client-id=*/join-rate",
                     metric_name="kafka_consumer_consumer-coordinator-metrics_join-rate"),
            # Consumer: sync-rate
            JMXQuery("kafka.consumer:type=consumer-coordinator-metrics,client-id=*/sync-rate",
                     metric_name="kafka_consumer_consumer-coordinator-metrics_sync-rate"),
        ]
        metrics = jmxConnection.query(jmxQuery)
        for metric in metrics:
            try:
                if (metric.value_type != "String") and (metric.value_type != ""):
                    if metric.metric_name.lower() in COUNTER_METRICS:
                        self.counter(metric.metric_name,
                                     metric.metric_labels).set(metric.value)
                    else:
                        self.gauge(metric.metric_name,
                                   metric.metric_labels).set(metric.value)
            except Exception:
                # Narrowed from a bare except (which also swallowed
                # KeyboardInterrupt/SystemExit). Ignore if a new type is
                # returned from JMX that isn't a number.
                pass
        return Status.OK
    except Exception as ex:
        # Consistency fix: include the exception detail, matching the JVM
        # collector's error log in this file.
        self.logger.error('Unable to scrape metrics from Kafka: %s', str(ex))
        return Status.CRITICAL
# https://www.wowza.com/docs/how-to-use-jconsole-with-wowza-media-server#remoteJMXConfigObjectList
from jmxquery import JMXConnection, JMXQuery

# Connect to the local Wowza Streaming Engine JMX endpoint.
connection = JMXConnection(
    "service:jmx:rmi://localhost:8084/jndi/rmi://localhost:8085/jmxrmi",
    jmx_username="******",
    jmx_password="******",
    java_path="/usr/local/WowzaStreamingEngine/java/bin/java")

# Pull every vhost item exposed by the server and print each value.
results = connection.query(
    [JMXQuery("WowzaStreamingEngine:vhostItems=*,vhostItem=*,name=*")])
for item in results:
    print('{0} value is {1}'.format(item.to_query_string(), item.value))
def collect(self, _):
    """Scrape Cassandra latency, cache and threadpool metrics over JMX.

    :return: Status.OK after processing all returned metrics.
    """
    host = self.get('ip', 'localhost')
    port = self.get('port', 7199)
    jmx_url = f'service:jmx:rmi:///jndi/rmi://{host}:{port}/jmxrmi'
    jmxConnection = JMXConnection(jmx_url)
    jmxQuery = [
        JMXQuery("org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Latency/OneMinuteRate",
                 metric_name="cassandra.reads"),
        JMXQuery("org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=Latency/OneMinuteRate",
                 metric_name="cassandra.writes"),
        JMXQuery("org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Latency/99thPercentile",
                 metric_name="cassandra.read_latency_99th_percentile"),
        JMXQuery("org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=Latency/99thPercentile",
                 metric_name="cassandra.write_latency_99th_percentile"),
        JMXQuery("org.apache.cassandra.metrics:type=Compaction,name=PendingTasks",
                 metric_name="cassandra.compaction_pending_tasks"),
        JMXQuery("org.apache.cassandra.metrics:type=ColumnFamily,keyspace=*,scope=*,name=TotalDiskSpaceUsed",
                 metric_name="cassandra.total_disk_space_used",
                 metric_labels={"keyspace": "{keyspace}"}),
        JMXQuery("org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Timeouts/OneMinuteRate",
                 metric_name="cassandra.exceptions_read_timeouts"),
        JMXQuery("org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=Timeouts/OneMinuteRate",
                 metric_name="cassandra.exceptions_write_timeouts"),
        JMXQuery("org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Unavailables/OneMinuteRate",
                 metric_name="cassandra.exceptions_read_unavailables"),
        JMXQuery("org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=Unavailables/OneMinuteRate",
                 metric_name="cassandra.exceptions_write_unavailables"),
        JMXQuery("org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=*,name=PendingTasks",
                 metric_name="cassandra.threadpool_request_pending_tasks",
                 metric_labels={"stage": "{scope}"}),
        JMXQuery("org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=*,name=CurrentlyBlockedTasks/Count",
                 metric_name="cassandra.threadpool_request_currently_blocked_tasks",
                 metric_labels={"stage": "{scope}"}),
        JMXQuery("java.lang:type=OperatingSystem/OpenFileDescriptorCount",
                 metric_name="cassandra.open_file_descriptors"),
        JMXQuery("java.lang:type=OperatingSystem/MaxFileDescriptorCount",
                 metric_name="cassandra.max_file_descriptors"),
    ]
    metrics = jmxConnection.query(jmxQuery)
    for metric in metrics:
        try:
            if (metric.value_type != "String") and (metric.value_type != ""):
                if metric.metric_name in RATE_METRICS:
                    self.counter(metric.metric_name,
                                 metric.metric_labels).set(metric.value)
                else:
                    if metric.metric_name == "cassandra.reads" or \
                            metric.metric_name == "cassandra.writes":
                        # NOTE(review): this replaces the numeric value with
                        # a 10-decimal string before set() — presumably
                        # gauge() accepts strings; confirm.
                        metric.value = "{:.10f}".format(metric.value)
                    self.gauge(metric.metric_name,
                               metric.metric_labels).set(metric.value)
        except Exception:
            # Narrowed from a bare except (which also swallowed
            # KeyboardInterrupt/SystemExit). Ignore if a new type is
            # returned from JMX that isn't a number.
            pass
    return Status.OK
def collect(self, _):
    """Scrape Tomcat threadpool, request-processor, cache, servlet and JSP
    metrics over JMX.

    :return: Status.OK after processing all returned metrics.
    """
    jmx_ip = self.get('ip', '127.0.0.1')
    jmx_port = self.get('port', '9012')
    jmx_url = f'service:jmx:rmi:///jndi/rmi://{jmx_ip}:{jmx_port}/jmxrmi'
    jmxConnection = JMXConnection(jmx_url)
    jmxQuery = [
        # Threadpool Metrics
        JMXQuery("Catalina:type=ThreadPool,name=*/maxThreads",
                 metric_name="tomcat.threadpool_maxthreads",
                 metric_labels={"processor": "{name}"}),
        JMXQuery("Catalina:type=ThreadPool,name=*/currentThreadCount",
                 metric_name="tomcat.threadpool_current_thread_count",
                 metric_labels={"processor": "{name}"}),
        JMXQuery("Catalina:type=ThreadPool,name=*/currentThreadsBusy",
                 metric_name="tomcat.threadpool_current_threads_busy",
                 metric_labels={"processor": "{name}"}),
        # Global Request Processor Metrics
        JMXQuery("Catalina:type=GlobalRequestProcessor,name=*",
                 metric_name="tomcat.global_request_processor_{attribute}",
                 metric_labels={"processor": "{name}"}),
        # Cache Metrics
        JMXQuery("Catalina:type=Cache,host=*,context=*/accessCount",
                 metric_name="tomcat.cache_access_count",
                 metric_labels={"tomcat_host": "{host}",
                                "context": "{context}"}),
        JMXQuery("Catalina:type=Cache,host=*,context=*/hitsCount",
                 metric_name="tomcat.cache_hits_count",
                 metric_labels={"tomcat_host": "{host}",
                                "context": "{context}"}),
        # Servlet Metrics
        JMXQuery("Catalina:j2eeType=Servlet,name=*,WebModule=*,*/processingTime",
                 metric_name="tomcat.servlet_processingTime",
                 metric_labels={"webmodule": "{WebModule}",
                                "servlet": "{name}"}),
        JMXQuery("Catalina:j2eeType=Servlet,name=*,WebModule=*,*/errorCount",
                 metric_name="tomcat.servlet_errorCount",
                 metric_labels={"webmodule": "{WebModule}",
                                "servlet": "{name}"}),
        JMXQuery("Catalina:j2eeType=Servlet,name=*,WebModule=*,*/requestCount",
                 metric_name="tomcat.servlet_requestCount",
                 metric_labels={"webmodule": "{WebModule}",
                                "servlet": "{name}"}),
        # JspMonitor Metrics
        JMXQuery("Catalina:type=JspMonitor,name=jsp,WebModule=*,*/jspCount",
                 metric_name="tomcat.jspmonitor_jsp_count",
                 metric_labels={"webmodule": "{WebModule}"}),
        JMXQuery("Catalina:type=JspMonitor,name=jsp,WebModule=*,*/jspReloadCount",
                 metric_name="tomcat.jspmonitor_jsp_reload_count",
                 metric_labels={"webmodule": "{WebModule}"}),
    ]
    metrics = jmxConnection.query(jmxQuery)
    for metric in metrics:
        try:
            if (metric.value_type != "String") and (metric.value_type != ""):
                if metric.metric_name in GAUGE_METRICS:
                    self.gauge(metric.metric_name,
                               metric.metric_labels).set(metric.value)
                else:
                    self.counter(metric.metric_name,
                                 metric.metric_labels).set(metric.value)
        except Exception:
            # Narrowed from a bare except (which also swallowed
            # KeyboardInterrupt/SystemExit). Ignore if a new type is
            # returned from JMX that isn't a number.
            pass
    return Status.OK
def test_kafka_plugin():
    """Exercise the standard set of Kafka broker JMX queries and check
    that the expected minimum number of metrics comes back."""
    connection = JMXConnection(CONNECTION_URL)
    queries = [
        # UnderReplicatedPartitions
        JMXQuery("kafka.server:type=ReplicaManager,name=UnderReplicatedPartitions/Value",
                 metric_name="kafka_server_ReplicaManager_UnderReplicatedPartitions"),
        # OfflinePartitionsCount
        JMXQuery("kafka.controller:type=KafkaController,name=OfflinePartitionsCount/Value",
                 metric_name="kafka_controller_KafkaController_OfflinePartitionsCount"),
        # ActiveControllerCount
        JMXQuery("kafka.controller:type=KafkaController,name=ActiveControllerCount/Value",
                 metric_name="kafka_controller_KafkaController_ActiveControllerCount"),
        # MessagesInPerSec
        JMXQuery("kafka.server:type=BrokerTopicMetrics,name=MessagesInPerSec/Count",
                 metric_name="kafka_server_BrokerTopicMetrics_MessagesInPerSec_Count"),
        # BytesInPerSec
        JMXQuery("kafka.server:type=BrokerTopicMetrics,name=BytesInPerSec/Count",
                 metric_name="kafka_server_BrokerTopicMetrics_BytesInPerSec_Count"),
        # BytesOutPerSec
        JMXQuery("kafka.server:type=BrokerTopicMetrics,name=BytesOutPerSec/Count",
                 metric_name="kafka_server_BrokerTopicMetrics_BytesOutPerSec_Count"),
        # RequestsPerSec
        JMXQuery("kafka.network:type=RequestMetrics,name=RequestsPerSec,request=*/Count",
                 metric_name="kafka_network_RequestMetrics_RequestsPerSec_Count",
                 metric_labels={"request": "{request}"}),
        # TotalTimeMs
        JMXQuery("kafka.network:type=RequestMetrics,name=TotalTimeMs,request=*",
                 metric_name="kafka_network_RequestMetrics_TotalTimeMs_{attribute}",
                 metric_labels={"request": "{request}"}),
        # LeaderElectionsPerSec
        JMXQuery("kafka.controller:type=ControllerStats,name=LeaderElectionRateAndTimeMs/Count",
                 metric_name="kafka_cluster_ControllerStats_LeaderElectionRateAndTimeMs_Count"),
        # UncleanLeaderElectionsPerSec
        JMXQuery("kafka.controller:type=ControllerStats,name=UncleanLeaderElectionsPerSec/Count",
                 metric_name="kafka_cluster_ControllerStats_UncleanLeaderElectionsPerSec_Count"),
        # PartitionCount
        JMXQuery("kafka.server:type=ReplicaManager,name=PartitionCount/Value",
                 metric_name="kafka_server_ReplicaManager_PartitionCount"),
        # ISRShrinkRate
        JMXQuery("kafka.server:type=ReplicaManager,name=IsrShrinksPerSec",
                 metric_name="kafka_server_ReplicaManager_IsrShrinksPerSec_{attribute}"),
        # ISRExpandRate
        JMXQuery("kafka.server:type=ReplicaManager,name=IsrExpandsPerSec",
                 metric_name="kafka_server_ReplicaManager_IsrExpandsPerSec_{attribute}"),
        # NetworkProcessorAvgIdlePercent
        JMXQuery("kafka.network:type=SocketServer,name=NetworkProcessorAvgIdlePercent/Value",
                 metric_name="kafka_network_SocketServer_NetworkProcessorAvgIdlePercent"),
        # RequestHandlerAvgIdlePercent
        JMXQuery("kafka.server:type=KafkaRequestHandlerPool,name=RequestHandlerAvgIdlePercent",
                 metric_name="kafka_server_KafkaRequestHandlerPool_RequestHandlerAvgIdlePercent_{attribute}"),
        # ZooKeeperDisconnectsPerSec
        JMXQuery("kafka.server:type=SessionExpireListener,name=ZooKeeperDisconnectsPerSec",
                 metric_name="kafka_server_SessionExpireListener_ZooKeeperDisconnectsPerSec_{attribute}"),
        # ZooKeeperExpiresPerSec
        JMXQuery("kafka.server:type=SessionExpireListener,name=ZooKeeperExpiresPerSec",
                 metric_name="kafka_server_SessionExpireListener_ZooKeeperExpiresPerSec_{attribute}"),
    ]
    results = connection.query(queries)
    printMetrics(results)
    assert_greater_equal(len(results), 525)
""" jmxquery模块,通过JMX轻松运行查询并从Java虚拟机收集指标。 利用jmxquery查询kafka的指标: 安装jmxquery: pip3.6 install jmxquery 参考:https://github.com/dgildeh/JMXQuery 在kafka的启动文件./bin/kafka-server-start 增加JMX环境变量 if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then export KAFKA_HEAP_OPTS="-Xmx1G -Xms1G" export JMX_PORT="8888" fi """ from jmxquery import JMXConnection, JMXQuery jmxConnection = JMXConnection("service:jmx:rmi:///jndi/rmi://127.0.0.1:8888/jmxrmi") jmxQuery = [JMXQuery("*:*")] metrics = jmxConnection.query(jmxQuery) for metric in metrics: print(f"{metric.to_query_string()} ({metric.value_type}) = {metric.value}")
def collect(self, _):
    """Scrape core JVM metrics (class loading, GC, memory, runtime,
    threading) over JMX.

    :return: Status.OK on success, Status.CRITICAL when the endpoint
        cannot be scraped.
    """
    try:
        host = self.get('ip', 'localhost')
        port = self.get('port', 9999)
        jmx_url = f'service:jmx:rmi:///jndi/rmi://{host}:{port}/jmxrmi'
        jmxConnection = JMXConnection(jmx_url)
        jmxQuery = [
            # Class Loading
            JMXQuery("java.lang:type=ClassLoading/LoadedClassCount",
                     metric_name="java_lang_ClassLoading_LoadedClassCount"),
            JMXQuery("java.lang:type=ClassLoading/UnloadedClassCount",
                     metric_name="java_lang_ClassLoading_UnloadedClassCount"),
            # Bug fix: the original queried UnloadedClassCount a second time
            # here while naming the metric TotalLoadedClassCount; query the
            # TotalLoadedClassCount attribute instead.
            JMXQuery("java.lang:type=ClassLoading/TotalLoadedClassCount",
                     metric_name="java_lang_ClassLoading_TotalLoadedClassCount"),
            # Garbage Collection
            JMXQuery("java.lang:type=GarbageCollector,name=*/CollectionTime",
                     metric_name="java_lang_GarbageCollector_CollectionTime",
                     metric_labels={"gc_name": "{name}"}),
            JMXQuery("java.lang:type=GarbageCollector,name=*/CollectionCount",
                     metric_name="java_lang_GarbageCollector_CollectionCount",
                     metric_labels={"gc_name": "{name}"}),
            # Memory
            JMXQuery("java.lang:type=Memory/HeapMemoryUsage",
                     metric_name="java_lang_Memory_HeapMemoryUsage_{attributeKey}"),
            JMXQuery("java.lang:type=Memory/NonHeapMemoryUsage",
                     metric_name="java_lang_Memory_NonHeapMemoryUsage_{attributeKey}"),
            # Runtime
            JMXQuery("java.lang:type=Runtime/Uptime",
                     metric_name="java_lang_Runtime_Uptime"),
            # Threading
            JMXQuery("java.lang:type=Threading/ThreadCount",
                     metric_name="java_lang_Threading_ThreadCount"),
            JMXQuery("java.lang:type=Threading/PeakThreadCount",
                     metric_name="java_lang_Threading_PeakThreadCount"),
            JMXQuery("java.lang:type=Threading/DaemonThreadCount",
                     metric_name="java_lang_Threading_DaemonThreadCount"),
        ]
        metrics = jmxConnection.query(jmxQuery)
        for metric in metrics:
            try:
                if (metric.value_type != "String") and (metric.value_type != ""):
                    if metric.metric_name.lower() in COUNTER_METRICS:
                        self.counter(metric.metric_name,
                                     metric.metric_labels).set(metric.value)
                    else:
                        self.gauge(metric.metric_name,
                                   metric.metric_labels).set(metric.value)
            except Exception:
                # Narrowed from a bare except (which also swallowed
                # KeyboardInterrupt/SystemExit). Ignore if a new type is
                # returned from JMX that isn't a number.
                pass
        return Status.OK
    except Exception as ex:
        self.logger.error('Unable to scrape metrics from JVM: %s', str(ex))
        return Status.CRITICAL