# NOTE(review): this chunk starts mid-method — the `if` below belongs to an
# on_bean() whose def/class header (presumably NNCapacityUsageMetric, given the
# registration below) precedes this chunk; confirm against the full file.
if bean["name"] == "Hadoop:service=NameNode,name=FSNamesystemState":
    # Used/total DFS capacity as a percentage, rounded to 2 decimal places.
    capacityusage = round(
        float(bean['CapacityUsed']) / float(bean['CapacityTotal']) * 100, 2)
    self.collector.on_bean_kv(self.PREFIX, "capacityusage", capacityusage)


class JournalTransactionInfoMetric(JmxMetricListener):
    """Emit NameNode journal transaction ids from the JournalTransactionInfo bean."""
    PREFIX = "hadoop.namenode.journaltransaction"

    def on_bean(self, bean):
        # dict.has_key() is Python-2-only; the `in` operator is equivalent and
        # works on both Python 2 and 3.
        if "JournalTransactionInfo" in bean:
            # The bean value is itself a JSON-encoded string; decode it first.
            info = json.loads(bean.get("JournalTransactionInfo"))
            last_applied = float(info.get("LastAppliedOrWrittenTxId"))
            last_checkpoint = float(info.get("MostRecentCheckpointTxId"))
            self.collector.on_bean_kv(
                self.PREFIX, "LastAppliedOrWrittenTxId", last_applied)
            self.collector.on_bean_kv(
                self.PREFIX, "MostRecentCheckpointTxId", last_checkpoint)


if __name__ == '__main__':
    collector = JmxMetricCollector()
    # NOTE(review): "MemortUsageMetric" looks like a typo for MemoryUsageMetric,
    # but its class definition is outside this chunk — keep the spelling until
    # the definition can be checked.
    collector.register(NNSafeModeMetric(),
                       NNHAMetric(),
                       MemortUsageMetric(),
                       JournalTransactionInfoMetric(),
                       NNCapacityUsageMetric())
    Runner.run(collector)
# NOTE(review): chunk starts mid-method — the fnmatch branches below belong to
# an on_metric() of (presumably) DatanodeFSDatasetState; confirm against the
# full file. They collapse per-volume "fsdatasetstate-<id>" metric names into a
# single canonical name before forwarding the metric.
if fnmatch.fnmatch(metric["metric"], "hadoop.datanode.fsdatasetstate-*.capacity"):
    metric["metric"] = "hadoop.datanode.fsdatasetstate.capacity"
    self.collector.collect(metric)
elif fnmatch.fnmatch(metric["metric"], "hadoop.datanode.fsdatasetstate-*.dfsused"):
    metric["metric"] = "hadoop.datanode.fsdatasetstate.dfsused"
    self.collector.collect(metric)


class HBaseRegionServerMetric(JmxMetricListener):
    def on_metric(self, metric):
        """
        Rename metric "hadoop.hbase.ipc.ipc.*" to "hadoop.hbase.regionserver.ipc.*"
        to support different hbase version metric
        """
        if fnmatch.fnmatch(metric["metric"], "hadoop.hbase.ipc.ipc.*") \
                and metric["component"] == "regionserver":
            new_metric_name = metric["metric"].replace(
                "hadoop.hbase.ipc.ipc.", "hadoop.hbase.regionserver.ipc.")
            # Lazy %-args: the message is only formatted when DEBUG is enabled,
            # instead of being built eagerly with "%" on every call.
            logging.debug("Rename metric %s to %s",
                          metric["metric"], new_metric_name)
            metric["metric"] = new_metric_name
            self.collector.collect(metric)


if __name__ == '__main__':
    collector = JmxMetricCollector()
    collector.register(NNSafeModeMetric(),
                       NNHAMetric(),
                       MemoryUsageMetric(),
                       NNCapacityUsageMetric(),
                       JournalTransactionInfoMetric(),
                       DatanodeFSDatasetState(),
                       HBaseRegionServerMetric())
    Runner.run(collector)
# NOTE(review): chunk starts mid-method — these ratio computations belong to a
# memory-usage on_bean(); confirm against the full file. Each value is a
# committed/used-over-max percentage of the JVM heap or non-heap pools,
# rounded to 2 decimal places.
memnonheapcommittedusage = round(
    float(bean['MemNonHeapCommittedM']) / float(bean['MemNonHeapMaxM']) * 100, 2)
self.collector.on_bean_kv(self.PREFIX, "memnonheapcommittedusage",
                          memnonheapcommittedusage)
memheapusedusage = round(
    float(bean['MemHeapUsedM']) / float(bean['MemHeapMaxM']) * 100, 2)
self.collector.on_bean_kv(self.PREFIX, "memheapusedusage", memheapusedusage)
memheapcommittedusage = round(
    float(bean['MemHeapCommittedM']) / float(bean['MemHeapMaxM']) * 100, 2)
self.collector.on_bean_kv(self.PREFIX, "memheapcommittedusage",
                          memheapcommittedusage)


class JournalTransactionInfoMetric(JmxMetricListener):
    """Emit NameNode journal transaction ids from the JournalTransactionInfo bean."""
    PREFIX = "hadoop.namenode.journaltransaction"

    def on_bean(self, bean):
        # dict.has_key() is Python-2-only; the `in` operator is equivalent and
        # works on both Python 2 and 3.
        if "JournalTransactionInfo" in bean:
            # The bean value is itself a JSON-encoded string; decode it first.
            info = json.loads(bean.get("JournalTransactionInfo"))
            last_applied = float(info.get("LastAppliedOrWrittenTxId"))
            last_checkpoint = float(info.get("MostRecentCheckpointTxId"))
            self.collector.on_bean_kv(
                self.PREFIX, "LastAppliedOrWrittenTxId", last_applied)
            self.collector.on_bean_kv(
                self.PREFIX, "MostRecentCheckpointTxId", last_checkpoint)


if __name__ == '__main__':
    collector = JmxMetricCollector()
    # NOTE(review): "MemortUsageMetric" looks like a typo for MemoryUsageMetric,
    # but its class definition is outside this chunk — keep the spelling until
    # the definition can be checked.
    collector.register(
        NNSafeModeMetric(),
        NNHAMetric(),
        MemortUsageMetric(),
        JournalTransactionInfoMetric()
    )
    Runner.run(collector)
metric["metric"] = "hadoop.datanode.fsdatasetstate.capacity" self.collector.collect(metric) elif fnmatch.fnmatch(metric["metric"], "hadoop.datanode.fsdatasetstate-*.dfsused"): metric["metric"] = "hadoop.datanode.fsdatasetstate.dfsused" self.collector.collect(metric) class HBaseRegionServerMetric(JmxMetricListener): def on_metric(self, metric): """ Rename metric "hadoop.hbase.ipc.ipc.*" to "hadoop.hbase.regionserver.ipc.*" to support different hbase version metric """ if fnmatch.fnmatch(metric["metric"], "hadoop.hbase.ipc.ipc.*" ) and metric["component"] == "regionserver": new_metric_name = metric["metric"].replace( "hadoop.hbase.ipc.ipc.", "hadoop.hbase.regionserver.ipc.") logging.debug("Rename metric %s to %s" % (metric["metric"], new_metric_name)) metric["metric"] = new_metric_name self.collector.collect(metric) if __name__ == '__main__': collector = JmxMetricCollector() collector.register(NNSafeModeMetric(), NNFileSystemMetric(), MemoryUsageMetric(), NNCapacityUsageMetric(), JournalTransactionInfoMetric(), DatanodeFSDatasetState(), corruptfilesMetric(), TopUserOpCountsMetric()) Runner.run(collector)
# NOTE(review): chunk starts mid-method — the fnmatch branches below belong to
# an on_metric() of (presumably) DatanodeFSDatasetState; confirm against the
# full file. They collapse per-volume "fsdatasetstate-<id>" metric names into a
# single canonical name before forwarding the metric.
if fnmatch.fnmatch(metric["metric"], "hadoop.datanode.fsdatasetstate-*.capacity"):
    metric["metric"] = "hadoop.datanode.fsdatasetstate.capacity"
    self.collector.collect(metric)
elif fnmatch.fnmatch(metric["metric"], "hadoop.datanode.fsdatasetstate-*.dfsused"):
    metric["metric"] = "hadoop.datanode.fsdatasetstate.dfsused"
    self.collector.collect(metric)


class HBaseRegionServerMetric(JmxMetricListener):
    def on_metric(self, metric):
        """
        Rename metric "hadoop.hbase.ipc.ipc.*" to "hadoop.hbase.regionserver.ipc.*"
        to support different hbase version metric
        """
        if fnmatch.fnmatch(metric["metric"], "hadoop.hbase.ipc.ipc.*") \
                and metric["component"] == "regionserver":
            new_metric_name = metric["metric"].replace(
                "hadoop.hbase.ipc.ipc.", "hadoop.hbase.regionserver.ipc.")
            # Lazy %-args: the message is only formatted when DEBUG is enabled,
            # instead of being built eagerly with "%" on every call.
            logging.debug("Rename metric %s to %s",
                          metric["metric"], new_metric_name)
            metric["metric"] = new_metric_name
            self.collector.collect(metric)


if __name__ == '__main__':
    collector = JmxMetricCollector()
    # NOTE(review): "corruptfilesMetric" breaks PascalCase used by its siblings,
    # but its class definition is outside this chunk — keep the spelling until
    # the definition can be checked.
    collector.register(
        NNSafeModeMetric(),
        NNFileSystemMetric(),
        MemoryUsageMetric(),
        NNCapacityUsageMetric(),
        JournalTransactionInfoMetric(),
        DatanodeFSDatasetState(),
        corruptfilesMetric(),
        TopUserOpCountsMetric()
    )
    Runner.run(collector)
# NOTE(review): chunk starts mid-method — the fnmatch branches below belong to
# an on_metric() of (presumably) DatanodeFSDatasetState; confirm against the
# full file. They collapse per-volume "fsdatasetstate-<id>" metric names into a
# single canonical name before forwarding the metric.
if fnmatch.fnmatch(metric["metric"], "hadoop.datanode.fsdatasetstate-*.capacity"):
    metric["metric"] = "hadoop.datanode.fsdatasetstate.capacity"
    self.collector.collect(metric)
elif fnmatch.fnmatch(metric["metric"], "hadoop.datanode.fsdatasetstate-*.dfsused"):
    metric["metric"] = "hadoop.datanode.fsdatasetstate.dfsused"
    self.collector.collect(metric)


class HBaseRegionServerMetric(JmxMetricListener):
    def on_metric(self, metric):
        """
        Rename metric "hadoop.hbase.ipc.ipc.*" to "hadoop.hbase.regionserver.ipc.*"
        to support different hbase version metric
        """
        if fnmatch.fnmatch(metric["metric"], "hadoop.hbase.ipc.ipc.*") \
                and metric["component"] == "regionserver":
            new_metric_name = metric["metric"].replace(
                "hadoop.hbase.ipc.ipc.", "hadoop.hbase.regionserver.ipc.")
            # Lazy %-args: the message is only formatted when DEBUG is enabled,
            # instead of being built eagerly with "%" on every call.
            logging.debug("Rename metric %s to %s",
                          metric["metric"], new_metric_name)
            metric["metric"] = new_metric_name
            self.collector.collect(metric)


if __name__ == '__main__':
    collector = JmxMetricCollector()
    collector.register(
        NNSafeModeMetric(),
        NNHAMetric(),
        MemoryUsageMetric(),
        NNCapacityUsageMetric(),
        JournalTransactionInfoMetric(),
        DatanodeFSDatasetState(),
        HBaseRegionServerMetric(),
        NameNodeInfo()
    )
    Runner.run(collector)