"metric": "hadoop.resourcemanager.hastate.total.count",
            "value": total_count
        })

        self.collect({
            "host": all_hosts_name,
            "component": "resourcemanager",
            "metric": "hadoop.resourcemanager.hastate.active.count",
            "value": active_count
        })

        self.collect({
            "host": all_hosts_name,
            "component": "resourcemanager",
            "metric": "hadoop.resourcemanager.hastate.standby.count",
            "value": standby_count
        })

        if failed_host_list:
            all_hosts_name = ",".join(failed_host_list)

        self.collect({
            "host": all_hosts_name,
            "component": "resourcemanager",
            "metric": "hadoop.resourcemanager.hastate.failed.count",
            "value": failed_count
        })

if __name__ == '__main__':
    Runner.run(HadoopNNHAChecker(), HadoopHBaseHAChecker(), HadoopRMHAChecker())
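
For context, a minimal sketch of how the counts fed into these collect() calls might be derived. The (host, state) input shape and the "active"/"standby" labels are assumptions, not part of the checker above:

# Hypothetical helper: derive HA-state counts from (host, state) pairs.
# The input shape and the state labels are assumptions.
def count_ha_states(host_states):
    total_count = len(host_states)
    active_count = sum(1 for _, s in host_states if s == "active")
    standby_count = sum(1 for _, s in host_states if s == "standby")
    failed_host_list = [h for h, s in host_states
                        if s not in ("active", "standby")]
    return total_count, active_count, standby_count, failed_host_list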
Example #2
        if bean["name"] == "Hadoop:service=NameNode,name=FSNamesystemState":
            capacityusage = round(
                float(bean['CapacityUsed']) / float(bean['CapacityTotal']) *
                100, 2)
            self.collector.on_bean_kv(self.PREFIX, "capacityusage",
                                      capacityusage)


class JournalTransactionInfoMetric(JmxMetricListener):
    PREFIX = "hadoop.namenode.journaltransaction"

    def on_bean(self, bean):
        if "JournalTransactionInfo" in bean:
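            # The gap between the last written and the last checkpointed
            # transaction ID shows how far the checkpoint lags the edit log.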
            JournalTransactionInfo = json.loads(
                bean.get("JournalTransactionInfo"))
            LastAppliedOrWrittenTxId = float(
                JournalTransactionInfo.get("LastAppliedOrWrittenTxId"))
            MostRecentCheckpointTxId = float(
                JournalTransactionInfo.get("MostRecentCheckpointTxId"))
            self.collector.on_bean_kv(self.PREFIX, "LastAppliedOrWrittenTxId",
                                      LastAppliedOrWrittenTxId)
            self.collector.on_bean_kv(self.PREFIX, "MostRecentCheckpointTxId",
                                      MostRecentCheckpointTxId)


if __name__ == '__main__':
    collector = JmxMetricCollector()
    collector.register(NNSafeModeMetric(), NNHAMetric(), MemortUsageMetric(),
                       JournalTransactionInfoMetric(), NNCapacityUsageMetric())
    Runner.run(collector)
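
A derived metric such as the checkpoint lag could be emitted with the same listener pattern. This is a hypothetical sketch, not part of the collector above:

class CheckpointLagMetric(JmxMetricListener):
    # Hypothetical listener: emits how many transactions the last
    # checkpoint trails the last written edit-log transaction.
    PREFIX = "hadoop.namenode.journaltransaction"

    def on_bean(self, bean):
        if "JournalTransactionInfo" in bean:
            info = json.loads(bean.get("JournalTransactionInfo"))
            lag = float(info.get("LastAppliedOrWrittenTxId")) - \
                float(info.get("MostRecentCheckpointTxId"))
            self.collector.on_bean_kv(self.PREFIX, "CheckpointLag", lag)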
Example #3
            memnonheapcommittedusage = round(float(bean['MemNonHeapCommittedM']) / float(bean['MemNonHeapMaxM']) * 100,
                                             2)
            self.collector.on_bean_kv(self.PREFIX, "memnonheapcommittedusage", memnonheapcommittedusage)
            memheapusedusage = round(float(bean['MemHeapUsedM']) / float(bean['MemHeapMaxM']) * 100, 2)
            self.collector.on_bean_kv(self.PREFIX, "memheapusedusage", memheapusedusage)
            memheapcommittedusage = round(float(bean['MemHeapCommittedM']) / float(bean['MemHeapMaxM']) * 100, 2)
            self.collector.on_bean_kv(self.PREFIX, "memheapcommittedusage", memheapcommittedusage)


class JournalTransactionInfoMetric(JmxMetricListener):
    PREFIX = "hadoop.namenode.journaltransaction"

    def on_bean(self, bean):
        if "JournalTransactionInfo" in bean:
            JournalTransactionInfo = json.loads(bean.get("JournalTransactionInfo"))
            LastAppliedOrWrittenTxId = float(JournalTransactionInfo.get("LastAppliedOrWrittenTxId"))
            MostRecentCheckpointTxId = float(JournalTransactionInfo.get("MostRecentCheckpointTxId"))
            self.collector.on_bean_kv(self.PREFIX, "LastAppliedOrWrittenTxId", LastAppliedOrWrittenTxId)
            self.collector.on_bean_kv(self.PREFIX, "MostRecentCheckpointTxId", MostRecentCheckpointTxId)


if __name__ == '__main__':
    collector = JmxMetricCollector()
    collector.register(
            NNSafeModeMetric(),
            NNHAMetric(),
            MemortUsageMetric(),
            JournalTransactionInfoMetric()
    )
    Runner.run(collector)
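
The three usage calculations above share the same pattern and will raise ZeroDivisionError if a max value is reported as 0. A small guarded helper, shown here as a sketch rather than part of the original collector, factors that out:

def usage_percent(used, maximum):
    # Guard against a zero or negative maximum before dividing.
    used, maximum = float(used), float(maximum)
    if maximum <= 0:
        return 0.0
    return round(used / maximum * 100, 2)

# e.g. usage_percent(bean['MemHeapUsedM'], bean['MemHeapMaxM'])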
Example #4
            "value": total_count
        })

        self.collect({
            "host": all_hosts_name,
            "component": "resourcemanager",
            "metric": "hadoop.resourcemanager.hastate.active.count",
            "value": active_count
        })

        self.collect({
            "host": all_hosts_name,
            "component": "resourcemanager",
            "metric": "hadoop.resourcemanager.hastate.standby.count",
            "value": standby_count
        })

        if failed_host_list:
            all_hosts_name = ",".join(failed_host_list)

        self.collect({
            "host": all_hosts_name,
            "component": "resourcemanager",
            "metric": "hadoop.resourcemanager.hastate.failed.count",
            "value": failed_count
        })


if __name__ == '__main__':
    Runner.run(HadoopNNHAChecker(), HadoopHBaseHAChecker(),
               HadoopRMHAChecker())
Example #5
        for key, metrics in iostat_dict.items():
            for i in range(len(metrics)):
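                # 'demension' (defined outside this fragment) names the iostat
                # columns, aligned positionally with the values in 'metrics'.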
                metric = 'disk.' + demension[i]
                kafka_dict = self.new_metric("system.disk")
                kafka_dict['metric'] = DATA_TYPE + "." + metric.lower()
                kafka_dict["timestamp"] = int(round(time.time() * 1000))
                kafka_dict["value"] = metrics[i]
                kafka_dict["device"] = key
                self.collect(kafka_dict)

    # ====================================
    # Helper Methods
    # ====================================

    def emit_metric(self, event, prefix, metric, value, device):
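        # Stamp the event with epoch milliseconds, a lowercased prefixed
        # metric name, and the device, then emit it.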
        event["timestamp"] = int(round(time.time() * 1000))
        event["metric"] = prefix + "." + metric.lower()
        event["value"] = str(value)
        event["device"] = device
        self.collect(event)

    def new_metric(self, group):
        metric = dict()
        metric["host"] = self.fqdn
        metric["group"] = group
        return metric


if __name__ == '__main__':
    Runner.run(SystemMetricCollector())
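
Put together, the helper methods compose like this; the metric name, value, and device below are illustrative only:

# Hypothetical call site inside the collector (values are made up):
event = self.new_metric("system.disk")
self.emit_metric(event, "system.disk", "READ_BYTES", 4096, "sda")
# emits {"host": ..., "group": "system.disk", "timestamp": ...,
#        "metric": "system.disk.read_bytes", "value": "4096", "device": "sda"}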
Example #6
        for key, metrics in iostat_dict.items():
            for i in range(len(metrics)):
                metric = 'disk.' + demension[i]
                kafka_dict = self.new_metric()
                kafka_dict['metric'] = DATA_TYPE + "." + metric.lower()
                kafka_dict["timestamp"] = int(round(time.time() * 1000))
                kafka_dict["value"] = metrics[i]
                kafka_dict["device"] = key
                self.collect(kafka_dict)

    # ====================================
    # Helper Methods
    # ====================================

    def emit_metric(self, event, prefix, metric, value, device):
        event["timestamp"] = int(round(time.time() * 1000))
        event["metric"] = prefix + "." + metric.lower()
        event["value"] = str(value)
        event["device"] = device
        self.collect(event)

    def new_metric(self):
        metric = dict()
        metric["host"] = self.fqdn
        return metric


if __name__ == '__main__':
    Runner.run(SystemMetricCollector())
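
Note the only difference from the previous example: this variant's new_metric() omits the "group" field, so any consumer that partitions metrics by group would have to infer it from the metric name prefix instead.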