def collect(self):
    # Request data from the Ambari Collect Host API.
    # Request exactly the system-level information we need from the node;
    # 'beans' comes back as a list.
    try:
        count = 0
        # The key metrics may not be present in the JMX output right away,
        # so retry up to 5 times with a one-second sleep between attempts.
        while count < 5:
            beans = utils.get_metrics(self._url)
            if 'init_total_count_tables' not in beans:
                count += 1
                time.sleep(1)
                continue
            else:
                break
    except Exception:
        logger.info("Can't scrape metrics from url: {0}".format(self._url))
    else:
        # Set up all metrics with labels and descriptions.
        self._setup_labels(beans)
        # Add a value to every metric.
        self._get_metrics(beans)
        # Update the HiveServer2 metrics with the common metrics.
        common_metrics = common_metrics_info(self._cluster, beans, "hive", "hiveserver2")
        self._hadoop_hiveserver2_metrics.update(common_metrics())
        for service in self._merge_list:
            for metric in self._hadoop_hiveserver2_metrics[service]:
                yield self._hadoop_hiveserver2_metrics[service][metric]
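All three collectors lean on utils.get_metrics to fetch and parse the JMX endpoint; the helper itself is not shown in this section. A minimal sketch, assuming the URL points at the standard Hadoop /jmx servlet, which returns a JSON document with a top-level "beans" array (the body below is illustrative, not the project's actual implementation):

import requests

def get_metrics(url):
    # Fetch the JMX JSON document and return its 'beans' array.
    # Hadoop daemons expose this at http://<host>:<port>/jmx.
    response = requests.get(url, timeout=10)
    response.raise_for_status()
    return response.json().get('beans', [])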
def collect(self):
    # Request data from the Ambari Collect Host API.
    # Request exactly the system-level information we need from the node;
    # 'beans' comes back as a list.
    try:
        beans = utils.get_metrics(self._url)
    except Exception:
        logger.info("Can't scrape metrics from url: {0}".format(self._url))
    else:
        # Set up all metrics with labels and descriptions.
        self._setup_labels(beans)
        # Add a value to every metric.
        self._get_metrics(beans)
        # Update the RegionServer metrics with the common metrics.
        common_metrics = common_metrics_info(self._cluster, beans, "hbase", "regionserver")
        self._hadoop_regionserver_metrics.update(common_metrics())
        for service in self._merge_list:
            for metric in self._hadoop_regionserver_metrics[service]:
                yield self._hadoop_regionserver_metrics[service][metric]
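The objects yielded at the end of collect() are prometheus_client metric families, built by _setup_labels and filled in by _get_metrics. A sketch of what one entry of that nested dict could look like, assuming GaugeMetricFamily is the type used; the metric name and label values here are illustrative:

from prometheus_client.core import GaugeMetricFamily

# One entry of the nested dict the collector yields from:
# _hadoop_regionserver_metrics[service][metric] -> a metric family.
metric = GaugeMetricFamily(
    'hadoop_hbase_regionserver_read_request_count',  # illustrative name
    'Read requests served by this RegionServer.',
    labels=['cluster', 'host'],
)
metric.add_metric(['mycluster', 'rs-node-1'], 12345.0)  # label values + sample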
def collect(self):
    self._clear_init()
    # Send an HTTP request to the JMX URL to fetch the metric data;
    # the response is the JSON array of MBean objects.
    try:
        beans = utils.get_metrics(self._url)
    except Exception:
        logger.info("Can't scrape metrics from url: {0}".format(self._url))
    else:
        # Register every MBean we care about, along with each metric's
        # labels and description.
        self._setup_metrics_labels(beans)
        # Set the value of every metric.
        self._get_metrics(beans)
        # Merge the common metrics into the NameNode metrics.
        common_metrics = common_metrics_info(self._cluster, beans, "hdfs", "namenode")
        self._hadoop_namenode_metrics.update(common_metrics())
        # Iterate over every metric category (NameNode-specific plus common)
        # and yield each metric with its labels.
        for service in self._merge_list:
            for metric in self._hadoop_namenode_metrics[service]:
                yield self._hadoop_namenode_metrics[service][metric]
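Each of these collect() methods follows the prometheus_client custom-collector protocol: the registry calls collect() on every registered collector at scrape time and serializes whatever metric families it yields. A minimal, runnable wiring sketch; DemoCollector is a stand-in for the collectors above, and the port is an arbitrary choice:

import time
from prometheus_client import start_http_server
from prometheus_client.core import GaugeMetricFamily, REGISTRY

class DemoCollector(object):
    # Stand-in for the NameNode/RegionServer/HiveServer2 collectors;
    # the registry calls collect() on each scrape.
    def collect(self):
        g = GaugeMetricFamily('demo_up', 'Demo metric.', labels=['cluster'])
        g.add_metric(['mycluster'], 1.0)
        yield g

if __name__ == '__main__':
    REGISTRY.register(DemoCollector())
    start_http_server(9131)  # expose /metrics for Prometheus to scrape
    while True:
        time.sleep(60)  # keep the process alive; scrapes happen on demand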