Code example #1
 def process_gc_collector(self, bean, kept):
     context = bean['name'].split("java.lang:type=")[1].split(",name=")
     for key, value in bean.iteritems():
         if key in EXCLUDED_KEYS:
             continue
         if value is None:
             continue
         if key == 'LastGcInfo':
             context.append(key)
             for lastgc_key, lastgc_val in bean[key].iteritems():
                 if lastgc_key == 'memoryUsageAfterGc' or lastgc_key == 'memoryUsageBeforeGc':
                     context.append(lastgc_key)
                     for memusage in lastgc_val:  # lastgc_val is a list
                         context.append(memusage["key"])
                         for final_key, final_val in memusage[
                                 "value"].iteritems():
                             safe_context, safe_final_key = self.safe_replace(
                                 context, final_key)
                             kept.append(
                                 (safe_context, safe_final_key, final_val))
                         context.pop()
                     context.pop()
                 elif is_numeric(lastgc_val):
                     safe_context, safe_lastgc_key = self.safe_replace(
                         context, lastgc_key)
                     kept.append(
                         (safe_context, safe_lastgc_key, lastgc_val))
             context.pop()
         elif is_numeric(value):
             safe_context, safe_key = self.safe_replace(context, key)
             kept.append((safe_context, safe_key, value))
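Both `process_gc_collector` above and several snippets below lean on helpers that are not shown on this page: `is_numeric` and `safe_replace`. A minimal sketch of plausible implementations, written as free functions for brevity (an assumption; in tcollector `safe_replace` is a method and the real logic may differ):

    def is_numeric(value):
        # Accept real numbers only; bool is an int subclass, so reject it.
        # (On Python 2, long would be accepted here as well.)
        return isinstance(value, (int, float)) and not isinstance(value, bool)

    def safe_replace(context, key):
        # Replace characters that are unsafe in metric names, e.g. spaces.
        safe_context = [c.replace(" ", "_") for c in context]
        safe_key = key.replace(" ", "_")
        return safe_context, safe_key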
Code example #2
File: hadoop_http.py Project: natbraun/tcollector
    def poll(self):
        """
        Get metrics from the http server's /jmx page, and transform them into normalized tuples

        @return: array of tuples ([u'Context', u'Array'], u'metricName', value)
        """
        json_arr = self.request().get('beans', [])
        kept = []
        for bean in json_arr:
            if not bean['name'] or "name=" not in bean['name']:
                continue
            # split the name string
            bean_name = bean['name'].split("name=")[1]
            context = re.split(",sub=|,q[0-9]+=", bean_name)
            # Create a set that keeps the first occurrence
            context = OrderedDict.fromkeys(context).keys()
            # lower case and replace spaces.
            context = [c.lower().replace(" ", "_") for c in context]
            # don't want to include the service or daemon twice
            context = [c for c in context if c != self.service and c != self.daemon]

            for key, value in bean.iteritems():
                if key in EXCLUDED_KEYS:
                    continue
                value = value if is_numeric(value) else self.to_numeric(context, key, value)
                if not is_numeric(value):
                    continue
                kept.append((context, key, value))
        return kept
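To make the context normalization concrete, here is a hedged, standalone walk-through using an illustrative Hadoop bean name (the sample value is made up for the example):

    import re
    from collections import OrderedDict

    name = "Hadoop:service=NameNode,name=RpcActivityForPort8020,sub=Client"
    bean_name = name.split("name=")[1]             # 'RpcActivityForPort8020,sub=Client'
    context = re.split(",sub=|,q[0-9]+=", bean_name)
    context = list(OrderedDict.fromkeys(context))  # de-duplicate, keep first occurrence
    context = [c.lower().replace(" ", "_") for c in context]
    print(context)                                 # ['rpcactivityforport8020', 'client']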
Code example #3
    def poll(self):
        """
        Get metrics from the http server's /jmx page, and transform them into normalized tuples

        @return: array of tuples ([u'Context', u'Array'], u'metricName', value)
        """
        json_arr = self.request().get('beans', [])
        kept = []
        for bean in json_arr:
            if not bean['name'] or "name=" not in bean['name']:
                continue
            # split the name string
            context = bean['name'].split("name=")[1].split(",sub=")
            # Create a set that keeps the first occurrence
            context = OrderedDict.fromkeys(context).keys()
            # lower case and replace spaces.
            context = [c.lower().replace(" ", "_") for c in context]
            # don't want to include the service or daemon twice
            context = [
                c for c in context if c != self.service and c != self.daemon
            ]

            for key, value in bean.iteritems():
                if key in EXCLUDED_KEYS:
                    continue
                if not is_numeric(value):
                    continue
                kept.append((context, key, value))
        return kept
Code example #4
 def print_metrics(self, d, metric_prefix, timestamp, tags, not_tags=[]):
     """ Take a dict of attributes and print out numerical metric strings
     Recurse if necessary
     """
     for k, v in d.items():
         # Tack on the name of the attribute
         attribute, more_tags = self.parse_attribute(k.lower(), not_tags)
         metric_name = '.'.join([metric_prefix, attribute])
         my_tags = tags + more_tags
         # If numerical
         if utils.is_numeric(v):
             print("%s %d %s %s" %
                   (metric_name, timestamp, str(v), ' '.join(my_tags)))
         # If a bool, True=1, False=0
         elif type(v) is bool:
             print("%s %d %s %s" %
                   (metric_name, timestamp, str(int(v)), ' '.join(my_tags)))
         # Or a dict of more attributes, call ourselves again
         elif type(v) is dict:
             self.print_metrics(v, metric_name, timestamp, my_tags,
                                not_tags)
         else:
             #lists, strings, etc
             #print '# ', type(v), metric_name, str(v)
             pass
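The recursion above is easiest to see with a driver. The sketch below stubs out `parse_attribute` and tcollector's `utils` module (both stubs are assumptions for the demo; the real `parse_attribute` also splits tags out of attribute names) and repeats the method body so the example runs standalone:

    import time

    class utils(object):
        # Stand-in for tcollector's utils module (assumption).
        @staticmethod
        def is_numeric(v):
            return isinstance(v, (int, float)) and not isinstance(v, bool)

    class Demo(object):
        def parse_attribute(self, attribute, not_tags):
            return attribute, []  # stub: no tag extraction

        def print_metrics(self, d, metric_prefix, timestamp, tags, not_tags=[]):
            for k, v in d.items():
                attribute, more_tags = self.parse_attribute(k.lower(), not_tags)
                metric_name = '.'.join([metric_prefix, attribute])
                my_tags = tags + more_tags
                if utils.is_numeric(v):
                    print("%s %d %s %s" % (metric_name, timestamp, v, ' '.join(my_tags)))
                elif type(v) is bool:
                    print("%s %d %s %s" % (metric_name, timestamp, int(v), ' '.join(my_tags)))
                elif type(v) is dict:
                    self.print_metrics(v, metric_name, timestamp, my_tags, not_tags)

    Demo().print_metrics({"HeapMemoryUsage": {"used": 1024, "max": 4096},
                          "Verbose": False}, "jvm", int(time.time()), [])
    # jvm.heapmemoryusage.used <ts> 1024
    # jvm.heapmemoryusage.max <ts> 4096
    # jvm.verbose <ts> 0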
Code example #5
 def process_java_lang_metrics(self, bean, kept):
     context = bean['name'].split("java.lang:type=")[1].split(",name=")
     for key, value in bean.iteritems():
         if key in EXCLUDED_KEYS:
             continue
         if value is None:
             continue
         if is_numeric(value):
             safe_context, safe_key = self.safe_replace(context, key)
             kept.append((safe_context, safe_key, value))
         elif isinstance(
                 value, dict
         ):  # only go one level deep since there is no other level empirically
             for subkey, subvalue in value.iteritems():
                 if is_numeric(subvalue):
                     safe_context, final_key = self.safe_replace(
                         context, key + "." + subkey)
                     kept.append((safe_context, final_key, subvalue))
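For example, a `java.lang:type=Memory` bean reports `HeapMemoryUsage` as a dict of numeric fields, so the one-level descent above yields dotted keys (illustrative values; assumes `safe_replace` leaves these particular strings unchanged):

    bean = {
        "name": "java.lang:type=Memory",
        "HeapMemoryUsage": {"init": 268435456, "used": 1048576,
                            "committed": 268435456, "max": 4294967296},
    }
    # context == ['Memory'], and kept would gain tuples such as:
    #   (['Memory'], 'HeapMemoryUsage.used', 1048576)
    #   (['Memory'], 'HeapMemoryUsage.max', 4294967296)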
Code example #6
    def poll(self):
        """
        Get metrics from the http server's /jmx page, and transform them into normalized tuples

        @return: array of tuples ([u'Context', u'Array'], u'metricName', value)
        """
        json_arr = self.request().get('beans', [])
        kept = []
        for bean in json_arr:
            try:
                if bean['name'] and bean['name'].startswith(
                        'java.lang:type=GarbageCollector'):
                    self.process_gc_collector(bean, kept)
                elif bean['name'] and bean['name'].startswith('java.lang:type='):
                    self.process_java_lang_metrics(bean, kept)
                elif bean['name'] and "name=" in bean['name']:
                    # split the name string
                    context = bean['name'].split("name=")[1].split(",sub=")
                    # Create a set that keeps the first occurrence
                    context = OrderedDict.fromkeys(context).keys()
                    # lower case and replace spaces.
                    context = [c.lower().replace(" ", "_") for c in context]
                    # don't want to include the service or daemon twice
                    context = [
                        c for c in context
                        if c != self.service and c != self.daemon
                    ]
                    for key, value in bean.iteritems():
                        if key in EXCLUDED_KEYS:
                            continue
                        if not is_numeric(value):
                            continue
                        kept.append((context, key, value))
            except Exception:
                self.logger.exception(
                    "exception in HadoopHttp when collecting %s", bean['name'])

        return kept
Code example #7
File: elasticsearch.py Project: think8848/tcollector
def _traverse(metric, stats, ts, tags):
    """
     Recursively traverse the json tree and print out leaf numeric values
     Please make sure you call this inside a lock and don't add locking
     inside this function
  """
    #print metric,stats,ts,tags
    if isinstance(stats, dict):
        if "timestamp" in stats:
            ts = stats["timestamp"] / 1000  # ms -> s
        for key in stats.keys():
            if key != "timestamp":
                _traverse(metric + "." + key, stats[key], ts, tags)
    if isinstance(stats, (list, set, tuple)):
        count = 0
        for value in stats:
            _traverse(metric + "." + str(count), value, ts, tags)
            count += 1
    if utils.is_numeric(stats) and not isinstance(stats, bool):
        if isinstance(stats, int):
            stats = int(stats)
        printmetric(metric, ts, stats, tags)
    return
Code example #8
File: elasticsearch.py Project: OpenTSDB/tcollector
def _traverse(metric, stats, ts, tags):
  """
     Recursively traverse the json tree and print out leaf numeric values
     Please make sure you call this inside a lock and don't add locking
     inside this function
  """
  #print metric,stats,ts,tags
  if isinstance(stats,dict):
    if "timestamp" in stats:
      ts = stats["timestamp"] / 1000 # ms -> s
    for key in stats.keys():
      if key != "timestamp":
        _traverse(metric + "." + key, stats[key], ts, tags)
  if isinstance(stats, (list, set, tuple)):
    count = 0
    for value in stats:
      _traverse(metric + "." + str(count), value, ts, tags)
      count += 1
  if utils.is_numeric(stats) and not isinstance(stats, bool):
    if isinstance(stats, int):
      stats = int(stats)
    printmetric(metric, ts, stats, tags)
  return
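A hedged walk-through of how `_traverse` flattens a stats subtree. The stubs below stand in for the real `printmetric` and `utils` (assumptions for the demo) and call `_traverse` exactly as defined above:

    class utils(object):
        # Stand-in for tcollector's utils module (assumption).
        @staticmethod
        def is_numeric(v):
            return isinstance(v, (int, float))

    def printmetric(metric, ts, value, tags):
        print("%s %d %s %s" % (metric, ts, value, tags))

    stats = {"timestamp": 1712345678000,
             "docs": {"count": 42},
             "shards": [{"size": 7}]}
    _traverse("elasticsearch", stats, 0, "cluster=prod")
    # elasticsearch.docs.count 1712345678 42 cluster=prod
    # elasticsearch.shards.0.size 1712345678 7 cluster=prod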
Code example #9
File: jolokia.py Project: wangy1931/tcollector
 def print_metrics(self, d, metric_prefix, timestamp, tags, not_tags=[]):
     """ Take a dict of attributes and print out numerical metric strings
     Recurse if necessary
     """
     for k, v in d.iteritems():
         # Tack on the name of the attribute
         attribute, more_tags = self.parse_attribute(k.lower(), not_tags)
         metric_name = '.'.join([metric_prefix, attribute])
         my_tags = tags + more_tags
         # If numerical
         if utils.is_numeric(v):
             print "%s %d %s %s" % (metric_name, timestamp, str(v),
                                     ' '.join(my_tags))
         # If a bool, True=1, False=0
         elif type(v) is bool:
             print "%s %d %s %s" % (metric_name, timestamp, str(int(v)),
                                     ' '.join(my_tags))
         # Or a dict of more attributes, call ourselves again
         elif type(v) is dict:
             self.print_metrics(v, metric_name, timestamp, my_tags, not_tags)
         else:
             #lists, strings, etc
             #print '# ', type(v), metric_name, str(v)
             pass
Code example #10
File: elasticsearch.py Project: Ulrhol/tcollector
          % (cluster_name, nstats["cluster_name"]))
      return 1
    this_nodeid, nstats = nstats["nodes"].popitem()
    if this_nodeid != nodeid:
      err("node ID changed from %r to %r" % (nodeid, this_nodeid))
      return 1

    is_master = nodeid == cluster_state(server)["master_node"]
    printmetric("is_master", int(is_master))
    if is_master:
      ts = int(time.time())  # In case last call took a while.
      cstats = cluster_health(server)
      for stat, value in cstats.iteritems():
        if stat == "status":
          value = STATUS_MAP.get(value, -1)
        elif not utils.is_numeric(value):
          continue
        printmetric("cluster." + stat, value)

    if "os" in nstats:
       ts = nstats["os"]["timestamp"] / 1000  # ms -> s
    if "timestamp" in nstats:
       ts = nstats["timestamp"] / 1000  # ms -> s

    if "indices" in nstats:
       indices = nstats["indices"]
       if  "docs" in indices:
          printmetric("num_docs", indices["docs"]["count"])
       if  "store" in indices:
          printmetric("indices.size", indices["store"]["size_in_bytes"])
       if  "indexing" in indices:
Code example #11
def _collect_server(server, version):
    ts = int(time.time())
    nstats = node_stats(server, version)
    cluster_name = nstats["cluster_name"]
    nodeid, nstats = nstats["nodes"].popitem()
    node_name = nstats["name"]

    is_master = nodeid == cluster_state(server)["master_node"]
    printmetric("is_master",
                ts,
                int(is_master),
                node=node_name,
                cluster=cluster_name)
    if is_master:
        ts = int(time.time())  # In case last call took a while.
        cstats = cluster_health(server)
        for stat, value in cstats.iteritems():
            if stat == "status":
                value = STATUS_MAP.get(value, -1)
            elif not utils.is_numeric(value):
                continue
            printmetric("cluster." + stat, ts, value, cluster=cluster_name)

    if "os" in nstats:
        ts = nstats["os"]["timestamp"] / 1000  # ms -> s
    if "timestamp" in nstats:
        ts = nstats["timestamp"] / 1000  # ms -> s

    if "indices" in nstats:
        indices = nstats["indices"]
        if "docs" in indices:
            printmetric("num_docs",
                        ts,
                        indices["docs"]["count"],
                        node=node_name,
                        cluster=cluster_name)
        if "store" in indices:
            printmetric("indices.size",
                        ts,
                        indices["store"]["size_in_bytes"],
                        node=node_name,
                        cluster=cluster_name)
        if "indexing" in indices:
            d = indices["indexing"]
            printmetric("indexing.index_total",
                        ts,
                        d["index_total"],
                        node=node_name,
                        cluster=cluster_name)
            printmetric("indexing.index_time",
                        ts,
                        d["index_time_in_millis"],
                        node=node_name,
                        cluster=cluster_name)
            printmetric("indexing.index_current",
                        ts,
                        d["index_current"],
                        node=node_name,
                        cluster=cluster_name)
            printmetric("indexing.delete_total",
                        ts,
                        d["delete_total"],
                        node=node_name,
                        cluster=cluster_name)
            printmetric("indexing.delete_time_in_millis",
                        ts,
                        d["delete_time_in_millis"],
                        node=node_name,
                        cluster=cluster_name)
            printmetric("indexing.delete_current",
                        ts,
                        d["delete_current"],
                        node=node_name,
                        cluster=cluster_name)
            del d
        if "get" in indices:
            d = indices["get"]
            printmetric("get.total",
                        ts,
                        d["total"],
                        node=node_name,
                        cluster=cluster_name)
            printmetric("get.time",
                        ts,
                        d["time_in_millis"],
                        node=node_name,
                        cluster=cluster_name)
            printmetric("get.exists_total",
                        ts,
                        d["exists_total"],
                        node=node_name,
                        cluster=cluster_name)
            printmetric("get.exists_time",
                        ts,
                        d["exists_time_in_millis"],
                        node=node_name,
                        cluster=cluster_name)
            printmetric("get.missing_total",
                        ts,
                        d["missing_total"],
                        node=node_name,
                        cluster=cluster_name)
            printmetric("get.missing_time",
                        ts,
                        d["missing_time_in_millis"],
                        node=node_name,
                        cluster=cluster_name)
            del d
        if "search" in indices:
            d = indices["search"]
            printmetric("search.query_total",
                        ts,
                        d["query_total"],
                        node=node_name,
                        cluster=cluster_name)
            printmetric("search.query_time",
                        ts,
                        d["query_time_in_millis"],
                        node=node_name,
                        cluster=cluster_name)
            printmetric("search.query_current",
                        ts,
                        d["query_current"],
                        node=node_name,
                        cluster=cluster_name)
            printmetric("search.fetch_total",
                        ts,
                        d["fetch_total"],
                        node=node_name,
                        cluster=cluster_name)
            printmetric("search.fetch_time",
                        ts,
                        d["fetch_time_in_millis"],
                        node=node_name,
                        cluster=cluster_name)
            printmetric("search.fetch_current",
                        ts,
                        d["fetch_current"],
                        node=node_name,
                        cluster=cluster_name)
            del d
        if "cache" in indices:
            d = indices["cache"]
            printmetric("cache.field.evictions",
                        ts,
                        d["field_evictions"],
                        node=node_name,
                        cluster=cluster_name)
            printmetric("cache.field.size",
                        ts,
                        d["field_size_in_bytes"],
                        node=node_name,
                        cluster=cluster_name)
            printmetric("cache.filter.count",
                        ts,
                        d["filter_count"],
                        node=node_name,
                        cluster=cluster_name)
            printmetric("cache.filter.evictions",
                        ts,
                        d["filter_evictions"],
                        node=node_name,
                        cluster=cluster_name)
            printmetric("cache.filter.size",
                        ts,
                        d["filter_size_in_bytes"],
                        node=node_name,
                        cluster=cluster_name)
            del d
        if "merges" in indices:
            d = indices["merges"]
            printmetric("merges.current",
                        ts,
                        d["current"],
                        node=node_name,
                        cluster=cluster_name)
            printmetric("merges.total",
                        ts,
                        d["total"],
                        node=node_name,
                        cluster=cluster_name)
            printmetric("merges.total_time",
                        ts,
                        d["total_time_in_millis"] / 1000.,
                        node=node_name,
                        cluster=cluster_name)
            del d
        del indices
    if "process" in nstats:
        process = nstats["process"]
        ts = process["timestamp"] / 1000  # ms -> s
        open_fds = process.get("open_file_descriptors")  # ES 0.17
        if open_fds is None:
            open_fds = process.get("fd")  # ES 0.16
            if open_fds is not None:
                open_fds = open_fds["total"]
        if open_fds is not None:
            printmetric("process.open_file_descriptors",
                        ts,
                        open_fds,
                        node=node_name,
                        cluster=cluster_name)
        if "cpu" in process:
            d = process["cpu"]
            printmetric("process.cpu.percent",
                        ts,
                        d["percent"],
                        node=node_name,
                        cluster=cluster_name)
            printmetric("process.cpu.sys",
                        ts,
                        d["sys_in_millis"] / 1000.,
                        node=node_name,
                        cluster=cluster_name)
            printmetric("process.cpu.user",
                        ts,
                        d["user_in_millis"] / 1000.,
                        node=node_name,
                        cluster=cluster_name)
            del d
        if "mem" in process:
            d = process["mem"]
            printmetric("process.mem.resident",
                        ts,
                        d["resident_in_bytes"],
                        node=node_name,
                        cluster=cluster_name)
            printmetric("process.mem.shared",
                        ts,
                        d["share_in_bytes"],
                        node=node_name,
                        cluster=cluster_name)
            printmetric("process.mem.total_virtual",
                        ts,
                        d["total_virtual_in_bytes"],
                        node=node_name,
                        cluster=cluster_name)
            del d
        del process
    if "jvm" in nstats:
        jvm = nstats["jvm"]
        ts = jvm["timestamp"] / 1000  # ms -> s
        if "mem" in jvm:
            d = jvm["mem"]
            printmetric("jvm.mem.heap_used",
                        ts,
                        d["heap_used_in_bytes"],
                        node=node_name,
                        cluster=cluster_name)
            printmetric("jvm.mem.heap_committed",
                        ts,
                        d["heap_committed_in_bytes"],
                        node=node_name,
                        cluster=cluster_name)
            printmetric("jvm.mem.non_heap_used",
                        ts,
                        d["non_heap_used_in_bytes"],
                        node=node_name,
                        cluster=cluster_name)
            printmetric("jvm.mem.non_heap_committed",
                        ts,
                        d["non_heap_committed_in_bytes"],
                        node=node_name,
                        cluster=cluster_name)
            del d
        if "threads" in jvm:
            d = jvm["threads"]
            printmetric("jvm.threads.count",
                        ts,
                        d["count"],
                        node=node_name,
                        cluster=cluster_name)
            printmetric("jvm.threads.peak_count",
                        ts,
                        d["peak_count"],
                        node=node_name,
                        cluster=cluster_name)
            del d
        for gc, d in jvm["gc"]["collectors"].iteritems():
            printmetric("jvm.gc.collection_count",
                        ts,
                        d["collection_count"],
                        gc=gc,
                        node=node_name,
                        cluster=cluster_name)
            printmetric("jvm.gc.collection_time",
                        ts,
                        d["collection_time_in_millis"] / 1000.,
                        gc=gc,
                        node=node_name,
                        cluster=cluster_name)
        del jvm
        del d
    if "network" in nstats:
        for stat, value in nstats["network"]["tcp"].iteritems():
            if utils.is_numeric(value):
                printmetric("network.tcp." + stat,
                            ts,
                            value,
                            node=node_name,
                            cluster=cluster_name)
        for stat, value in nstats["transport"].iteritems():
            if utils.is_numeric(value):
                printmetric("transport." + stat,
                            ts,
                            value,
                            node=node_name,
                            cluster=cluster_name)
    # New in ES 0.17:
    for stat, value in nstats.get("http", {}).iteritems():
        if utils.is_numeric(value):
            printmetric("http." + stat,
                        ts,
                        value,
                        node=node_name,
                        cluster=cluster_name)
    del nstats
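Examples #11 and #13 assume a `printmetric` that takes the metric name, a timestamp, the value, and arbitrary tags as keyword arguments. A minimal sketch compatible with those call sites (an assumption; the real elasticsearch.py emitter may differ, e.g. in its metric prefix or value escaping):

    def printmetric(metric, ts, value, **tags):
        # Render tags OpenTSDB-style as trailing key=value pairs.
        tagstr = "".join(" %s=%s" % (k, v) for k, v in sorted(tags.items()))
        print("elasticsearch.%s %d %s%s" % (metric, ts, value, tagstr))

    printmetric("is_master", 1712345678, 1, node="node1", cluster="prod")
    # elasticsearch.is_master 1712345678 1 cluster=prod node=node1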
Code example #12
                      (cluster_name, nstats["cluster_name"]))
            return 1
        this_nodeid, nstats = nstats["nodes"].popitem()
        if this_nodeid != nodeid:
            utils.err("node ID changed from %r to %r" % (nodeid, this_nodeid))
            return 1

        is_master = nodeid == cluster_state(server)["master_node"]
        printmetric("is_master", int(is_master))
        if is_master:
            ts = int(time.time())  # In case last call took a while.
            cstats = cluster_health(server)
            for stat, value in cstats.iteritems():
                if stat == "status":
                    value = STATUS_MAP.get(value, -1)
                elif not utils.is_numeric(value):
                    continue
                printmetric("cluster." + stat, value)

        if "os" in nstats:
            ts = nstats["os"]["timestamp"] / 1000  # ms -> s
        if "timestamp" in nstats:
            ts = nstats["timestamp"] / 1000  # ms -> s

        if "indices" in nstats:
            indices = nstats["indices"]
            if "docs" in indices:
                printmetric("num_docs", indices["docs"]["count"])
            if "store" in indices:
                printmetric("indices.size", indices["store"]["size_in_bytes"])
            if "indexing" in indices:
Code example #13
File: elasticsearch.py Project: anrs/tcollector
def _collect_server(server, version):
  ts = int(time.time())
  nstats = node_stats(server, version)
  cluster_name = nstats["cluster_name"]
  nodeid, nstats = nstats["nodes"].popitem()
  node_name = nstats["name"]

  is_master = nodeid == cluster_state(server)["master_node"]
  printmetric("is_master", ts, int(is_master), node=node_name, cluster=cluster_name)
  if is_master:
    ts = int(time.time())  # In case last call took a while.
    cstats = cluster_health(server)
    for stat, value in cstats.iteritems():
      if stat == "status":
        value = STATUS_MAP.get(value, -1)
      elif not utils.is_numeric(value):
        continue
      printmetric("cluster." + stat, ts, value, cluster=cluster_name)

  if "os" in nstats:
     ts = nstats["os"]["timestamp"] / 1000  # ms -> s
  if "timestamp" in nstats:
     ts = nstats["timestamp"] / 1000  # ms -> s

  if "indices" in nstats:
     indices = nstats["indices"]
     if  "docs" in indices:
        printmetric("num_docs", ts, indices["docs"]["count"], node=node_name, cluster=cluster_name)
     if  "store" in indices:
        printmetric("indices.size", ts, indices["store"]["size_in_bytes"], node=node_name, cluster=cluster_name)
     if  "indexing" in indices:
        d = indices["indexing"]
        printmetric("indexing.index_total", ts, d["index_total"], node=node_name, cluster=cluster_name)
        printmetric("indexing.index_time", ts, d["index_time_in_millis"], node=node_name, cluster=cluster_name)
        printmetric("indexing.index_current", ts, d["index_current"], node=node_name, cluster=cluster_name)
        printmetric("indexing.delete_total", ts, d["delete_total"], node=node_name, cluster=cluster_name)
        printmetric("indexing.delete_time_in_millis", ts, d["delete_time_in_millis"], node=node_name, cluster=cluster_name)
        printmetric("indexing.delete_current", ts, d["delete_current"], node=node_name, cluster=cluster_name)
        del d
     if  "get" in indices:
        d = indices["get"]
        printmetric("get.total", ts, d["total"], node=node_name, cluster=cluster_name)
        printmetric("get.time", ts, d["time_in_millis"], node=node_name, cluster=cluster_name)
        printmetric("get.exists_total", ts, d["exists_total"], node=node_name, cluster=cluster_name)
        printmetric("get.exists_time", ts, d["exists_time_in_millis"], node=node_name, cluster=cluster_name)
        printmetric("get.missing_total", ts, d["missing_total"], node=node_name, cluster=cluster_name)
        printmetric("get.missing_time", ts, d["missing_time_in_millis"], node=node_name, cluster=cluster_name)
        del d
     if  "search" in indices:
        d = indices["search"]
        printmetric("search.query_total", ts, d["query_total"], node=node_name, cluster=cluster_name)
        printmetric("search.query_time", ts, d["query_time_in_millis"], node=node_name, cluster=cluster_name)
        printmetric("search.query_current", ts, d["query_current"], node=node_name, cluster=cluster_name)
        printmetric("search.fetch_total", ts, d["fetch_total"], node=node_name, cluster=cluster_name)
        printmetric("search.fetch_time", ts, d["fetch_time_in_millis"], node=node_name, cluster=cluster_name)
        printmetric("search.fetch_current", ts, d["fetch_current"], node=node_name, cluster=cluster_name)
        del d
     if "cache" in indices:
        d = indices["cache"]
        printmetric("cache.field.evictions", ts, d["field_evictions"], node=node_name, cluster=cluster_name)
        printmetric("cache.field.size", ts, d["field_size_in_bytes"], node=node_name, cluster=cluster_name)
        printmetric("cache.filter.count", ts, d["filter_count"], node=node_name, cluster=cluster_name)
        printmetric("cache.filter.evictions", ts, d["filter_evictions"], node=node_name, cluster=cluster_name)
        printmetric("cache.filter.size", ts, d["filter_size_in_bytes"], node=node_name, cluster=cluster_name)
        del d
     if "merges" in indices:
        d = indices["merges"]
        printmetric("merges.current", ts, d["current"], node=node_name, cluster=cluster_name)
        printmetric("merges.total", ts, d["total"], node=node_name, cluster=cluster_name)
        printmetric("merges.total_time", ts, d["total_time_in_millis"] / 1000., node=node_name, cluster=cluster_name)
        del d
     del indices
  if "process" in nstats:
     process = nstats["process"]
     ts = process["timestamp"] / 1000  # ms -> s
     open_fds = process.get("open_file_descriptors")  # ES 0.17
     if open_fds is None:
       open_fds = process.get("fd")  # ES 0.16
       if open_fds is not None:
         open_fds = open_fds["total"]
     if open_fds is not None:
       printmetric("process.open_file_descriptors", ts, open_fds, node=node_name, cluster=cluster_name)
     if "cpu" in process:
        d = process["cpu"]
        printmetric("process.cpu.percent", ts, d["percent"], node=node_name, cluster=cluster_name)
        printmetric("process.cpu.sys", ts, d["sys_in_millis"] / 1000., node=node_name, cluster=cluster_name)
        printmetric("process.cpu.user", ts, d["user_in_millis"] / 1000., node=node_name, cluster=cluster_name)
        del d
     if "mem" in process:
        d = process["mem"]
        printmetric("process.mem.resident", ts, d["resident_in_bytes"], node=node_name, cluster=cluster_name)
        printmetric("process.mem.shared", ts, d["share_in_bytes"], node=node_name, cluster=cluster_name)
        printmetric("process.mem.total_virtual", ts, d["total_virtual_in_bytes"], node=node_name, cluster=cluster_name)
        del d
     del process
  if "jvm" in nstats:
     jvm = nstats["jvm"]
     ts = jvm["timestamp"] / 1000  # ms -> s
     if "mem" in jvm:
        d = jvm["mem"]
        printmetric("jvm.mem.heap_used", ts, d["heap_used_in_bytes"], node=node_name, cluster=cluster_name)
        printmetric("jvm.mem.heap_committed", ts, d["heap_committed_in_bytes"], node=node_name, cluster=cluster_name)
        printmetric("jvm.mem.non_heap_used", ts, d["non_heap_used_in_bytes"], node=node_name, cluster=cluster_name)
        printmetric("jvm.mem.non_heap_committed", ts, d["non_heap_committed_in_bytes"], node=node_name, cluster=cluster_name)
        del d
     if "threads" in jvm:
        d = jvm["threads"]
        printmetric("jvm.threads.count", ts, d["count"], node=node_name, cluster=cluster_name)
        printmetric("jvm.threads.peak_count", ts, d["peak_count"], node=node_name, cluster=cluster_name)
        del d
     for gc, d in jvm["gc"]["collectors"].iteritems():
       printmetric("jvm.gc.collection_count", ts, d["collection_count"], gc=gc, node=node_name, cluster=cluster_name)
       printmetric("jvm.gc.collection_time", ts,
                   d["collection_time_in_millis"] / 1000., gc=gc, node=node_name, cluster=cluster_name)
     del jvm
     del d
  if "network" in nstats:
     for stat, value in nstats["network"]["tcp"].iteritems():
       if utils.is_numeric(value):
         printmetric("network.tcp." + stat, ts, value, node=node_name, cluster=cluster_name)
     for stat, value in nstats["transport"].iteritems():
       if utils.is_numeric(value):
         printmetric("transport." + stat, ts, value, node=node_name, cluster=cluster_name)
  # New in ES 0.17:
  for stat, value in nstats.get("http", {}).iteritems():
    if utils.is_numeric(value):
      printmetric("http." + stat, ts, value, node=node_name, cluster=cluster_name)
  del nstats