Ejemplo n.º 1
0
  def collect(self):
    """Scrape BGP peer UVEs from the analytics endpoint and yield
    Prometheus gauge metrics built from each peer's BgpPeerInfoData.

    Yields:
      Metric: ``bgp_state_info`` first (value = FSM state index + 1),
      then one metric family per field described by the export schema
      declared below.
    """
    # BGP finite-state-machine states in protocol order; values are kept
    # as strings and converted to int where used.
    bgp_state = {
      "Idle": '0',
      "Active": '1',
      "Connect": '2',
      "OpenSent": '3',
      "OpenConfirm": '4',
      "Established": '5'
      }
    url = self._endpoint
    response = json.loads(requests.get(url).content.decode('UTF-8'))
    json_all_bgp = response['value']

    # Overall state gauge.  +1 so "Idle" (0) is distinguishable from an
    # absent/default sample value of 0.
    metric = Metric('bgp_state_info', '', 'gauge')
    for peer in json_all_bgp:
      cbgp = peer['value']
      conn = peer['name'].split(':')
      # The UVE name is colon-separated; positions 4 and 9 hold the local
      # control node and the remote peer.  NOTE(review): assumes the
      # analytics UVE key layout -- confirm if the API ever changes.
      snode = conn[4]
      dnode = conn[9]
      if 'BgpPeerInfoData' in cbgp and 'state_info' in cbgp['BgpPeerInfoData']:
        local_id = cbgp['BgpPeerInfoData']['local_id']
        state = cbgp['BgpPeerInfoData']['state_info']['state']
        metric.add_sample('bgp_state_info', value=int(bgp_state[state]) + 1, labels={
          'bgp_last_state': cbgp['BgpPeerInfoData']['state_info']['last_state'],
          'bgp_state': state,
          'control_node': snode,
          'peer': dnode,
          'local_id': str(local_id),
          })
    yield metric

    # Export schema: how each BgpPeerInfoData field maps to metrics.
    #   gauge     -> numeric values exported directly; strings become labels
    #   dict_mix  -> flat dict of scalars, one metric per listed key
    #   list_str  -> list of strings, one labelled sample per element
    #   dict_dict -> nested dicts, keys written as '/'-separated paths
    BgpPeerInfoData = {
      'long_lived_graceful_restart_time': {
        'type': 'gauge',
        'description': 'None'
      },
      'peer_type': {
        'type': 'gauge',
        'description': 'None'
      },
      'origin_override': {
        'type': 'gauge',
        'description': 'None'
      },
      'cluster_id': {
        'type': 'gauge',
        'description': 'None'
      },
      'peer_address': {
        'type': 'gauge',
        'description': 'None'
      },
      'as_override': {
        'type': 'gauge',
        'description': 'None'
      },
      'state_info': {
        'type': 'dict_mix',
        'item_dict_mix': {
          'last_state': 'None',
          'state': 'None',
          'last_state_at': 'None'
        }
      },
      'graceful_restart_time': {
        'type': 'gauge',
        'description': 'None'
      },
      'local_asn': {
        'type': 'gauge',
        'description': 'None'
      },
      'event_info': {
        'type': 'dict_mix',
        'item_dict_mix': {
          'last_event_at': 'None',
          'last_event': 'None',
        }
      },
      'passive': {
        'type': 'gauge',
        'description': 'None'
      },
      'negotiated_families': {
        'type': 'list_str',
        'description': 'None'
      },
      'peer_id': {
        'type': 'gauge',
        'description': 'None'
      },
      'route_origin': {
        'type': 'gauge',
        'description': 'None'
      },
      'peer_asn': {
        'type': 'gauge',
        'description': 'None'
      },
      'families': {
        'type': 'list_str',
        'description': 'None'
      },
      'admin_down': {
        'type': 'gauge',
        'description': 'None'
      },
      'configured_families': {
        'type': 'list_str',
        'description': 'None'
      },
      'peer_port': {
        'type': 'gauge',
        'description': 'None'
      },
      'hold_time': {
        'type': 'gauge',
        'description': 'None'
      },
      'peer_stats_info': {
        'type': 'dict_dict',
        'description': 'None',
        'item_dict_dict':
        {
          'rx_proto_stats/notification': 'None',
          'rx_proto_stats/update': 'None',
          'rx_proto_stats/close': 'None',
          'rx_proto_stats/total': 'None',
          'rx_proto_stats/keepalive': 'None',
          'tx_proto_stats/notification': 'None',
          'tx_proto_stats/update': 'None',
          'tx_proto_stats/close': 'None',
          'tx_proto_stats/total': 'None',
          'tx_proto_stats/keepalive': 'None',
          'rx_update_stats/unreach': 'None',
          'rx_update_stats/total': 'None',
          'rx_update_stats/reach': 'None',
          'rx_update_stats/end_of_rib': 'None',
          'tx_update_stats/unreach': 'None',
          'tx_update_stats/total': 'None',
          'tx_update_stats/reach': 'None',
          'tx_update_stats/end_of_rib': 'None',
          'rx_route_stats/primary_path_count': 'None',
          'rx_route_stats/total_path_count': 'None',
          'rx_error_stats/inet6_error_stats/bad_inet6_xml_token_count': 'None',
          'rx_error_stats/inet6_error_stats/bad_inet6_afi_safi_count': 'None',
          'rx_error_stats/inet6_error_stats/bad_inet6_nexthop_count': 'None',
          'rx_error_stats/inet6_error_stats/bad_inet6_prefix_count': 'None',
        }
      },
      'local_id': {
        'type': 'gauge',
        'description': 'None'
      },
      'router_type': {
        'type': 'gauge',
        'description': 'None'
      },
    }
    for name_metric in BgpPeerInfoData:
      field_type = BgpPeerInfoData[name_metric]['type']
      if field_type == 'gauge':
        metric = Metric('bgp_' + name_metric, '', 'gauge')
        for peer in json_all_bgp:
          cbgp = peer['value']
          conn = peer['name'].split(':')
          if 'BgpPeerInfoData' in cbgp and name_metric in cbgp['BgpPeerInfoData']:
            snode = conn[4]
            dnode = conn[9]
            local_id = cbgp['BgpPeerInfoData']['local_id']
            field = cbgp['BgpPeerInfoData'][name_metric]
            # type() (not isinstance) on purpose: JSON booleans are a
            # subclass of int and must not be exported as numbers.
            if type(field) in [int, float]:
              metric.add_sample('bgp_' + name_metric, value=field, labels={
                'local_id': str(local_id),
                'control_node': snode,
                'peer': dnode,
                })
            elif type(field) is str:
              # Strings cannot be gauge values; export them as a label
              # with a constant sample value of 1.
              metric.add_sample('bgp_' + name_metric, value=1, labels={
                name_metric: field,
                'local_id': str(local_id),
                'control_node': snode,
                'peer': dnode,
                })
        yield metric
      elif field_type == 'dict_mix':
        for metric_item in BgpPeerInfoData[name_metric]['item_dict_mix']:
          metric = Metric('bgp_' + name_metric + '_' + metric_item,
                          BgpPeerInfoData[name_metric]['item_dict_mix'][metric_item],
                          'gauge')
          for peer in json_all_bgp:
            cbgp = peer['value']
            conn = peer['name'].split(':')
            if 'BgpPeerInfoData' in cbgp and name_metric in cbgp['BgpPeerInfoData']:
              snode = conn[4]
              dnode = conn[9]
              local_id = cbgp['BgpPeerInfoData']['local_id']
              # BUG FIX: guard against a missing sub-key (e.g. a peer with
              # state_info but no last_state_at yet) instead of raising
              # KeyError and aborting the whole scrape.
              if metric_item not in cbgp['BgpPeerInfoData'][name_metric]:
                continue
              field = cbgp['BgpPeerInfoData'][name_metric][metric_item]
              if type(field) in [int, float]:
                metric.add_sample('bgp_' + name_metric + '_' + metric_item, value=field, labels={
                  'local_id': str(local_id),
                  'control_node': snode,
                  'peer': dnode,
                  })
              elif type(field) is str:
                metric.add_sample('bgp_' + name_metric + '_' + metric_item, value=1, labels={
                  metric_item: field,
                  'local_id': str(local_id),
                  'control_node': snode,
                  'peer': dnode,
                  })
          yield metric
      elif field_type == 'list_str':
        metric = Metric('bgp_' + name_metric, BgpPeerInfoData[name_metric]['description'], 'gauge')
        for peer in json_all_bgp:
          cbgp = peer['value']
          conn = peer['name'].split(':')
          if 'BgpPeerInfoData' in cbgp and name_metric in cbgp['BgpPeerInfoData']:
            snode = conn[4]
            dnode = conn[9]
            local_id = cbgp['BgpPeerInfoData']['local_id']
            # One labelled sample per list element (e.g. per family).
            for element in cbgp['BgpPeerInfoData'][name_metric]:
              metric.add_sample('bgp_' + name_metric, value=1, labels={
                name_metric: element,
                'local_id': str(local_id),
                'control_node': snode,
                'peer': dnode,
                })
        yield metric
      elif field_type == 'dict_dict':
        for metric_item in BgpPeerInfoData[name_metric]['item_dict_dict']:
          description = BgpPeerInfoData[name_metric]['item_dict_dict'][metric_item]
          path = metric_item.split('/')
          flat_name = metric_item.replace('/', '_')
          metric = Metric('bgp_' + flat_name, description, 'gauge')
          for peer in json_all_bgp:
            cbgp = peer['value']
            conn = peer['name'].split(':')
            if 'BgpPeerInfoData' in cbgp and name_metric in cbgp['BgpPeerInfoData']:
              snode = conn[4]
              dnode = conn[9]
              # BUG FIX: local_id was never assigned in this branch, so
              # the label silently reused whatever value an earlier loop
              # left behind (or raised NameError on the first peer).
              local_id = cbgp['BgpPeerInfoData']['local_id']
              # Walk the '/'-separated path into the nested stats dict.
              value = cbgp['BgpPeerInfoData'][name_metric]
              for key in path:
                value = value[key]
              metric.add_sample('bgp_' + flat_name, value=value, labels={
                'local_id': str(local_id),
                'control_node': snode,
                'peer': dnode,
                })
          yield metric
Ejemplo n.º 2
0
  def collect(self):
    """Scrape XMPP peer UVEs from the analytics endpoint and yield
    Prometheus gauge metrics built from each peer's XmppPeerInfoData.

    Yields xmpp_state_info first (value = FSM state index + 1, or 0 when
    the peer has event_info but no state_info yet), then one metric
    family per field described by the schema below.
    """
    # XMPP session FSM states in protocol order; values kept as strings
    # and converted to int where used.
    xmpp_state = {
      "Idle": '0',
      "Active": '1' ,
      "Connect" : '2',
      "OpenSent": '3',
      "OpenConfirm": '4',
      "Established": '5'
      }
    # Export schema: how each XmppPeerInfoData field maps to metrics.
    #   dict_mix  -> flat dict of scalars, one metric per listed key
    #   dict_dict -> nested dicts, keys written as '/'-separated paths
    XmppPeerInfoData = {
      'state_info': {
        'type': 'dict_mix',
        'item_dict_mix': {
          'last_state': 'None',
          'state': 'None',
          'last_state_at': 'None'
        }
        # 'description': 'None'
      },
      'peer_stats_info': {
        'type': 'dict_dict',
        'description': 'None',
        'item_dict_dict':
        {
          'rx_proto_stats/notification': 'None',
          'rx_proto_stats/update': 'None',
          'rx_proto_stats/close': 'None',
          'rx_proto_stats/total': 'None',
          'rx_proto_stats/keepalive': 'None',
          'tx_proto_stats/notification': 'None',
          'tx_proto_stats/update': 'None',
          'tx_proto_stats/close': 'None',
          'tx_proto_stats/total': 'None',
          'tx_proto_stats/keepalive': 'None',
          'rx_update_stats/unreach': 'None',
          'rx_update_stats/total': 'None',
          'rx_update_stats/reach': 'None',
          'rx_update_stats/end_of_rib': 'None',
          'tx_update_stats/unreach': 'None',
          'tx_update_stats/total': 'None',
          'tx_update_stats/reach': 'None',
          'tx_update_stats/end_of_rib': 'None',
          'rx_route_stats/primary_path_count': 'None',
          'rx_route_stats/total_path_count': 'None',
          'rx_error_stats/inet6_error_stats/bad_inet6_xml_token_count': 'None',
          'rx_error_stats/inet6_error_stats/bad_inet6_afi_safi_count': 'None',
          'rx_error_stats/inet6_error_stats/bad_inet6_nexthop_count': 'None',
          'rx_error_stats/inet6_error_stats/bad_inet6_prefix_count': 'None',
        }
      },
      'event_info': {
        'type': 'dict_mix',
        'item_dict_mix': {
          'last_event_at': 'None',
          'last_event': 'None',
        }
        # 'description': 'None'
      },

    }
    url = self._endpoint
    response = json.loads(requests.get(url).content.decode('UTF-8'))
    json_all_xmpp = response['value']

    # Overall state gauge: value is FSM index + 1, or 0 when state is
    # not yet known (only event_info present).
    metric = Metric('xmpp_state_info', '', 'gauge')
    for i in range(len(json_all_xmpp)):
      cxmpp = json_all_xmpp[i]['value']
      conn =  json_all_xmpp[i]['name'].split(':')
      # NOTE(review): UVE name is assumed to be "control:compute" from
      # the indexing here -- confirm against the analytics API.
      control_node = conn[0]
      compute_node = conn[1]
      if ('XmppPeerInfoData' in cxmpp and 'event_info' in cxmpp['XmppPeerInfoData']):
        if ('state_info' not in cxmpp['XmppPeerInfoData']):
          # Peer has emitted events but no state yet: export 0 with a
          # literal 'null' state label.
          metric.add_sample('xmpp_state_info', value = 0, labels={
            'last_event': cxmpp['XmppPeerInfoData']['event_info']['last_event'],
            'control_node': control_node,
            'compute_node': compute_node,
            'state': 'null'
            })
        else:
          state = cxmpp['XmppPeerInfoData']['state_info']['state']
          metric.add_sample('xmpp_state_info', value = int(xmpp_state[state]) + 1, labels={
            'last_event': cxmpp['XmppPeerInfoData']['event_info']['last_event'],
            'control_node': control_node,
            'compute_node': compute_node,
            'state': cxmpp['XmppPeerInfoData']['state_info']['state'],
            })
    yield metric

    for name_metric in XmppPeerInfoData:
      if(XmppPeerInfoData[name_metric]['type'] == 'dict_dict'):
        for metric_item in XmppPeerInfoData[name_metric]['item_dict_dict']:
          # Keep the '/'-separated key for dict lookups; flatten it with
          # underscores for the metric name.
          item = metric_item
          path = metric_item.split('/')
          metric_item = metric_item.replace('/','_')
          metric = Metric('xmpp_'+metric_item, XmppPeerInfoData[name_metric]['item_dict_dict'][item], 'gauge')
          for i in range(len(json_all_xmpp)):
            cxmpp = json_all_xmpp[i]['value']
            conn =  json_all_xmpp[i]['name'].split(':')
            control_node = conn[0]
            compute_node = conn[1]
            if ('XmppPeerInfoData' in cxmpp and name_metric in cxmpp['XmppPeerInfoData']):
              
              # Walk the '/'-separated path into the nested stats dict.
              value = cxmpp['XmppPeerInfoData'][name_metric]
              for k in range(len(path)):
                value = value[path[k]]
              metric.add_sample('xmpp_'+metric_item, value = value, labels={
                'control_node': control_node,
                'compute_node': compute_node,
                })
          yield metric 

      elif(XmppPeerInfoData[name_metric]['type'] == 'dict_mix'):
        for metric_item in XmppPeerInfoData[name_metric]['item_dict_mix']:
          metric = Metric('xmpp_'+name_metric + '_' + metric_item, XmppPeerInfoData[name_metric]['item_dict_mix'][metric_item], 'gauge')
          for i in range(len(json_all_xmpp)):
            cxmpp = json_all_xmpp[i]['value']
            conn =  json_all_xmpp[i]['name'].split(':')
            control_node = conn[0]
            compute_node = conn[1]
            if ('XmppPeerInfoData' in cxmpp and name_metric in cxmpp['XmppPeerInfoData']):
              # Numbers become the sample value; strings become a label
              # with a constant value of 1 (booleans are skipped).
              if (type(cxmpp['XmppPeerInfoData'][name_metric][metric_item]) in [int,float] ):
                metric.add_sample('xmpp_'+name_metric + '_' + metric_item, value = cxmpp['XmppPeerInfoData'][name_metric][metric_item], labels={
                  'control_node': control_node,
                  'compute_node': compute_node,
                  })
              elif (type(cxmpp['XmppPeerInfoData'][name_metric][metric_item]) is str):
                metric.add_sample('xmpp_'+name_metric + '_' + metric_item, value = 1, labels={
                  metric_item: cxmpp['XmppPeerInfoData'][name_metric][metric_item],
                  'control_node': control_node,
                  'compute_node': compute_node,
                  })
          yield metric
 def collect(self):
     """Yield a histogram metric carrying an exemplar-bearing sample."""
     # Deliberately odd argument set (empty labels, zero value, no
     # timestamp) so that every add_sample parameter position is covered.
     hist = Metric("hh", "help", 'histogram')
     exemplar = Exemplar({'a': 'b'}, 0.5)
     hist.add_sample("hh_count", {}, 0, None, exemplar)
     yield hist
Ejemplo n.º 4
0
 def collect(self):
     """Yield an untyped metric whose sample value is a custom number type."""
     sample_value = MyNumber()
     nonnumber_metric = Metric("nonnumber", "Non number", 'untyped')
     nonnumber_metric.add_sample("nonnumber", {}, sample_value)
     yield nonnumber_metric
Ejemplo n.º 5
0
    def collect(self):
        """Query the NeuVector REST API and yield Prometheus metrics.

        Covers the summary, conversation, enforcer, host, admission
        control, image/container vulnerability and event-log endpoints.
        Each section is skipped silently when its request yields no
        response object.
        """
        # The endpoint is "host:port"; only the host is used as the
        # 'target' label value.
        eps = self._endpoint.split(':')
        ep = eps[0]

        # Get system summary
        response = self.get('/v1/system/summary')
        if response:
            sjson = json.loads(response.text)
            # Set summary metrics
            metric = Metric('nv_summary', 'A summary of ' + ep, 'summary')
            metric.add_sample('nv_summary_services',
                              value=sjson["summary"]["services"],
                              labels={'target': ep})
            metric.add_sample('nv_summary_policy',
                              value=sjson["summary"]["policy_rules"],
                              labels={'target': ep})
            metric.add_sample('nv_summary_runningWorkloads',
                              value=sjson["summary"]["running_workloads"],
                              labels={'target': ep})
            metric.add_sample('nv_summary_totalWorkloads',
                              value=sjson["summary"]["workloads"],
                              labels={'target': ep})
            metric.add_sample('nv_summary_hosts',
                              value=sjson["summary"]["hosts"],
                              labels={'target': ep})
            metric.add_sample('nv_summary_controllers',
                              value=sjson["summary"]["controllers"],
                              labels={'target': ep})
            metric.add_sample('nv_summary_enforcers',
                              value=sjson["summary"]["enforcers"],
                              labels={'target': ep})
            metric.add_sample('nv_summary_pods',
                              value=sjson["summary"]["running_pods"],
                              labels={'target': ep})
            metric.add_sample('nv_summary_disconnectedEnforcers',
                              value=sjson["summary"]["disconnected_enforcers"],
                              labels={'target': ep})
            # CVE database version and creation time.  An empty
            # cvedb_create_time means no database yet: report a fixed
            # version of 1.0 and a creation time of 0.
            dt = sjson["summary"]["cvedb_create_time"]
            if not dt:
                metric.add_sample('nv_summary_cvedbVersion',
                                  value=1.0,
                                  labels={'target': ep})
                metric.add_sample('nv_summary_cvedbTime',
                                  value=0,
                                  labels={'target': ep})
            else:
                metric.add_sample('nv_summary_cvedbVersion',
                                  value=sjson["summary"]["cvedb_version"],
                                  labels={'target': ep})
                # Convert the ISO timestamp to epoch milliseconds.
                ts = time.strptime(dt, '%Y-%m-%dT%H:%M:%SZ')
                metric.add_sample('nv_summary_cvedbTime',
                                  value=time.mktime(ts) * 1000,
                                  labels={'target': ep})
            yield metric

        # Get conversation
        response = self.get('/v1/conversation')
        if response:
            # Set conversation metrics
            metric = Metric('nv_conversation', 'conversation of ' + ep,
                            'gauge')
            for c in json.loads(response.text)['conversations']:
                # BUG FIX: replaced the try/except presence probe with a
                # direct membership test, and "is not 0" (identity
                # comparison against an int literal, a SyntaxWarning on
                # modern Python) with a value comparison.
                if 'ports' in c:
                    for k in c['ports']:
                        if c['bytes'] != 0:
                            metric.add_sample('nv_conversation_bytes',
                                              value=c['bytes'],
                                              labels={
                                                  'port': k,
                                                  'from': c['from'],
                                                  'to': c['to'],
                                                  'target': ep
                                              })
            yield metric

        # Get enforcer
        response = self.get('/v1/enforcer')
        if response:
            # Read each enforcer, set enforcer metrics
            metric = Metric('nv_enforcer', 'enforcers of ' + ep, 'gauge')
            for c in json.loads(response.text)['enforcers']:
                # Per-enforcer stats require a second request.
                response2 = self.get('/v1/enforcer/' + c['id'] + '/stats')
                if response2:
                    ejson = json.loads(response2.text)
                    metric.add_sample('nv_enforcer_cpu',
                                      value=ejson['stats']['span_1']['cpu'],
                                      labels={
                                          'id': c['id'],
                                          'host': c['host_name'],
                                          'display': c['display_name'],
                                          'target': ep
                                      })
                    metric.add_sample('nv_enforcer_memory',
                                      value=ejson['stats']['span_1']['memory'],
                                      labels={
                                          'id': c['id'],
                                          'host': c['host_name'],
                                          'display': c['display_name'],
                                          'target': ep
                                      })
            yield metric

        # Get host
        response = self.get('/v1/host')
        if response:
            # Set host metrics
            metric = Metric('nv_host', 'host information of ' + ep, 'gauge')
            for c in json.loads(response.text)['hosts']:
                metric.add_sample('nv_host_memory',
                                  value=c['memory'],
                                  labels={
                                      'name': c['name'],
                                      'id': c['id'],
                                      'target': ep
                                  })
            yield metric

        # Get debug admission stats
        response = self.get('/v1/debug/admission_stats')
        if response:
            if response.status_code != 200:
                print("Admission control stats request failed: %s" % response)
            else:
                djson = json.loads(response.text)
                # Set admission metrics
                metric = Metric('nv_admission',
                                'Debug admission stats of ' + ep, 'gauge')
                metric.add_sample('nv_admission_allowed',
                                  value=djson['stats']['k8s_allowed_requests'],
                                  labels={'target': ep})
                metric.add_sample('nv_admission_denied',
                                  value=djson['stats']['k8s_denied_requests'],
                                  labels={'target': ep})
                yield metric

        # Get image vulnerability
        response = self.get('/v1/scan/registry')
        if response:
            # Set vulnerability metrics
            metric = Metric('nv_image_vulnerability',
                            'image vulnerability of ' + ep, 'gauge')
            for c in json.loads(response.text)['summarys']:
                response2 = self.get('/v1/scan/registry/' + c['name'] +
                                     '/images')
                if response2:
                    for img in json.loads(response2.text)['images']:
                        metric.add_sample('nv_image_vulnerabilityHigh',
                                          value=img['high'],
                                          labels={
                                              'name':
                                              "%s:%s" %
                                              (img['repository'], img['tag']),
                                              'imageid':
                                              img['image_id'],
                                              'target':
                                              ep
                                          })
                        metric.add_sample('nv_image_vulnerabilityMedium',
                                          value=img['medium'],
                                          labels={
                                              'name':
                                              "%s:%s" %
                                              (img['repository'], img['tag']),
                                              'imageid':
                                              img['image_id'],
                                              'target':
                                              ep
                                          })
            yield metric

        # Get container vulnerability
        response = self.get('/v1/workload?brief=true')
        if response:
            # Report each service at most once, skipping service-mesh
            # sidecar containers and clean scans.
            cvlist = []
            metric = Metric('nv_container_vulnerability',
                            'container vulnerability of ' + ep, 'gauge')
            for c in json.loads(response.text)['workloads']:
                if c['service'] not in cvlist and c[
                        'service_mesh_sidecar'] is False:
                    scan = c['scan_summary']
                    # BUG FIX: compare against None with "is not", not "!=".
                    if scan is not None and (scan['high'] != 0
                                             or scan['medium'] != 0):
                        metric.add_sample('nv_container_vulnerabilityHigh',
                                          value=scan['high'],
                                          labels={
                                              'service': c['service'],
                                              'target': ep
                                          })
                        metric.add_sample('nv_container_vulnerabilityMedium',
                                          value=scan['medium'],
                                          labels={
                                              'service': c['service'],
                                              'target': ep
                                          })
                        cvlist.append(c['service'])
            yield metric

        # Set Log metrics: threat, incident and violation events share one
        # metric family; only the 5 most recent of each are exported.
        metric = Metric('nv_log', 'log of ' + ep, 'gauge')
        # Get log threat
        response = self.get('/v1/log/threat')
        if response:
            # Set threat
            ttimelist = []
            tnamelist = []
            tcnamelist = []
            tsnamelist = []
            tidlist = []
            for c in json.loads(response.text)['threats']:
                ttimelist.append(c['reported_timestamp'])
                tnamelist.append(c['name'])
                tcnamelist.append(c['client_workload_name'])
                tsnamelist.append(c['server_workload_name'])
                tidlist.append(c['id'])
            for x in range(0, min(5, len(tidlist))):
                # NOTE(review): label value "thread" looks like a typo for
                # "threat"; kept as-is for dashboard compatibility.
                metric.add_sample('nv_log_events',
                                  value=ttimelist[x] * 1000,
                                  labels={
                                      'log': "thread",
                                      'fromname': tcnamelist[x],
                                      'toname': " -> " + tsnamelist[x],
                                      'id': tidlist[x],
                                      'name': tnamelist[x],
                                      'target': ep
                                  })

        # Get log incident
        response = self.get('/v1/log/incident')
        if response:
            # Set incident metrics
            itimelist = []
            inamelist = []
            iwnamelist = []
            iidlist = []
            for c in json.loads(response.text)['incidents']:
                if 'workload_name' in c:
                    itimelist.append(c['reported_timestamp'])
                    inamelist.append(c['name'])
                    iwnamelist.append(c['workload_name'])
                    iidlist.append(c['workload_id'])
            for x in range(0, min(5, len(iidlist))):
                metric.add_sample('nv_log_events',
                                  value=itimelist[x] * 1000,
                                  labels={
                                      'log': "incident",
                                      'fromname': iwnamelist[x],
                                      'toname': " ",
                                      'name': inamelist[x],
                                      'id': iidlist[x],
                                      'target': ep
                                  })

        # Get log violation
        response = self.get('/v1/log/violation')
        if response:
            # Set violation metrics
            vtimelist = []
            vnamelist = []
            vcnamelist = []
            vsnamelist = []
            vidlist = []
            for c in json.loads(response.text)['violations']:
                vtimelist.append(c['reported_timestamp'])
                vcnamelist.append(c['client_name'])
                vnamelist.append(c['name'])
                vsnamelist.append(c['server_name'])
                vidlist.append(c['client_id'] + c['server_id'])
            for x in range(0, min(5, len(vidlist))):
                metric.add_sample('nv_log_events',
                                  value=vtimelist[x] * 1000,
                                  labels={
                                      'log': "violation",
                                      'id': vidlist[x],
                                      'toname': " -> " + vsnamelist[x],
                                      'fromname': vcnamelist[x],
                                      'name': vnamelist[x],
                                      'target': ep
                                  })
        # BUG FIX: yield nv_log unconditionally.  Previously the metric
        # was yielded only when the violation request succeeded, silently
        # dropping any threat/incident samples already collected.
        yield metric
Ejemplo n.º 6
0
    def parse_metrics(self, data):
        """Generator for trafficserver metrics."""
        # Counter for server restarts
        metric = Metric(
            'trafficserver_restart_count',
            'Count of traffic_server restarts.',
            'counter')
        metric.add_sample(
            'trafficserver_restart_count',
            value=data['proxy.node.restarts.proxy.restart_count'],
            labels={})
        yield metric

        #
        # HTTP
        #
        # Connections
        metric = Metric(
            'trafficserver_connections_total',
            'Connection count.',
            'counter')
        metric.add_sample(
            'trafficserver_connections_total',
            value=data['proxy.process.http.total_client_connections'],
            labels={'source': 'client',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_connections_total',
            value=data['proxy.process.http.total_server_connections'],
            labels={'source': 'server',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_connections_total',
            value=data['proxy.process.http.total_parent_proxy_connections'],
            labels={'source': 'parent_proxy',
                    'protocol': 'http'})
        yield metric

        # Incoming requests
        metric = Metric(
            'trafficserver_requests_incoming',
            'Incoming requests.',
            'gauge')
        metric.add_sample(
            'trafficserver_requests_incoming',
            value=data['proxy.process.http.incoming_requests'],
            labels={'protocol': 'http'})
        yield metric

        # Client aborts
        metric = Metric(
            'trafficserver_error_client_aborts_total',
            'Client aborts.',
            'counter')
        metric.add_sample(
            'trafficserver_client_aborts_total',
            value=data['proxy.process.http.err_client_abort_count_stat'],
            labels={'protocol': 'http'})
        yield metric

        # Connect fails
        metric = Metric(
            'trafficserver_connect_failures_total',
            'Connect failures.',
            'counter')
        metric.add_sample(
            'trafficserver_connect_failures_total',
            value=data['proxy.process.http.err_connect_fail_count_stat'],
            labels={'protocol': 'http'})
        yield metric

        # Transaction count
        metric = Metric(
            'trafficserver_transactions_total',
            'Total transactions.',
            'counter')
        metric.add_sample(
            'trafficserver_transactions_total',
            value=data[('proxy.node.http.'
                        'user_agents_total_transactions_count')],
            labels={'source': 'user_agent',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_transactions_total',
            value=data[('proxy.node.http.'
                        'origin_server_total_transactions_count')],
            labels={'source': 'origin_server',
                    'protocol': 'http'})
        yield metric

        # Transaction time spent, total
        metric = Metric(
            'trafficserver_transactions_time_ms_total',
            'Total transaction time (ms).',
            'counter')
        metric.add_sample(
            'trafficserver_transactions_time_total',
            value=data['proxy.process.http.total_transactions_time'],
            labels={})
        yield metric

        # Transaction time spent, hits
        metric = Metric(
            'trafficserver_hit_transaction_time_ms_total',
            'Total cache hit transaction time (ms).',
            'counter')
        metric.add_sample(
            'trafficserver_hit_transaction_time_ms_total',
            value=data['proxy.process.http.transaction_totaltime.hit_fresh'],
            labels={'state': 'fresh',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_hit_transaction_time_ms_total',
            value=data[('proxy.process.http.transaction_totaltime.'
                        'hit_revalidated')],
            labels={'state': 'revalidated',
                    'protocol': 'http'})
        yield metric

        # Transaction time spent, misses
        metric = Metric(
            'trafficserver_miss_transaction_time_ms_total',
            'Total cache miss transaction time (ms).',
            'counter')
        metric.add_sample(
            'trafficserver_miss_transaction_time_ms_total',
            value=data['proxy.process.http.transaction_totaltime.miss_cold'],
            labels={'state': 'cold',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_miss_transaction_time_ms_total',
            value=data[('proxy.process.http.transaction_totaltime.'
                        'miss_not_cacheable')],
            labels={'state': 'not_cacheable',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_miss_transaction_time_ms_total',
            value=data[('proxy.process.http.transaction_totaltime.'
                        'miss_changed')],
            labels={'state': 'changed',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_miss_transaction_time_ms_total',
            value=data[('proxy.process.http.transaction_totaltime.'
                        'miss_client_no_cache')],
            labels={'state': 'no_cache',
                    'protocol': 'http'})
        yield metric

        # Transaction time spent, errors
        metric = Metric(
            'trafficserver_error_transaction_time_ms_total',
            'Total cache error transaction time (ms).',
            'counter')
        metric.add_sample(
            'trafficserver_error_transaction_time_ms_total',
            value=data[('proxy.process.http.transaction_totaltime.errors.'
                        'aborts')],
            labels={'state': 'abort',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_error_transaction_time_ms_total',
            value=data[('proxy.process.http.transaction_totaltime.errors.'
                        'possible_aborts')],
            labels={'state': 'possible_abort',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_error_transaction_time_ms_total',
            value=data[('proxy.process.http.transaction_totaltime.errors.'
                        'connect_failed')],
            labels={'state': 'connect_failed',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_error_transaction_time_ms_total',
            value=data[('proxy.process.http.transaction_totaltime.errors.'
                        'other')],
            labels={'state': 'other',
                    'protocol': 'http'})
        yield metric

        # Transaction time spent, other
        metric = Metric(
            'trafficserver_other_transaction_time_ms_total',
            'Total other/unclassified transaction time (ms).',
            'counter')
        metric.add_sample(
            'trafficserver_other_transaction_time_ms_total',
            value=data[('proxy.process.http.transaction_totaltime.other.'
                        'unclassified')],
            labels={'state': 'unclassified',
                    'protocol': 'http'})
        yield metric

        # Transaction count, hits
        metric = Metric(
            'trafficserver_transaction_hits_total',
            'Transaction hit counts.',
            'counter')
        metric.add_sample(
            'trafficserver_transaction_hits_total',
            value=data[('proxy.process.http.transaction_counts.'
                        'hit_fresh')],
            labels={'state': 'fresh',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_transaction_hits_total',
            value=data[('proxy.process.http.transaction_counts.'
                        'hit_revalidated')],
            labels={'state': 'revalidated',
                    'protocol': 'http'})
        # Zero labels (misses)
        metric.add_sample(
            'trafficserver_transaction_hits_total',
            value='0',
            labels={'state': 'cold',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_transaction_hits_total',
            value='0',
            labels={'state': 'not_cacheable',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_transaction_hits_total',
            value='0',
            labels={'state': 'changed',
                    'protocol': 'http'})
        yield metric

        # Transaction count, misses
        metric = Metric(
            'trafficserver_transaction_misses_total',
            'Transaction miss counts.',
            'counter')
        metric.add_sample(
            'trafficserver_transaction_misses_total',
            value=data[('proxy.process.http.transaction_counts.'
                        'miss_cold')],
            labels={'state': 'cold',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_transaction_misses_total',
            value=data[('proxy.process.http.transaction_counts.'
                        'miss_not_cacheable')],
            labels={'state': 'not_cacheable',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_transaction_misses_total',
            value=data[('proxy.process.http.transaction_counts.'
                        'miss_changed')],
            labels={'state': 'changed',
                    'protocol': 'http'})
        # Zero labels (hits)
        metric.add_sample(
            'trafficserver_transaction_misses_total',
            value='0',
            labels={'state': 'fresh',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_transaction_misses_total',
            value='0',
            labels={'state': 'revalidated',
                    'protocol': 'http'})
        yield metric

        # Transaction count, errors
        metric = Metric(
            'trafficserver_transaction_errors_total',
            'Transaction error counts.',
            'counter')
        metric.add_sample(
            'trafficserver_transaction_errors_total',
            value=data[('proxy.process.http.transaction_counts.errors.'
                        'aborts')],
            labels={'state': 'abort',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_transaction_errors_total',
            value=data[('proxy.process.http.transaction_counts.errors.'
                        'possible_aborts')],
            labels={'state': 'possible_abort',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_transaction_errors_total',
            value=data[('proxy.process.http.transaction_counts.errors.'
                        'connect_failed')],
            labels={'state': 'connect_failed',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_transaction_errors_total',
            value=data[('proxy.process.http.transaction_counts.errors.'
                        'other')],
            labels={'state': 'other',
                    'protocol': 'http'})
        yield metric

        # Transaction count, others
        metric = Metric(
            'trafficserver_transaction_others_total',
            'Transaction other/unclassified counts.',
            'counter')
        metric.add_sample(
            'trafficserver_transaction_others_total',
            value=data[('proxy.process.http.transaction_counts.other.'
                        'unclassified')],
            labels={'state': 'unclassified',
                    'protocol': 'http'})
        yield metric

        # HTTP Responses
        metric = Metric(
            'trafficserver_responses_total',
            'Response count.',
            'counter')
        for code in TS_RESPONSE_CODES:
            key = 'proxy.process.http.{code}_responses'.format(code=code)
            metric.add_sample(
                'trafficserver_responses_total',
                value=data[key],
                labels={'code': code,
                        'protocol': 'http'})
        yield metric

        # HTTP Requests
        metric = Metric(
            'trafficserver_requests_total',
            'Request count.',
            'counter')
        for method in HTTP_VERBS_LOWER:
            key = 'proxy.process.http.{method}_requests'.format(method=method)
            metric.add_sample(
                'trafficserver_requests_total',
                value=data[key],
                labels={'method': method,
                        'protocol': 'http'})
        yield metric

        # Invalid requests
        metric = Metric(
            'trafficserver_client_requests_invalid_total',
            'Invalid client requests.',
            'counter')
        metric.add_sample(
            'trafficserver_client_requests_invalid_total',
            value=data['proxy.process.http.invalid_client_requests'],
            labels={'protocol': 'http'})
        yield metric

        # Requests without Host header
        metric = Metric(
            'trafficserver_client_requests_missing_host_hdr_total',
            'Client requests missing host header.',
            'counter')
        metric.add_sample(
            'trafficserver_client_requests_missing_host_hdr_total',
            value=data['proxy.process.http.missing_host_hdr'],
            labels={'protocol': 'http'})
        yield metric

        # Request size
        metric = Metric(
            'trafficserver_request_size_bytes_total',
            'Request size in bytes.',
            'counter')
        metric.add_sample(
            'trafficserver_request_size_bytes_total',
            value=data['proxy.node.http.user_agent_total_request_bytes'],
            labels={'source': 'user_agent',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_request_size_bytes_total',
            value=data['proxy.node.http.origin_server_total_request_bytes'],
            labels={'source': 'origin_server',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_request_size_bytes_total',
            value=data['proxy.node.http.parent_proxy_total_request_bytes'],
            labels={'source': 'parent_proxy',
                    'protocol': 'http'})
        yield metric

        # Response size
        metric = Metric(
            'trafficserver_response_size_bytes_total',
            'Response size in bytes.',
            'counter')
        metric.add_sample(
            'trafficserver_response_size_bytes_total',
            value=data['proxy.node.http.user_agent_total_response_bytes'],
            labels={'source': 'user_agent',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_response_size_bytes_total',
            value=data['proxy.node.http.origin_server_total_response_bytes'],
            labels={'source': 'origin_server',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_response_size_bytes_total',
            value=data['proxy.node.http.parent_proxy_total_response_bytes'],
            labels={'source': 'parent_proxy',
                    'protocol': 'http'})
        yield metric

        #
        # Cache
        #
        # Gather all cache volumes for cache statistics
        volumes = set()
        for key in data:
            if key.startswith('proxy.process.cache.volume_'):
                m = CACHE_VOLUMES.match(key)
                volumes.add(int(m.group(1)))

        # Create all cache volume metrics
        for volume in volumes:
            for metric in self._parse_volume_metrics(data, volume):
                yield metric

        metric = Metric(
            'trafficserver_ram_cache_hits_total',
            'RAM cache hit count.',
            'counter')
        metric.add_sample(
            'trafficserver_ram_cache_hits_total',
            value=data['proxy.process.cache.ram_cache.hits'],
            labels={'volume': str(volume)})
        yield metric

        metric = Metric(
            'trafficserver_ram_cache_misses_total',
            'RAM cache miss count.',
            'counter')
        metric.add_sample(
            'trafficserver_ram_cache_misses_total',
            value=data['proxy.process.cache.ram_cache.misses'],
            labels={})
        yield metric

        metric = Metric(
            'trafficserver_ram_cache_avail_size_bytes_total',
            'RAM cache available in bytes.',
            'gauge')
        metric.add_sample(
            'trafficserver_ram_cache_avail_size_bytes_total',
            value=data['proxy.process.cache.ram_cache.total_bytes'],
            labels={})
        yield metric

        metric = Metric(
            'trafficserver_ram_cache_used_bytes_total',
            'RAM cache used in bytes.',
            'gauge')
        metric.add_sample(
            'trafficserver_ram_cache_used_bytes_total',
            value=data['proxy.process.cache.ram_cache.bytes_used'],
            labels={})
        yield metric
Ejemplo n.º 7
0
 def collect(self):
     """Yield one counter family with a total sample and a created sample carrying an exemplar."""
     counter = Metric("cc", "A counter", 'counter')
     # add_sample positional order is (name, labels, value, timestamp, exemplar);
     # spelled out with keywords here for readability.
     counter.add_sample("cc_total", labels={}, value=1, timestamp=None, exemplar=None)
     counter.add_sample("cc_created", labels={}, value=123.456, timestamp=None,
                        exemplar=Exemplar({'a': 'b'}, 1.0, 123.456))
     yield counter
Ejemplo n.º 8
0
 def _new_metric(metric_desc: str, metric_name: str):
     """Create the 'telldus_sensor_data' Metric with the given description.

     NOTE(review): the third positional argument of Metric is the metric
     *type* ('gauge'/'counter'), so `metric_name` appears to carry the type
     rather than a name — confirm against the callers.
     """
     return Metric('telldus_sensor_data', metric_desc, metric_name)
Ejemplo n.º 9
0
    def collect(self):
        """Yield Prometheus metrics describing the current locust swarm.

        Emits user count, per-error occurrence counts, slave count when
        running distributed, failure ratio, runner state, and one metric
        family per request-statistic field. Yields nothing unless the
        runner is hatching or running.
        """
        # collect metrics only when locust runner is hatching or running.
        if runners.locust_runner and runners.locust_runner.state in (
                runners.STATE_HATCHING, runners.STATE_RUNNING):

            stats = []

            # Snapshot each endpoint's stats plus the aggregated total row
            # into plain dicts so the metric loops below can index by key.
            for s in chain(
                    locust_stats.sort_stats(
                        runners.locust_runner.request_stats),
                [runners.locust_runner.stats.total]):
                stats.append({
                    "method":
                    s.method,
                    "name":
                    s.name,
                    "num_requests":
                    s.num_requests,
                    "num_failures":
                    s.num_failures,
                    "avg_response_time":
                    s.avg_response_time,
                    "min_response_time":
                    s.min_response_time or 0,
                    "max_response_time":
                    s.max_response_time,
                    "current_rps":
                    s.current_rps,
                    "median_response_time":
                    s.median_response_time,
                    "ninetieth_response_time":
                    s.get_response_time_percentile(0.9),
                    # only total stats can use current_response_time, so sad.
                    #"current_response_time_percentile_95": s.get_current_response_time_percentile(0.95),
                    "avg_content_length":
                    s.avg_content_length,
                    "current_fail_per_sec":
                    s.current_fail_per_sec
                })

            # perhaps StatsError.parse_error in e.to_dict only works in python slave, take notices!
            errors = [
                e.to_dict()
                for e in six.itervalues(runners.locust_runner.errors)
            ]

            # Number of simulated users currently swarming.
            metric = Metric('locust_user_count', 'Swarmed users', 'gauge')
            metric.add_sample('locust_user_count',
                              value=runners.locust_runner.user_count,
                              labels={})
            yield metric

            # One sample per distinct (path, method, error) with its count.
            metric = Metric('locust_errors', 'Locust requests errors', 'gauge')
            for err in errors:
                metric.add_sample('locust_errors',
                                  value=err['occurrences'],
                                  labels={
                                      'path': err['name'],
                                      'method': err['method'],
                                      'error': err['error']
                                  })
            yield metric

            # Slave count is only meaningful on a master in distributed mode.
            is_distributed = isinstance(runners.locust_runner,
                                        runners.MasterLocustRunner)
            if is_distributed:
                metric = Metric('locust_slave_count',
                                'Locust number of slaves', 'gauge')
                metric.add_sample('locust_slave_count',
                                  value=len(
                                      runners.locust_runner.clients.values()),
                                  labels={})
                yield metric

            metric = Metric('locust_fail_ratio', 'Locust failure ratio',
                            'gauge')
            metric.add_sample(
                'locust_fail_ratio',
                value=runners.locust_runner.stats.total.fail_ratio,
                labels={})
            yield metric

            # Runner state exported as a constant 1 with the state as a label.
            metric = Metric('locust_state', 'State of the locust swarm',
                            'gauge')
            metric.add_sample('locust_state',
                              value=1,
                              labels={'state': runners.locust_runner.state})
            yield metric

            # Each stat field below is fanned out as its own metric family.
            stats_metrics = [
                'avg_content_length', 'avg_response_time', 'current_rps',
                'current_fail_per_sec', 'max_response_time',
                'ninetieth_response_time', 'median_response_time',
                'min_response_time', 'num_failures', 'num_requests'
            ]

            for mtr in stats_metrics:
                # Monotonic request/failure totals are counters; the rest gauges.
                mtype = 'gauge'
                if mtr in ['num_requests', 'num_failures']:
                    mtype = 'counter'
                metric = Metric('locust_stats_' + mtr, 'Locust stats ' + mtr,
                                mtype)
                for stat in stats:
                    # Aggregated stat's method label is None, so name it as Aggregated
                    # locust has changed name Total to Aggregated since 0.12.1
                    if 'Aggregated' != stat['name']:
                        metric.add_sample('locust_stats_' + mtr,
                                          value=stat[mtr],
                                          labels={
                                              'path': stat['name'],
                                              'method': stat['method']
                                          })
                    else:
                        metric.add_sample('locust_stats_' + mtr,
                                          value=stat[mtr],
                                          labels={
                                              'path': stat['name'],
                                              'method': 'Aggregated'
                                          })
                yield metric
Ejemplo n.º 10
0
    def _parse_volume_metrics(self, data, volume):
        """Yield cache metrics for a single cache volume.

        Args:
            data: mapping of traffic_server stat names to values (strings
                or numbers; all converted with float()).
            volume: integer cache volume number used in stat keys/labels.

        Yields:
            Metric families for RAM-cache hits, cache capacity, cache
            usage, and per-operation result counts.
        """
        metric = Metric("trafficserver_ram_cache_hits_total",
                        "RAM cache hit count.", "counter")
        # NOTE(review): ram_cache.hits looks like a node-wide stat (no
        # volume in the key) yet is labelled per-volume here — confirm.
        metric.add_sample(
            "trafficserver_ram_cache_hits_total",
            value=float(data["proxy.process.cache.ram_cache.hits"]),
            labels={"volume": str(volume)},
        )
        yield metric

        metric = Metric(
            "trafficserver_cache_avail_size_bytes_total",
            "Total cache available.",
            "gauge",
        )
        metric.add_sample(
            "trafficserver_cache_avail_size_bytes_total",
            # Bug fix: capacity comes from bytes_total (was bytes_used,
            # which swapped this metric with the "used" one below).
            value=float(data[
                "proxy.process.cache.volume_{0}.bytes_total".format(volume)]),
            labels={"volume": str(volume)},
        )
        yield metric

        metric = Metric(
            "trafficserver_cache_used_bytes_total",
            "Total cache used in bytes.",
            "gauge",
        )
        metric.add_sample(
            "trafficserver_cache_used_bytes_total",
            # Bug fix: occupancy comes from bytes_used (was bytes_total).
            value=float(data[
                "proxy.process.cache.volume_{0}.bytes_used".format(volume)]),
            labels={"volume": str(volume)},
        )
        yield metric

        # One counter family covering every (operation, result) pair.
        metric = Metric("trafficserver_cache_operations_total",
                        "Cache operation count.", "counter")
        for op in (
                "lookup",
                "read",
                "write",
                "update",
                "remove",
                "evacuate",
                "scan",
                "read_busy",
        ):
            for result in ("success", "failure"):
                k = "proxy.process.cache.volume_{volume}.{op}.{result}".format(
                    volume=volume, op=op, result=result)
                metric.add_sample(
                    "trafficserver_cache_operations_total",
                    value=float(data[k]),
                    labels={
                        "volume": str(volume),
                        "operation": op,
                        "result": result
                    },
                )
        yield metric
Ejemplo n.º 11
0
    def parse_metrics(self, data):
        """Generator for trafficserver metrics."""
        # Counter for server restarts
        metric = Metric(
            "trafficserver_restart_count",
            "Count of traffic_server restarts.",
            "counter",
        )
        metric.add_sample(
            "trafficserver_restart_count",
            value=float(data["proxy.node.restarts.proxy.restart_count"]),
            labels={},
        )
        yield metric

        #
        # HTTP
        #
        # Connections
        metric = Metric("trafficserver_connections_total", "Connection count.",
                        "counter")
        metric.add_sample(
            "trafficserver_connections_total",
            value=float(data["proxy.process.http.total_client_connections"]),
            labels={
                "source": "client",
                "protocol": "http"
            },
        )
        metric.add_sample(
            "trafficserver_connections_total",
            value=float(data["proxy.process.http.total_server_connections"]),
            labels={
                "source": "server",
                "protocol": "http"
            },
        )
        metric.add_sample(
            "trafficserver_connections_total",
            value=float(
                data["proxy.process.http.total_parent_proxy_connections"]),
            labels={
                "source": "parent_proxy",
                "protocol": "http"
            },
        )
        yield metric

        # Incoming requests
        metric = Metric("trafficserver_requests_incoming",
                        "Incoming requests.", "gauge")
        metric.add_sample(
            "trafficserver_requests_incoming",
            value=float(data["proxy.process.http.incoming_requests"]),
            labels={"protocol": "http"},
        )
        yield metric

        # Client aborts
        metric = Metric("trafficserver_error_client_aborts_total",
                        "Client aborts.", "counter")
        metric.add_sample(
            "trafficserver_client_aborts_total",
            value=float(
                data["proxy.process.http.err_client_abort_count_stat"]),
            labels={"protocol": "http"},
        )
        yield metric

        # Connect fails
        metric = Metric("trafficserver_connect_failures_total",
                        "Connect failures.", "counter")
        metric.add_sample(
            "trafficserver_connect_failures_total",
            value=float(
                data["proxy.process.http.err_connect_fail_count_stat"]),
            labels={"protocol": "http"},
        )
        yield metric

        # Transaction count
        metric = Metric("trafficserver_transactions_total",
                        "Total transactions.", "counter")
        metric.add_sample(
            "trafficserver_transactions_total",
            value=float(data[("proxy.node.http."
                              "user_agents_total_transactions_count")]),
            labels={
                "source": "user_agent",
                "protocol": "http"
            },
        )
        metric.add_sample(
            "trafficserver_transactions_total",
            value=float(
                data["proxy.node.http.origin_server_total_transactions_count"]
            ),
            labels={
                "source": "origin_server",
                "protocol": "http"
            },
        )
        yield metric

        # Transaction time spent, total
        metric = Metric(
            "trafficserver_transactions_time_ms_total",
            "Total transaction time (ms).",
            "counter",
        )
        metric.add_sample(
            "trafficserver_transactions_time_total",
            value=float(data["proxy.process.http.total_transactions_time"]),
            labels={},
        )
        yield metric

        # Transaction time spent, hits
        metric = Metric(
            "trafficserver_hit_transaction_time_ms_total",
            "Total cache hit transaction time (ms).",
            "counter",
        )
        metric.add_sample(
            "trafficserver_hit_transaction_time_ms_total",
            value=float(
                data["proxy.process.http.transaction_totaltime.hit_fresh"]),
            labels={
                "state": "fresh",
                "protocol": "http"
            },
        )
        metric.add_sample(
            "trafficserver_hit_transaction_time_ms_total",
            value=float(data[
                "proxy.process.http.transaction_totaltime.hit_revalidated"]),
            labels={
                "state": "revalidated",
                "protocol": "http"
            },
        )
        yield metric

        # Transaction time spent, misses
        metric = Metric(
            "trafficserver_miss_transaction_time_ms_total",
            "Total cache miss transaction time (ms).",
            "counter",
        )
        metric.add_sample(
            "trafficserver_miss_transaction_time_ms_total",
            value=float(
                data["proxy.process.http.transaction_totaltime.miss_cold"]),
            labels={
                "state": "cold",
                "protocol": "http"
            },
        )
        metric.add_sample(
            "trafficserver_miss_transaction_time_ms_total",
            value=float(data[
                "proxy.process.http.transaction_totaltime.miss_not_cacheable"]
                        ),
            labels={
                "state": "not_cacheable",
                "protocol": "http"
            },
        )
        metric.add_sample(
            "trafficserver_miss_transaction_time_ms_total",
            value=float(
                data["proxy.process.http.transaction_totaltime.miss_changed"]),
            labels={
                "state": "changed",
                "protocol": "http"
            },
        )
        metric.add_sample(
            "trafficserver_miss_transaction_time_ms_total",
            value=float(data[
                "proxy.process.http.transaction_totaltime.miss_client_no_cache"]
                        ),
            labels={
                "state": "no_cache",
                "protocol": "http"
            },
        )
        yield metric

        # Transaction time spent, errors
        metric = Metric(
            "trafficserver_error_transaction_time_ms_total",
            "Total cache error transaction time (ms).",
            "counter",
        )
        metric.add_sample(
            "trafficserver_error_transaction_time_ms_total",
            value=float(
                data["proxy.process.http.transaction_totaltime.errors.aborts"]
            ),
            labels={
                "state": "abort",
                "protocol": "http"
            },
        )
        metric.add_sample(
            "trafficserver_error_transaction_time_ms_total",
            value=float(data[("proxy.process.http.transaction_totaltime."
                              "errors.possible_aborts")]),
            labels={
                "state": "possible_abort",
                "protocol": "http"
            },
        )
        metric.add_sample(
            "trafficserver_error_transaction_time_ms_total",
            value=float(data[("proxy.process.http.transaction_totaltime."
                              "errors.connect_failed")]),
            labels={
                "state": "connect_failed",
                "protocol": "http"
            },
        )
        metric.add_sample(
            "trafficserver_error_transaction_time_ms_total",
            value=float(data[("proxy.process.http.transaction_totaltime."
                              "errors.other")]),
            labels={
                "state": "other",
                "protocol": "http"
            },
        )
        yield metric

        # Transaction time spent, other
        metric = Metric(
            "trafficserver_other_transaction_time_ms_total",
            "Total other/unclassified transaction time (ms).",
            "counter",
        )
        try:
            metric.add_sample(
                "trafficserver_other_transaction_time_ms_total",
                value=float(data[("proxy.process.http.transaction_totaltime."
                                  "errors.unclassified")]),
                labels={
                    "state": "unclassified",
                    "protocol": "http"
                },
            )
        except KeyError:
            pass
        else:
            yield metric

        # Transaction count, hits. The "fresh" and "revalidated" states are
        # read from the stats snapshot; the miss states are emitted as
        # explicit zeros below so the metric always carries the full set of
        # state labels (presumably so downstream queries never see a
        # missing series — confirm).
        metric = Metric("trafficserver_transaction_hits_total",
                        "Transaction hit counts.", "counter")
        metric.add_sample(
            "trafficserver_transaction_hits_total",
            value=float(
                data["proxy.process.http.transaction_counts.hit_fresh"]),
            labels={
                "state": "fresh",
                "protocol": "http"
            },
        )
        metric.add_sample(
            "trafficserver_transaction_hits_total",
            value=float(
                data["proxy.process.http.transaction_counts.hit_revalidated"]),
            labels={
                "state": "revalidated",
                "protocol": "http"
            },
        )
        # Zero labels (misses)
        metric.add_sample(
            "trafficserver_transaction_hits_total",
            value=0.0,
            labels={
                "state": "cold",
                "protocol": "http"
            },
        )
        metric.add_sample(
            "trafficserver_transaction_hits_total",
            value=0.0,
            labels={
                "state": "not_cacheable",
                "protocol": "http"
            },
        )
        metric.add_sample(
            "trafficserver_transaction_hits_total",
            value=0.0,
            labels={
                "state": "changed",
                "protocol": "http"
            },
        )
        yield metric

        # Transaction count, misses. Mirror image of the hits metric above:
        # the three miss states come from the stats snapshot, while the hit
        # states ("fresh", "revalidated") are emitted as explicit zeros to
        # keep the label set symmetric with trafficserver_transaction_hits_total.
        metric = Metric(
            "trafficserver_transaction_misses_total",
            "Transaction miss counts.",
            "counter",
        )
        metric.add_sample(
            "trafficserver_transaction_misses_total",
            value=float(
                data["proxy.process.http.transaction_counts.miss_cold"]),
            labels={
                "state": "cold",
                "protocol": "http"
            },
        )
        metric.add_sample(
            "trafficserver_transaction_misses_total",
            value=float(data[
                "proxy.process.http.transaction_counts.miss_not_cacheable"]),
            labels={
                "state": "not_cacheable",
                "protocol": "http"
            },
        )
        metric.add_sample(
            "trafficserver_transaction_misses_total",
            value=float(
                data["proxy.process.http.transaction_counts.miss_changed"]),
            labels={
                "state": "changed",
                "protocol": "http"
            },
        )
        # Zero labels (hits)
        metric.add_sample(
            "trafficserver_transaction_misses_total",
            value=0.0,
            labels={
                "state": "fresh",
                "protocol": "http"
            },
        )
        metric.add_sample(
            "trafficserver_transaction_misses_total",
            value=0.0,
            labels={
                "state": "revalidated",
                "protocol": "http"
            },
        )
        yield metric

        # Transaction count, errors — one sample per error state. Note the
        # "state" label values are singular ("abort") while the stat keys
        # are plural ("aborts"); this mapping is intentional in the code as
        # written.
        metric = Metric(
            "trafficserver_transaction_errors_total",
            "Transaction error counts.",
            "counter",
        )
        metric.add_sample(
            "trafficserver_transaction_errors_total",
            value=float(data[("proxy.process.http.transaction_counts.errors."
                              "aborts")]),
            labels={
                "state": "abort",
                "protocol": "http"
            },
        )
        metric.add_sample(
            "trafficserver_transaction_errors_total",
            value=float(data[("proxy.process.http.transaction_counts.errors."
                              "possible_aborts")]),
            labels={
                "state": "possible_abort",
                "protocol": "http"
            },
        )
        metric.add_sample(
            "trafficserver_transaction_errors_total",
            value=float(data[("proxy.process.http.transaction_counts.errors."
                              "connect_failed")]),
            labels={
                "state": "connect_failed",
                "protocol": "http"
            },
        )
        metric.add_sample(
            "trafficserver_transaction_errors_total",
            value=float(data[("proxy.process.http.transaction_counts.errors."
                              "other")]),
            labels={
                "state": "other",
                "protocol": "http"
            },
        )
        yield metric

        # Transaction count, others — single unclassified bucket. Unlike the
        # "other transaction time" metric above, this key is read without a
        # KeyError guard, so it is presumably always present — confirm.
        metric = Metric(
            "trafficserver_transaction_others_total",
            "Transaction other/unclassified counts.",
            "counter",
        )
        metric.add_sample(
            "trafficserver_transaction_others_total",
            value=float(data[("proxy.process.http.transaction_counts.other."
                              "unclassified")]),
            labels={
                "state": "unclassified",
                "protocol": "http"
            },
        )
        yield metric

        # HTTP responses, one sample per status code listed in the
        # module-level TS_RESPONSE_CODES constant (defined elsewhere in
        # this file). The code value becomes the "code" label.
        metric = Metric("trafficserver_responses_total", "Response count.",
                        "counter")
        for code in TS_RESPONSE_CODES:
            key = "proxy.process.http.{code}_responses".format(code=code)
            metric.add_sample(
                "trafficserver_responses_total",
                value=float(data[key]),
                labels={
                    "code": code,
                    "protocol": "http"
                },
            )
        yield metric

        # HTTP requests, one sample per method listed in the module-level
        # HTTP_VERBS_LOWER constant; the method name becomes the "method"
        # label.
        metric = Metric("trafficserver_requests_total", "Request count.",
                        "counter")
        for method in HTTP_VERBS_LOWER:
            key = "proxy.process.http.{method}_requests".format(method=method)
            metric.add_sample(
                "trafficserver_requests_total",
                value=float(data[key]),
                labels={
                    "method": method,
                    "protocol": "http"
                },
            )
        yield metric

        # Invalid requests — count of client requests Traffic Server
        # rejected as malformed.
        metric = Metric(
            "trafficserver_client_requests_invalid_total",
            "Invalid client requests.",
            "counter",
        )
        metric.add_sample(
            "trafficserver_client_requests_invalid_total",
            value=float(data["proxy.process.http.invalid_client_requests"]),
            labels={"protocol": "http"},
        )
        yield metric

        # Requests without Host header
        metric = Metric(
            "trafficserver_client_requests_missing_host_hdr_total",
            "Client requests missing host header.",
            "counter",
        )
        metric.add_sample(
            "trafficserver_client_requests_missing_host_hdr_total",
            value=float(data["proxy.process.http.missing_host_hdr"]),
            labels={"protocol": "http"},
        )
        yield metric

        # Request size in bytes, broken down by "source": bytes exchanged
        # with user agents, origin servers, and parent proxies. These read
        # the node-level (proxy.node.*) aggregates rather than the
        # proxy.process.* stats used elsewhere in this collector.
        metric = Metric(
            "trafficserver_request_size_bytes_total",
            "Request size in bytes.",
            "counter",
        )
        metric.add_sample(
            "trafficserver_request_size_bytes_total",
            value=float(
                data["proxy.node.http.user_agent_total_request_bytes"]),
            labels={
                "source": "user_agent",
                "protocol": "http"
            },
        )
        metric.add_sample(
            "trafficserver_request_size_bytes_total",
            value=float(
                data["proxy.node.http.origin_server_total_request_bytes"]),
            labels={
                "source": "origin_server",
                "protocol": "http"
            },
        )
        metric.add_sample(
            "trafficserver_request_size_bytes_total",
            value=float(
                data["proxy.node.http.parent_proxy_total_request_bytes"]),
            labels={
                "source": "parent_proxy",
                "protocol": "http"
            },
        )
        yield metric

        # Response size in bytes — same three-way "source" breakdown as the
        # request-size metric above (user agent / origin server / parent
        # proxy), using the proxy.node.* aggregate stats.
        metric = Metric(
            "trafficserver_response_size_bytes_total",
            "Response size in bytes.",
            "counter",
        )
        metric.add_sample(
            "trafficserver_response_size_bytes_total",
            value=float(
                data["proxy.node.http.user_agent_total_response_bytes"]),
            labels={
                "source": "user_agent",
                "protocol": "http"
            },
        )
        metric.add_sample(
            "trafficserver_response_size_bytes_total",
            value=float(
                data["proxy.node.http.origin_server_total_response_bytes"]),
            labels={
                "source": "origin_server",
                "protocol": "http"
            },
        )
        metric.add_sample(
            "trafficserver_response_size_bytes_total",
            value=float(
                data["proxy.node.http.parent_proxy_total_response_bytes"]),
            labels={
                "source": "parent_proxy",
                "protocol": "http"
            },
        )
        yield metric

        #
        # Cache
        #
        # Gather all cache volumes for cache statistics
        volumes = set()
        for key in data:
            if key.startswith("proxy.process.cache.volume_"):
                m = CACHE_VOLUMES.match(key)
                volumes.add(int(m.group(1)))

        # Create all cache volume metrics
        for volume in volumes:
            for metric in self._parse_volume_metrics(data, volume):
                yield metric

        # RAM cache miss count (counter, no labels).
        metric = Metric("trafficserver_ram_cache_misses_total",
                        "RAM cache miss count.", "counter")
        metric.add_sample(
            "trafficserver_ram_cache_misses_total",
            value=float(data["proxy.process.cache.ram_cache.misses"]),
            labels={},
        )
        yield metric

        # NOTE(review): metric name says "avail" but the value read is
        # ram_cache.total_bytes (configured size, not free space) — confirm
        # intent. Also note the "_total" suffix on a gauge departs from the
        # Prometheus naming convention; kept as-is since renaming would
        # break existing dashboards.
        metric = Metric(
            "trafficserver_ram_cache_avail_size_bytes_total",
            "RAM cache available in bytes.",
            "gauge",
        )
        metric.add_sample(
            "trafficserver_ram_cache_avail_size_bytes_total",
            value=float(data["proxy.process.cache.ram_cache.total_bytes"]),
            labels={},
        )
        yield metric

        # RAM cache bytes currently in use (gauge, no labels).
        metric = Metric(
            "trafficserver_ram_cache_used_bytes_total",
            "RAM cache used in bytes.",
            "gauge",
        )
        metric.add_sample(
            "trafficserver_ram_cache_used_bytes_total",
            value=float(data["proxy.process.cache.ram_cache.bytes_used"]),
            labels={},
        )
        yield metric