Example #1
    def collect(self):
        metrics = self._get_metrics()

        if metrics:
            for k, v in metrics.items():
                metric = Metric(k, k, 'counter')
                labels = {}
                labels.update(self._labels)
                metric.add_sample(k, value=v, labels=labels)

                if metric.samples:
                    yield metric
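Note: all of these examples implement the prometheus_client custom-collector pattern: a class whose collect() yields Metric objects populated via add_sample(). For orientation, here is a minimal, self-contained sketch of how such a collector is registered and served; the CustomCollector class and its _get_metrics() stub are hypothetical stand-ins, not code taken from the examples.

    # Hedged wiring sketch, assuming prometheus_client is installed.
    # CustomCollector and _get_metrics() are hypothetical stand-ins.
    import time

    from prometheus_client import start_http_server
    from prometheus_client.core import REGISTRY, Metric


    class CustomCollector:
        def _get_metrics(self):
            # Stand-in for a real data source.
            return {'jobs_processed': 42.0}

        def collect(self):
            for k, v in self._get_metrics().items():
                metric = Metric(k, k, 'gauge')
                metric.add_sample(k, value=v, labels={})
                yield metric


    if __name__ == '__main__':
        REGISTRY.register(CustomCollector())
        start_http_server(8000)  # metrics served at http://localhost:8000/metrics
        while True:
            time.sleep(60)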
Example #2
    def collect(self):
        with lock:
            log.info('collecting...')

            response = self.client.tickers()

            metric = Metric('market', 'crypto currency market metric values',
                            'gauge')

            log.debug(response)

            for k, v in response.items():
                metric.add_sample(k, value=v, labels={
                    "currency": "bitcoin",
                })

            yield metric
Example #3
    def collect(self):

        r = requests.get('%s/system/health/v1/units' % self._url,
                         headers={'Authorization': 'token=' + self._token},
                         verify=False)

        # if failed, refresh token
        if r.status_code == 401:
            logging.info("Failed auth, getting new auth token")
            self.get_token()
            # Re-run collection with the fresh token; without yield from,
            # the recursive call's metrics would be discarded.
            yield from self.collect()
        else:
            healthmetrics = r.json()
            for hm in healthmetrics['units']:
                logging.info(hm)
                hm_removed_periods = hm[u'id'].replace(".", "_")
                hm_removed_dashes = hm_removed_periods.replace("-", "_")
                metric = Metric(hm_removed_dashes, '', 'gauge')
                metric.add_sample(hm_removed_dashes,
                                  value=hm[u'health'],
                                  labels={
                                      'name': hm[u'name'],
                                      'desc': hm[u'description']
                                  })
                yield metric
                logging.info("%s:%d" % (hm[u'id'], hm[u'health']))
Example #4
 def collect(self):
     jsondata = collect(self.jsondata)  # module-level helper (not shown) that returns the JSON payload
     metric = Metric(self.metric_name, "{}_{}".format(self.metric_name, self.hostname), 'gauge')
     metric.add_sample(self.metric_name, value=jsondata['data']['data']['value'],
                       labels={ 'host': self.hostname, '{}_subvalue'.format(self.metric_name): 'None',
                       })
     yield metric
Example #5
 def collect(self):
     # Fetch the JSON
     peer_info = merged_peer_info(self.wg)
     peer_metadata = get_peer_metadata()
     for iface in peer_info:
         metric = Metric(f"interface_info_{iface['iface']}",
                         'interface_information', 'summary')
         for peer in iface['peers']:
             peer.update(peer_metadata.get(peer['public_key'], {}))
             for k, v in peer.items():
                 if k not in [
                         'latency_ms', 'packet_loss', 'rx_bytes', 'tx_bytes'
                 ]:
                     continue
                 metric.add_sample(f"iface_information_{k}",
                                   value=str(v),
                                   labels={
                                       'hostname':
                                       os.environ.get(
                                           'SYNTROPY_AGENT_NAME',
                                           socket.gethostname()),
                                       'ifname':
                                       iface['iface'],
                                       'peer':
                                       peer['public_key'],
                                       'internal_ip':
                                       peer['internal_ip'],
                                       "device_id":
                                       peer.get('device_id'),
                                       "device_name":
                                       peer.get('device_name'),
                                       "device_public_ipv4":
                                       peer.get('device_public_ipv4')
                                   })
         yield metric
Example #6
 def collect(self):
   start = 1
   while True:
       # query the api
        log.info('starting with %d' % start)
       r = requests.get('%s?convert=BTC&start=%d' % (self.endpoint, start))
       request_time = r.elapsed.total_seconds()
       log.info('elapsed time -' + str(request_time))
       response = json.loads(r.content.decode('UTF-8'))
       if not response['data']:
           break
       # setup the metric
       metric = Metric('coinmarketcap_response_time', 'Total time for the coinmarketcap API to respond.', 'summary')
       # add the response time as a metric
       metric.add_sample('coinmarketcap_response_time', value=float(request_time), labels={'name': 'coinmarketcap.com'})
       yield metric
       metric = Metric('coin_market', 'coinmarketcap metric values', 'gauge')
       for _, value in response['data'].items():
           for that in ['rank', 'total_supply', 'max_supply', 'circulating_supply']:
               coinmarketmetric = '_'.join(['coin_market', that])
               if value[that] is not None:
                   metric.add_sample(coinmarketmetric, value=float(value[that]), labels={'id': value['website_slug'], 'name': value['name'], 'symbol': value['symbol']})
           for price in ['USD', 'BTC']:
               for that in ['price', 'volume_24h', 'market_cap', 'percent_change_1h', 'percent_change_24h', 'percent_change_7d']:
                   coinmarketmetric = '_'.join(['coin_market', that, price]).lower()
                   if value['quotes'][price] is None:
                       continue
                   if value['quotes'][price][that] is not None:
                       metric.add_sample(coinmarketmetric, value=float(value['quotes'][price][that]), labels={'id': value['website_slug'], 'name': value['name'], 'symbol': value['symbol']})
       yield metric
       start += len(response['data'])
Example #7
    def collect(self):
        # Fetch the JSON
        #response = json.loads(requests.get(self._endpoint).content.decode('UTF-8'))
        # fetch json from file
        json_data = open("./indices").read()

        # Summarize each index pattern. Returns a list with one entry per
        # index group; e.g. for indices named customer-metrics-*, a regex
        # like '(.*-metrics).*' yields the group 'customer-metrics'.
        indicesGroups = summarizeIndices(json_data, indicesPattern)

        # Sum the total size in bytes of each index group. Returns a dict
        # mapping the first regex match group to its total size.
        indicesGroupSizes = collectIndicesGroupSizes(indicesGroups, json_data,
                                                     indicesPattern)

        #Metric es_group_indices_size
        metric = Metric('es_group_indices_size', 'Size of a group of indices',
                        'gauge')
        # Walk the indicesGroupSizes dict and add each entry to the metric
        for labelValue, sizeValue in indicesGroupSizes.items():
            metric.add_sample('es_group_indices_size',
                              value=sizeValue,
                              labels={'group': labelValue})

        # Expose the metric
        yield metric
Example #8
    def collect(self):

        url = self._endpoint
        response = json.loads(requests.get(url).content.decode('UTF-8'))
        json_all_bgp = response['value']
        metric = Metric('bgp_state_info', '', 'gauge')
        for entry in json_all_bgp:
            cbgp = entry['value']
            conn = entry['name'].split(':')
            snode = conn[4]
            dnode = conn[9]
            if ('BgpPeerInfoData' in cbgp
                    and 'state_info' in cbgp['BgpPeerInfoData']):
                state_info = cbgp['BgpPeerInfoData']['state_info']
                metric.add_sample(
                    'bgp_state_info',
                    value=1,
                    labels={
                        'bgp_last_state': state_info['last_state'],
                        'bgp_state': state_info['state'],
                        'control_node': snode,
                        'peer': dnode,
                    })
        yield metric
Example #9
    def collect(self):
        response = self._get_metrics()

        if self._exclude:
            self.filter_exclude()

        for i in self._metric_collect:
            metric = Metric(i['name'], i['name'], i['metric_type'])

            for m in i['collect']:
                labels = {}
                labels.update(self._labels)
                labels.update({'original_metric': m['metric_name']})
                if m.get('label'):
                    labels.update(m['label'])
                try:
                    metric.add_sample(i['name'],
                                      value=response[m['metric_name']],
                                      labels=labels)
                except KeyError:
                    # Metric missing from this scrape; skip the sample.
                    pass
            if metric.samples:
                yield metric
Example #10
 def parse(self, t: tuple, metrics: list, endpoint, label=None) -> list:
     label = label or {}  # avoid sharing a mutable default argument
     NoneType = type(None)
     if isinstance(t[1], (int, float, bool, str, NoneType)):
         mtr_d = {}
         if self._show_type:
             k = self.correct_metric_name(t[0] + '_value')
         else:
             k = self.correct_metric_name(t[0])
         v = t[1]
         mtr_d = self.parse_base_turple((k, v), endpoint, label=label)
         metric = Metric(mtr_d['name'], '', 'gauge')
         metric.add_sample(self._prefix + mtr_d['name'],
                           value=mtr_d['value'],
                           labels=mtr_d['labels'])
         metrics.append(metric)
     if isinstance(t[1], list):
         # enumerate instead of t[1].index(i), which returns the first
         # occurrence and breaks on duplicate list values
         for cnt, i in enumerate(t[1]):
             l = {"index": str(cnt)}
             name = f'{t[0]}_{cnt}'
             if self._show_type:
                 name += '_list'
             self.parse((name, i), metrics, endpoint, label=l)
     if isinstance(t[1], dict):
         for i in t[1].items():
             name = t[0]
             if self._show_type:
                 name += '_dict_'
             else:
                 name += '_'
             self.parse((name + i[0], i[1]), metrics, endpoint)
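The recursion above bottoms out on scalars, indexes into lists, and concatenates dict keys into metric names. A self-contained sketch of the same flattening idea (the function and sample data are hypothetical, independent of the class above):

    # Hedged sketch: flatten nested JSON into gauge metrics.
    from prometheus_client.core import Metric


    def flatten(prefix, value, metrics, labels=None):
        labels = labels or {}
        if isinstance(value, (int, float, bool)):
            metric = Metric(prefix, '', 'gauge')
            metric.add_sample(prefix, value=float(value), labels=labels)
            metrics.append(metric)
        elif isinstance(value, list):
            for idx, item in enumerate(value):
                flatten(f'{prefix}_{idx}', item, metrics, {'index': str(idx)})
        elif isinstance(value, dict):
            for key, item in value.items():
                flatten(f'{prefix}_{key}', item, metrics, labels)


    metrics = []
    flatten('app', {'queue': {'depth': 3}, 'workers': [1, 2]}, metrics)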
Example #11
  def collect(self):
    # Parse `free -t -m`: total/used/free RAM in MB from the summary line.
    tot_m, used_m, free_m, s, b, c = map(
        int, os.popen('free -t -m').readlines()[1].split()[1:])
    print('total ram memory: ', tot_m, 'MB')
    print('used ram memory: ', used_m, 'MB')
    print('free ram memory: ', free_m, 'MB')

    # Total and used RAM as a summary
    metric = Metric('ram_usage_megabytes', 'RAM usage in megabytes', 'summary')
    metric.add_sample('total_ram', value=tot_m, labels={})
    metric.add_sample('ram_used', value=used_m, labels={})
    yield metric

    # Free RAM
    metric = Metric('ram_free_megabytes', 'Free RAM in megabytes', 'gauge')
    metric.add_sample('ram_free', value=free_m, labels={})
    yield metric
Example #12
 def collect(self):
     # Fetch the JSON
     response = json.loads(
         requests.get(self._endpoint).content.decode('UTF-8'))
     # Metrics with labels for the documents loaded
      metric = Metric('svc_fps', 'Current svc_fps value reported by the endpoint', 'gauge')
     metric.add_sample('svc_fps', value=response, labels={})
     yield metric
Example #13
 def collect(self):
   with lock:
     # query the api
     response = self.client.quote()
     metric = Metric('coin_market_quote', 'coinmarketcap quote', 'gauge')
      # assumes the quote currency (e.g. 'USD') is stored on the collector
      coinmarketmetric = f'coin_market_quote_{self.currency}'
      quote = response['data'][self.symbol]['quote'][self.currency]
     metric.add_sample(coinmarketmetric, value=float(quote['price']), labels={'symbol': self.symbol})
     yield metric
Example #14
 def _get_scrape_duration_metric(self, start_time):
     metric = Metric(
         'trafficserver_scrape_duration_seconds',
         'Time the Traffic Server scrape took, in seconds.',
         'gauge')
     metric.add_sample(
         'trafficserver_scrape_duration_seconds',
         value=time.time() - start_time,
         labels={})
     return metric
Example #15
 def getMetrics(self, port):
     metrics = []
     data = {"fast": random.random(), "slow": random.random()}
     for index in range(100):
         name = "svcs_" + str(index) + "_" + str(port) + "_documents_loaded"
         metric = Metric(name, 'Help text', 'gauge')
         for k, v in data.items():
             metric.add_sample(name, value=v, labels={'repository': k})
         metrics.append(metric)

     return metrics
Example #16
 def _get_scrape_duration_metric(self, start_time):
     metric = Metric(
         "trafficserver_scrape_duration_seconds",
         "Time the Traffic Server scrape took, in seconds.",
         "gauge",
     )
     metric.add_sample(
         "trafficserver_scrape_duration_seconds",
         value=time.time() - start_time,
         labels={},
     )
     return metric
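Examples #14 and #16 are the same scrape-duration helper; a collector typically times its own collect() run with it. A hedged usage sketch (the surrounding collector class and its placeholder body are hypothetical):

    import time

    from prometheus_client.core import Metric


    class TrafficServerCollector:
        def _get_scrape_duration_metric(self, start_time):
            metric = Metric(
                'trafficserver_scrape_duration_seconds',
                'Time the Traffic Server scrape took, in seconds.',
                'gauge')
            metric.add_sample(
                'trafficserver_scrape_duration_seconds',
                value=time.time() - start_time,
                labels={})
            return metric

        def collect(self):
            start_time = time.time()
            # ... gather and yield the real Traffic Server metrics here ...
            yield self._get_scrape_duration_metric(start_time)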
Example #17
    def collect(self):
        # read connection settings from environment variables
        zabbix_exp_url = os.environ.get('ZABBIX_EXP_URL', 'http://localhost/')
        zabbix_exp_username = os.environ.get('ZABBIX_EXP_USERNAME', 'Admin')
        zabbix_exp_password = os.environ.get('ZABBIX_EXP_PASSWORD', 'zabbix')

        zapi = ZabbixAPI(zabbix_exp_url)
        zapi.login(zabbix_exp_username, zabbix_exp_password)

        # create a prometheus metric
        metric = Metric('zabbix_warning', 'Current Zabbix Warning Count',
                        'gauge')
        # Get a list of all issues (AKA tripped triggers)

        triggers = zapi.trigger.get(
            only_true=1,
            skipDependent=1,
            monitored=1,
            active=1,
            output='extend',
            expandDescription=1,
            selectHosts=['host'],
        )

        # Do another query to find out which issues are Unacknowledged
        unack_triggers = zapi.trigger.get(
            only_true=1,
            skipDependent=1,
            monitored=1,
            active=1,
            output='extend',
            expandDescription=1,
            selectHosts=['host'],
            withLastEventUnacknowledged=1,
        )

        unack_trigger_ids = [t['triggerid'] for t in unack_triggers]

        for t in triggers:
            t['unacknowledged'] = t['triggerid'] in unack_trigger_ids

        # Count the "tripped" triggers (those whose value is 1)
        warn_cnt = sum(1 for t in triggers if int(t['value']) == 1)

        # append data to the metric
        metric.add_sample('zabbix_warning', value=int(warn_cnt), labels={})
        yield metric
Example #18
    def collect(self):
        self.metrics = self._get_metrics()

        if self.metrics:
            for k, v in self.metrics.items():
                metric = Metric(k, k, 'gauge')
                labels = {}
                metric.add_sample(k, value=v, labels=labels)

                if metric.samples:
                    yield metric
Example #19
 def collect(self):
     try:
         for metrics in mysql_metrices.mysql_metrices:
             command = os.popen(metrics['command']).read()
             metric = Metric(metrics['name'], metrics['desc'],
                             metrics['type'])
             if metrics['data_type'] == 'integer':
                 metric.add_sample(metrics['name'],
                                   value=int(command),
                                   labels={})
             yield metric
     except Exception as err:
         print(err)
Example #20
    def collect(self):
        metrics = self._get_metrics()

        if metrics:
            for k, v in metrics.items():
                metric = Metric(k, k, 'counter')
                labels = {}
                labels.update(self._labels)
                metric.add_sample(k, value=v, labels=labels)

                if metric.samples:
                    yield metric
Example #21
    async def get_wallet_balance(self):
        name = "get_wallet_balance"
        start = time.time()
        print(name + ": Starting")

        client = await WalletRpcClient.create("host.docker.internal", 9256,
                                              Path("/root/.chia/mainnet"),
                                              self.config)
        wallets = await client.get_wallets()
        height_info = await client.get_height_info()

        metric1 = Metric('chia_wallet_balance', "Wallet balance", "summary")
        for wallet in wallets:
            results = await client.get_wallet_balance(wallet["id"])
            metric1.add_sample('chia_wallet_balance',
                               value=results["confirmed_wallet_balance"],
                               labels={"wallet_id": str(wallet["id"])})

        metric2 = Metric('chia_wallet_height', 'Block Height of Chia Wallet',
                         'summary')
        metric2.add_sample('chia_wallet_height', value=height_info, labels={})

        client.close()

        print(name + ": Done in " + str(time.time() - start) + " seconds")
        return [metric1, metric2]
Example #22
    def collect(self):
        cache_keys = {
            'total_csize': 'Compressed size',
            'total_size': 'Original size',
            'unique_csize': 'Deduplicated size'
        }
        repo_keys = {'last_modified': 'Last backup date'}

        for d in self.dirs:
            host = basename(d)
            data = json.loads(
                subprocess.check_output(['/usr/bin/borg', 'info', d,
                                         '--json']))
            stats = data['cache']['stats']
            for key, desc in cache_keys.items():
                mkey = f'borg_{key}'
                metric = Metric(mkey, desc, 'gauge')
                metric.add_sample(mkey,
                                  value=stats[key],
                                  labels={'host': host})
                yield metric

            repo = data['repository']
            for key, desc in repo_keys.items():
                mkey = f'borg_{key}'
                metric = Metric(mkey, desc, 'gauge')
                diff = datetime.now() - datetime.fromisoformat(repo[key])
                value = 0 if diff.days < 0 else diff.days
                metric.add_sample(mkey, value=value, labels={'host': host})
                yield metric
Example #23
 def collect(self):
     # query the api
     r = requests.get(self.endpoint)
     request_time = r.elapsed.total_seconds()
     log.info('elapsed time -' + str(request_time))
     response = json.loads(r.text[r.text.index("(") + 1:r.text.rindex(")")])
     metric = Metric(
         'spotinstance_api_response_time',
         'Total time for the AWS Spot Instance API to respond.', 'summary')
     # add the response time as a metric
     metric.add_sample('spotinstance_api_response_time',
                       value=float(request_time),
                       labels={'name': 'AWS Spot Instance Pricing API'})
     yield metric
     metric = Metric('spotinstance', 'spot instance pricing', 'gauge')
     # each['region'] = us-east
     for each in response['config']['regions']:
         # each['sizes'] = list of instance sizes
         for that in each['instanceTypes']:
             for it in that['sizes']:
                 if it['valueColumns'][0]['prices']['USD'] != 'N/A*':
                     metric.add_sample(
                         'spotinstance',
                         value=float(
                             it['valueColumns'][0]['prices']['USD']),
                         labels={
                             'region': each['region'],
                             'size': it['size'].replace('.', '_')
                         })
     yield metric
Example #24
    def collect(self):

        # Expose the metric
        # Create header
        metric = Metric('aws_project_cost',
                        'Total amount of costs for project', 'gauge')

        # Walk the returned cost entries and expose the metrics
        for cost in getCosts():
            metric.add_sample('aws_project_cost', value=cost['value'],
                              labels={'app': cost['app'], 'product': cost['product']})

        # /Expose the metric
        yield metric
Example #25
  def collect(self):

    # get metric about control nodes
    url = self._endpoint

    # Fetch the JSON
    response = json.loads(requests.get(url).content.decode('UTF-8'))
    json_all_cfdbnode = response['value']

    # Add metric contrail_status

    metric = Metric('contrail_status', '', 'gauge')
    for node in json_all_cfdbnode:
      for proc in node['value']['NodeStatus']['process_info']:
        metric.add_sample('contrail_status', value=1, labels={
            'process_name': proc['process_name'],
            'process_state': proc['process_state'],
            'last_start_time': proc['last_start_time'],
            'config_node': node['name']
        })
    yield metric

    metric = Metric('config_node_avl', 'List of available config database nodes', 'gauge')
    for node in json_all_cfdbnode:
      metric.add_sample('config_node_avl', value=1, labels={
          'config_host': node['name']
      })
    yield metric
Example #26
    async def get_pricing(self):
        name = "get_pricing"
        start = time.time()
        print(name + ": Starting")

        try:
            resp = requests.get(
                "https://coinmarketcap.com/currencies/chia-network/")
            soup = BeautifulSoup(resp.content, features="lxml")
            # NOTE: these hashed CSS class names are brittle; they change
            # whenever coinmarketcap.com redeploys its frontend.
            usd_price = float(
                soup.find_all("div",
                              class_="priceValue___11gHJ")[0].text[1:].replace(
                                  ",", ""))
            vol_24hr = float(
                soup.find_all("div",
                              class_="statsValue___2iaoZ")[2].text[1:].replace(
                                  ",", ""))

            metric1 = Metric('chia_usd_price', 'Chia USD Price', "summary")
            metric1.add_sample('chia_usd_price', value=usd_price, labels={})
            metric2 = Metric('chia_24hr_volume_usd',
                             'Chia 24 Hour Volume Traded', "summary")
            metric2.add_sample('chia_24hr_volume_usd',
                               value=vol_24hr,
                               labels={})
        except Exception as e:
            print(name + ": Failed to run. Error follows")
            print(e)
            return []

        print(name + ": Done in " + str(time.time() - start) + " seconds")
        return [metric1, metric2]
Example #27
    def collect(self):
        cache_keys = {
            "total_csize": "Compressed size",
            "total_size": "Original size",
            "unique_csize": "Deduplicated size",
        }
        repo_keys = {"last_modified": "Last backup date"}

        for d in self.dirs:
            host = basename(d)
            data = json.loads(
                subprocess.check_output(["/usr/bin/borg", "info", d, "--json"],
                                        input=b"y"))
            stats = data["cache"]["stats"]
            for key, desc in cache_keys.items():
                mkey = f"borg_{key}"
                metric = Metric(mkey, desc, "gauge")
                metric.add_sample(mkey,
                                  value=stats[key],
                                  labels={"host": host})
                yield metric

            repo = data["repository"]
            for key, desc in repo_keys.items():
                mkey = f"borg_{key}"
                metric = Metric(mkey, desc, "gauge")
                diff = datetime.now() - datetime.strptime(
                    repo[key], "%Y-%m-%dT%H:%M:%S.%f")
                # timedelta.seconds is never negative and discards whole
                # days; total_seconds() gives the real age of the backup.
                value = max(0, int(diff.total_seconds()))
                metric.add_sample(mkey, value=value, labels={"host": host})
                yield metric
Example #28
 def collect(self):
     # query the api
     r = requests.get(self.endpoint)
     request_time = r.elapsed.total_seconds()
     log.info('elapsed time -' + str(request_time))
     response = json.loads(r.content.decode('UTF-8'))
     # setup the metric
     metric = Metric('coinmarketcap_response_time',
                     'Total time for the coinmarketcap API to respond.',
                     'summary')
     # add the response time as a metric
     metric.add_sample('coinmarketcap_response_time',
                       value=float(request_time),
                       labels={'name': 'coinmarketcap.com'})
     yield metric
     metric = Metric('coin_market', 'coinmarketcap metric values', 'gauge')
     for each in response:
         for that in [
                 'rank', 'price_usd', 'price_btc', '24h_volume_usd',
                 'market_cap_usd', 'available_supply', 'total_supply',
                 'percent_change_1h', 'percent_change_24h',
                 'percent_change_7d'
         ]:
             coinmarketmetric = '_'.join(['coin_market', that])
             if each[that] is not None:
                 metric.add_sample(coinmarketmetric,
                                   value=float(each[that]),
                                   labels={
                                       'id': each['id'],
                                       'name': each['name'],
                                       'symbol': each['symbol']
                                   })
     yield metric
Example #29
  def collect(self):
    Symbols = ['GOOG', 'CSCO', 'BABA', 'AAPL', 'IBM', 'GLOB']

    strSymbols = cat_to_string(Symbols)
    JSp = GoogleFinanceAPI()

    if JSp.get(strSymbols):
        JSp.JsonQot2Obj()
        metric = Metric('stock_quotes', 'stock quotes last price', 'gauge')
        for quote in JSp.QuotesList:
            # Convert each quote's last price to a metric sample
            metric.add_sample('stock_quotes', value=float(quote.Last), labels={'symbol': quote.Symbol})
        yield metric
Example #30
    def collect(self):

        #Expose the metric
        #Create header
        metric = Metric('aws_project_cost',
                        'Total amount of costs for project', 'gauge')

        # Walk the returned dictionary and expose the metrics
        for project, cost in getCosts().items():
            metric.add_sample('aws_project_cost',
                              value=cost,
                              labels={'project': project})

        #/Expose the metric
        yield metric
Example #31
 def collect(self):
     beanstalk = beanstalkc.Connection(host=beanstalkd_host,
                                       port=int(beanstalkd_port))
      for tube in beanstalk.tubes():
          mydict = beanstalk.stats_tube(tube)
          for key in mydict.keys():
              newstr = "beanstalkd_" + key.replace('-', '_')
              metric = Metric(newstr, newstr, 'gauge')
              # skip non-numeric stats (some tube stats are strings)
              if not isinstance(mydict[key], str):
                 metric.add_sample(newstr,
                                   value=mydict[key],
                                   labels={'queue': tube})
                 yield metric
Example #32
    def _parse_volume_metrics(self, data, volume):
        metric = Metric(
            'trafficserver_cache_avail_size_bytes_total',
            'Total cache available.',
            'gauge')
        # bytes_total is the volume's total (available) size; the original
        # read bytes_used here, swapping the two stats
        metric.add_sample(
            'trafficserver_cache_avail_size_bytes_total',
            value=data[('proxy.process.cache.volume_{0}.'
                        'bytes_total').format(volume)],
            labels={'volume': str(volume)})
        yield metric

        metric = Metric(
            'trafficserver_cache_used_bytes_total',
            'Total cache used in bytes.',
            'gauge')
        metric.add_sample(
            'trafficserver_cache_used_bytes_total',
            value=data[('proxy.process.cache.volume_{0}.'
                        'bytes_used').format(volume)],
            labels={'volume': str(volume)})
        yield metric

        metric = Metric(
            'trafficserver_cache_operations_total',
            'Cache operation count.',
            'counter')
        for op in ('lookup', 'read', 'write', 'update', 'remove',
                   'evacuate', 'scan', 'read_busy'):
            for result in ('success', 'failure'):
                k = 'proxy.process.cache.volume_{volume}.{op}.{result}'.format(
                    volume=volume, op=op, result=result)
                metric.add_sample(
                    'trafficserver_cache_operations_total',
                    value=data[k],
                    labels={'volume': str(volume),
                            'operation': op,
                            'result': result})
        yield metric
Example #33
    def parse_metrics(self, data):
        """Generator for trafficserver metrics."""
        # Counter for server restarts
        metric = Metric(
            'trafficserver_restart_count',
            'Count of traffic_server restarts.',
            'counter')
        metric.add_sample(
            'trafficserver_restart_count',
            value=data['proxy.node.restarts.proxy.restart_count'],
            labels={})
        yield metric

        #
        # HTTP
        #
        # Connections
        metric = Metric(
            'trafficserver_connections_total',
            'Connection count.',
            'counter')
        metric.add_sample(
            'trafficserver_connections_total',
            value=data['proxy.process.http.total_client_connections'],
            labels={'source': 'client',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_connections_total',
            value=data['proxy.process.http.total_server_connections'],
            labels={'source': 'server',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_connections_total',
            value=data['proxy.process.http.total_parent_proxy_connections'],
            labels={'source': 'parent_proxy',
                    'protocol': 'http'})
        yield metric

        # Incoming requests
        metric = Metric(
            'trafficserver_requests_incoming',
            'Incoming requests.',
            'gauge')
        metric.add_sample(
            'trafficserver_requests_incoming',
            value=data['proxy.process.http.incoming_requests'],
            labels={'protocol': 'http'})
        yield metric

        # Client aborts
        metric = Metric(
            'trafficserver_error_client_aborts_total',
            'Client aborts.',
            'counter')
        metric.add_sample(
            'trafficserver_error_client_aborts_total',
            value=data['proxy.process.http.err_client_abort_count_stat'],
            labels={'protocol': 'http'})
        yield metric

        # Connect fails
        metric = Metric(
            'trafficserver_connect_failures_total',
            'Connect failures.',
            'counter')
        metric.add_sample(
            'trafficserver_connect_failures_total',
            value=data['proxy.process.http.err_connect_fail_count_stat'],
            labels={'protocol': 'http'})
        yield metric

        # Transaction count
        metric = Metric(
            'trafficserver_transactions_total',
            'Total transactions.',
            'counter')
        metric.add_sample(
            'trafficserver_transactions_total',
            value=data[('proxy.node.http.'
                        'user_agents_total_transactions_count')],
            labels={'source': 'user_agent',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_transactions_total',
            value=data[('proxy.node.http.'
                        'origin_server_total_transactions_count')],
            labels={'source': 'origin_server',
                    'protocol': 'http'})
        yield metric

        # Transaction time spent, total
        metric = Metric(
            'trafficserver_transactions_time_ms_total',
            'Total transaction time (ms).',
            'counter')
        metric.add_sample(
            'trafficserver_transactions_time_ms_total',
            value=data['proxy.process.http.total_transactions_time'],
            labels={})
        yield metric

        # Transaction time spent, hits
        metric = Metric(
            'trafficserver_hit_transaction_time_ms_total',
            'Total cache hit transaction time (ms).',
            'counter')
        metric.add_sample(
            'trafficserver_hit_transaction_time_ms_total',
            value=data['proxy.process.http.transaction_totaltime.hit_fresh'],
            labels={'state': 'fresh',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_hit_transaction_time_ms_total',
            value=data[('proxy.process.http.transaction_totaltime.'
                        'hit_revalidated')],
            labels={'state': 'revalidated',
                    'protocol': 'http'})
        yield metric

        # Transaction time spent, misses
        metric = Metric(
            'trafficserver_miss_transaction_time_ms_total',
            'Total cache miss transaction time (ms).',
            'counter')
        metric.add_sample(
            'trafficserver_miss_transaction_time_ms_total',
            value=data['proxy.process.http.transaction_totaltime.miss_cold'],
            labels={'state': 'cold',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_miss_transaction_time_ms_total',
            value=data[('proxy.process.http.transaction_totaltime.'
                        'miss_not_cacheable')],
            labels={'state': 'not_cacheable',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_miss_transaction_time_ms_total',
            value=data[('proxy.process.http.transaction_totaltime.'
                        'miss_changed')],
            labels={'state': 'changed',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_miss_transaction_time_ms_total',
            value=data[('proxy.process.http.transaction_totaltime.'
                        'miss_client_no_cache')],
            labels={'state': 'no_cache',
                    'protocol': 'http'})
        yield metric

        # Transaction time spent, errors
        metric = Metric(
            'trafficserver_error_transaction_time_ms_total',
            'Total cache error transaction time (ms).',
            'counter')
        metric.add_sample(
            'trafficserver_error_transaction_time_ms_total',
            value=data[('proxy.process.http.transaction_totaltime.errors.'
                        'aborts')],
            labels={'state': 'abort',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_error_transaction_time_ms_total',
            value=data[('proxy.process.http.transaction_totaltime.errors.'
                        'possible_aborts')],
            labels={'state': 'possible_abort',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_error_transaction_time_ms_total',
            value=data[('proxy.process.http.transaction_totaltime.errors.'
                        'connect_failed')],
            labels={'state': 'connect_failed',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_error_transaction_time_ms_total',
            value=data[('proxy.process.http.transaction_totaltime.errors.'
                        'other')],
            labels={'state': 'other',
                    'protocol': 'http'})
        yield metric

        # Transaction time spent, other
        metric = Metric(
            'trafficserver_other_transaction_time_ms_total',
            'Total other/unclassified transaction time (ms).',
            'counter')
        metric.add_sample(
            'trafficserver_other_transaction_time_ms_total',
            value=data[('proxy.process.http.transaction_totaltime.other.'
                        'unclassified')],
            labels={'state': 'unclassified',
                    'protocol': 'http'})
        yield metric

        # Transaction count, hits
        metric = Metric(
            'trafficserver_transaction_hits_total',
            'Transaction hit counts.',
            'counter')
        metric.add_sample(
            'trafficserver_transaction_hits_total',
            value=data[('proxy.process.http.transaction_counts.'
                        'hit_fresh')],
            labels={'state': 'fresh',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_transaction_hits_total',
            value=data[('proxy.process.http.transaction_counts.'
                        'hit_revalidated')],
            labels={'state': 'revalidated',
                    'protocol': 'http'})
        # Zero-valued samples for the miss states, so the label set matches
        # trafficserver_transaction_misses_total
        metric.add_sample(
            'trafficserver_transaction_hits_total',
            value=0,
            labels={'state': 'cold',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_transaction_hits_total',
            value=0,
            labels={'state': 'not_cacheable',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_transaction_hits_total',
            value=0,
            labels={'state': 'changed',
                    'protocol': 'http'})
        yield metric

        # Transaction count, misses
        metric = Metric(
            'trafficserver_transaction_misses_total',
            'Transaction miss counts.',
            'counter')
        metric.add_sample(
            'trafficserver_transaction_misses_total',
            value=data[('proxy.process.http.transaction_counts.'
                        'miss_cold')],
            labels={'state': 'cold',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_transaction_misses_total',
            value=data[('proxy.process.http.transaction_counts.'
                        'miss_not_cacheable')],
            labels={'state': 'not_cacheable',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_transaction_misses_total',
            value=data[('proxy.process.http.transaction_counts.'
                        'miss_changed')],
            labels={'state': 'changed',
                    'protocol': 'http'})
        # Zero-valued samples for the hit states, so the label set matches
        # trafficserver_transaction_hits_total
        metric.add_sample(
            'trafficserver_transaction_misses_total',
            value=0,
            labels={'state': 'fresh',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_transaction_misses_total',
            value=0,
            labels={'state': 'revalidated',
                    'protocol': 'http'})
        yield metric

        # Transaction count, errors
        metric = Metric(
            'trafficserver_transaction_errors_total',
            'Transaction error counts.',
            'counter')
        metric.add_sample(
            'trafficserver_transaction_errors_total',
            value=data[('proxy.process.http.transaction_counts.errors.'
                        'aborts')],
            labels={'state': 'abort',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_transaction_errors_total',
            value=data[('proxy.process.http.transaction_counts.errors.'
                        'possible_aborts')],
            labels={'state': 'possible_abort',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_transaction_errors_total',
            value=data[('proxy.process.http.transaction_counts.errors.'
                        'connect_failed')],
            labels={'state': 'connect_failed',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_transaction_errors_total',
            value=data[('proxy.process.http.transaction_counts.errors.'
                        'other')],
            labels={'state': 'other',
                    'protocol': 'http'})
        yield metric

        # Transaction count, others
        metric = Metric(
            'trafficserver_transaction_others_total',
            'Transaction other/unclassified counts.',
            'counter')
        metric.add_sample(
            'trafficserver_transaction_others_total',
            value=data[('proxy.process.http.transaction_counts.other.'
                        'unclassified')],
            labels={'state': 'unclassified',
                    'protocol': 'http'})
        yield metric

        # HTTP Responses
        metric = Metric(
            'trafficserver_responses_total',
            'Response count.',
            'counter')
        for code in TS_RESPONSE_CODES:
            key = 'proxy.process.http.{code}_responses'.format(code=code)
            metric.add_sample(
                'trafficserver_responses_total',
                value=data[key],
                labels={'code': code,
                        'protocol': 'http'})
        yield metric

        # HTTP Requests
        metric = Metric(
            'trafficserver_requests_total',
            'Request count.',
            'counter')
        for method in HTTP_VERBS_LOWER:
            key = 'proxy.process.http.{method}_requests'.format(method=method)
            metric.add_sample(
                'trafficserver_requests_total',
                value=data[key],
                labels={'method': method,
                        'protocol': 'http'})
        yield metric

        # Invalid requests
        metric = Metric(
            'trafficserver_client_requests_invalid_total',
            'Invalid client requests.',
            'counter')
        metric.add_sample(
            'trafficserver_client_requests_invalid_total',
            value=data['proxy.process.http.invalid_client_requests'],
            labels={'protocol': 'http'})
        yield metric

        # Requests without Host header
        metric = Metric(
            'trafficserver_client_requests_missing_host_hdr_total',
            'Client requests missing host header.',
            'counter')
        metric.add_sample(
            'trafficserver_client_requests_missing_host_hdr_total',
            value=data['proxy.process.http.missing_host_hdr'],
            labels={'protocol': 'http'})
        yield metric

        # Request size
        metric = Metric(
            'trafficserver_request_size_bytes_total',
            'Request size in bytes.',
            'counter')
        metric.add_sample(
            'trafficserver_request_size_bytes_total',
            value=data['proxy.node.http.user_agent_total_request_bytes'],
            labels={'source': 'user_agent',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_request_size_bytes_total',
            value=data['proxy.node.http.origin_server_total_request_bytes'],
            labels={'source': 'origin_server',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_request_size_bytes_total',
            value=data['proxy.node.http.parent_proxy_total_request_bytes'],
            labels={'source': 'parent_proxy',
                    'protocol': 'http'})
        yield metric

        # Response size
        metric = Metric(
            'trafficserver_response_size_bytes_total',
            'Response size in bytes.',
            'counter')
        metric.add_sample(
            'trafficserver_response_size_bytes_total',
            value=data['proxy.node.http.user_agent_total_response_bytes'],
            labels={'source': 'user_agent',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_response_size_bytes_total',
            value=data['proxy.node.http.origin_server_total_response_bytes'],
            labels={'source': 'origin_server',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_response_size_bytes_total',
            value=data['proxy.node.http.parent_proxy_total_response_bytes'],
            labels={'source': 'parent_proxy',
                    'protocol': 'http'})
        yield metric

        #
        # Cache
        #
        # Gather all cache volumes for cache statistics
        volumes = set()
        for key in data:
            if key.startswith('proxy.process.cache.volume_'):
                m = CACHE_VOLUMES.match(key)
                volumes.add(int(m.group(1)))

        # Create all cache volume metrics
        for volume in volumes:
            for metric in self._parse_volume_metrics(data, volume):
                yield metric

        metric = Metric(
            'trafficserver_ram_cache_hits_total',
            'RAM cache hit count.',
            'counter')
        # RAM cache stats are global; the per-volume label here previously
        # reused a stale `volume` variable left over from the loop above.
        metric.add_sample(
            'trafficserver_ram_cache_hits_total',
            value=data['proxy.process.cache.ram_cache.hits'],
            labels={})
        yield metric

        metric = Metric(
            'trafficserver_ram_cache_misses_total',
            'RAM cache miss count.',
            'counter')
        metric.add_sample(
            'trafficserver_ram_cache_misses_total',
            value=data['proxy.process.cache.ram_cache.misses'],
            labels={})
        yield metric

        metric = Metric(
            'trafficserver_ram_cache_avail_size_bytes_total',
            'RAM cache available in bytes.',
            'gauge')
        metric.add_sample(
            'trafficserver_ram_cache_avail_size_bytes_total',
            value=data['proxy.process.cache.ram_cache.total_bytes'],
            labels={})
        yield metric

        metric = Metric(
            'trafficserver_ram_cache_used_bytes_total',
            'RAM cache used in bytes.',
            'gauge')
        metric.add_sample(
            'trafficserver_ram_cache_used_bytes_total',
            value=data['proxy.process.cache.ram_cache.bytes_used'],
            labels={})
        yield metric
Example #34
 def collect(self):
     metric = Metric("nonnumber", "Non number", 'untyped')
     metric.add_sample("nonnumber", {}, MyNumber())
     yield metric
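MyNumber is not defined in this snippet; it comes from a test exercising non-float sample values. A plausible stand-in (an assumption, not the original class) only needs to coerce to float, since prometheus_client renders sample values numerically at exposition time:

    # Hypothetical stand-in for the undefined MyNumber.
    class MyNumber:
        def __float__(self):
            return 1.0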