Example #1
    def collect(self):
        metrics = self._get_metrics()

        if metrics:
            for k, v in metrics.items():
                metric = Metric(k, k, 'counter')
                labels = {}
                labels.update(self._labels)
                metric.add_sample(k, value=v, labels=labels)

                if metric.samples:
                    yield metric
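Most of the examples on this page implement the custom-collector protocol of the Python prometheus_client library: an object exposing a collect() method that yields Metric instances built with add_sample(). A minimal sketch of how such a collector is registered and served over HTTP; the class name and port here are illustrative, not taken from the examples:

    import time

    from prometheus_client import start_http_server
    from prometheus_client.core import Metric, REGISTRY

    class StaticCollector:
        def collect(self):
            # Build one metric family and attach a single sample, as in Example #1
            metric = Metric('example_value', 'A constant example value', 'gauge')
            metric.add_sample('example_value', value=1.0, labels={})
            yield metric

    if __name__ == '__main__':
        REGISTRY.register(StaticCollector())
        start_http_server(8000)  # metrics served at http://localhost:8000/metrics
        while True:
            time.sleep(60)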
Example #2
    def collect(self):
        pewdiepie = 'pewdiepie'
        tseries = 'tseries'
        youtube_key = 'YOUR_API_KEY'
        endpoint = 'https://www.googleapis.com/youtube/v3/channels?part=statistics&forUsername='
        youtube_host = os.environ.get('YOUTUBE_HOST', endpoint)
        status = 'running'
        url = youtube_host + pewdiepie + '&key=' + youtube_key
        r = requests.get(url, verify=False)
        try:
            #create pewdiepie metric
            metric = Metric('subscriber_count', 'Current number of subs',
                            'gauge')
            metric.add_sample(
                'subscriber_count',
                value=int(
                    r.json()["items"][0]["statistics"]["subscriberCount"]),
                labels={'Channel': 'PewDiePie'})
            # yield metric
        except Exception:
            pass

        url = youtube_host + tseries + '&key=' + youtube_key
        r = requests.get(url, verify=False)
        try:
            #create tseries metric
            # metric = Metric('tseries_subs', 'Current number of non-bros', 'gauge')
            metric.add_sample(
                'subscriber_count',
                value=int(
                    r.json()["items"][0]["statistics"]["subscriberCount"]),
                labels={'Channel': 'Tseries'})
            yield metric
        except Exception:
            pass
Example #3
    def collect(self):

        r = requests.get('%s/system/health/v1/units' % self._url,
                         headers={'Authorization': 'token=' + self._token},
                         verify=False)

        # if failed, refresh token
        if r.status_code == 401:
            logging.info("Failed auth, getting new auth token")
            self.get_token()
            # a bare self.collect() returns a generator that is discarded;
            # yield from actually re-runs the scrape with the fresh token
            yield from self.collect()
        else:
            healthmetrics = r.json()
            for hm in healthmetrics['units']:
                logging.info(hm)
                hm_removed_periods = hm[u'id'].replace(".", "_")
                hm_removed_dashes = hm_removed_periods.replace("-", "_")
                metric = Metric(hm_removed_dashes, '', 'gauge')
                metric.add_sample(hm_removed_dashes,
                                  value=hm[u'health'],
                                  labels={
                                      'name': hm[u'name'],
                                      'desc': hm[u'description']
                                  })
                yield metric
                logging.info("%s:%d" % (hm[u'id'], hm[u'health']))
Example #4
 def collect(self):
     jsondata = collect(self.jsondata)
     metric = Metric(self.metric_name, "{}_{}".format(self.metric_name, self.hostname), 'gauge')
     metric.add_sample(self.metric_name, value=jsondata['data']['data']['value'],
                       labels={ 'host': self.hostname, '{}_subvalue'.format(self.metric_name): 'None',
                       })
     yield metric
Example #5
  def collect(self):

    # get metric about control nodes

    url = self._endpoint

    # Fetch the JSON
    response = json.loads(requests.get(url).content.decode('UTF-8'))

    metric = Metric('control_node_metrics', 'metrics for control nodes', 'summary')

    for entry in response['value']:
      name = entry['name']
      if ('NodeStatus' in entry['value']):
        tmp = entry['value']['NodeStatus']
        
        system_mem_usage = tmp['system_mem_usage']
        system_cpu_info = tmp['system_cpu_info']
        for k in system_mem_usage:
          if k == 'node_type':
            continue
          metric.add_sample('system_mem_usage_'+k, value=system_mem_usage[k], labels={"host_name": name})
        
        for k in system_cpu_info:
          metric.add_sample('system_cpu_info_'+k, value=system_cpu_info[k], labels={"host_name": name})  

    # Export the metric once all samples are added; yielding inside the loop
    # would emit the same metric family once per node
    yield metric
Example #6
  def collect(self):

    # get metric about control nodes
    url = self._endpoint

    # Fetch the JSON
    response = json.loads(requests.get(url).content.decode('UTF-8'))
    json_all_cfdbnode = response['value']
    
    # Add metric system_mem_usage_used
    
    metric = Metric('contrail_status', '', 'gauge')
    for node in json_all_cfdbnode:
      for proc in node['value']['NodeStatus']['process_info']:
        metric.add_sample('contrail_status', value=1, labels={
            'process_name': proc['process_name'],
            'process_state': proc['process_state'],
            'last_start_time': proc['last_start_time'],
            'config_node': node['name']
        })
    yield metric

    metric = Metric('config_node_avl', 'List of config databases node available', 'gauge')
    for node in json_all_cfdbnode:
      metric.add_sample('config_node_avl', value=1, labels={
          'config_host': node['name']
      })
    yield metric
Example #7
 def parse(self, t: tuple, metrics: list, endpoint, label=None) -> list:
     # Avoid the mutable-default-argument pitfall
     label = label if label is not None else {}
     NoneType = type(None)
     if isinstance(t[1], (int, float, bool, str, NoneType)):
         if self._show_type:
             k = self.correct_metric_name(t[0] + '_value')
         else:
             k = self.correct_metric_name(t[0])
         v = t[1]
         mtr_d = self.parse_base_turple((k, v), endpoint, label=label)
         metric = Metric(mtr_d['name'], '', 'gauge')
         metric.add_sample(self._prefix + mtr_d['name'],
                           value=mtr_d['value'],
                           labels=mtr_d['labels'])
         metrics.append(metric)
     if isinstance(t[1], list):
         for cnt, i in enumerate(t[1]):
             # Use the enumeration counter: list.index(i) returns the first
             # occurrence, which is wrong when the list contains duplicates
             idx_label = {"index": str(cnt)}
             name = f'{t[0]}_{cnt}'
             if self._show_type:
                 name += '_list'
             self.parse((name, i), metrics, endpoint, label=idx_label)
     if isinstance(t[1], dict):
         for key, val in t[1].items():
             name = t[0]
             if self._show_type:
                 name += '_dict_'
             else:
                 name += '_'
             self.parse((name + key, val), metrics, endpoint)
Example #8
    def collect(self):
        with lock:
            log.info('collecting...')

            response = self.client.tickers()

            metric = Metric('market', 'crypto currency market metric values',
                            'gauge')

            # tick = response["tick"]
            #
            # best_buy_price = tick["bids"][0][0]
            # best_sell_price = tick["asks"][0][0]
            #

            for k, v in response.items():
                metric.add_sample(k, value=v, labels={
                    "currency": "bitcoin",
                })

            # metric.add_sample(
            #     "bitcoin_market",
            #     value=sum([best_buy_price, best_sell_price]) / 2.0,
            #     labels={
            #         "currency": "bitcoin",
            #         "type": "spot",
            #         "id": "bitcoin",
            #     })

            yield metric
Example #9
    async def get_wallet_balance(self):
        name = "get_wallet_balance"
        start = time.time()
        print(name + ": Starting")

        client = await WalletRpcClient.create("host.docker.internal", 9256,
                                              Path("/root/.chia/mainnet"),
                                              self.config)
        wallets = await client.get_wallets()
        height_info = await client.get_height_info()

        metric1 = Metric('chia_wallet_balance', "Wallet balance", "summary")
        for wallet in wallets:
            results = await client.get_wallet_balance(wallet["id"])
            metric1.add_sample('chia_wallet_balance',
                               value=results["confirmed_wallet_balance"],
                               labels={"wallet_id": str(wallet["id"])})

        metric2 = Metric('chia_wallet_height', 'Block Height of Chia Wallet',
                         'summary')
        metric2.add_sample('chia_wallet_height', value=height_info, labels={})

        client.close()

        print(name + ": Done in " + str(time.time() - start) + " seconds")
        return [metric1, metric2]
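These async helpers return lists of Metric objects rather than yielding them from collect(). A plausible bridge into the synchronous collector protocol (an assumption; the enclosing class is not shown in the example) gathers the list with asyncio and yields its contents:

    import asyncio

    class ChiaCollector:
        def collect(self):
            # Run the async helper to completion and yield its metrics
            # synchronously, as prometheus_client expects (Python 3.7+)
            for metric in asyncio.run(self.get_wallet_balance()):
                yield metric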
Example #10
    def collect(self):

        url = self._endpoint
        response = json.loads(requests.get(url).content.decode('UTF-8'))
        json_all_bgp = response['value']
        metric = Metric('bgp_state_info', '', 'gauge')
        for entry in json_all_bgp:
            cbgp = entry['value']
            conn = entry['name'].split(':')
            snode = conn[4]
            dnode = conn[9]
            if ('BgpPeerInfoData' in cbgp
                    and 'state_info' in cbgp['BgpPeerInfoData']):
                metric.add_sample(
                    'bgp_state_info',
                    value=1,
                    labels={
                        'bgp_last_state':
                        cbgp['BgpPeerInfoData']['state_info']['last_state'],
                        'bgp_state':
                        cbgp['BgpPeerInfoData']['state_info']['state'],
                        'control_node':
                        snode,
                        'peer':
                        dnode
                    })
        yield metric
Example #11
    def collect(self):
        # Fetch the JSON
        #response = json.loads(requests.get(self._endpoint).content.decode('UTF-8'))
        # fetch json from file
        with open("./indices") as f:
            json_data = f.read()

        #This function summarizes each index pattern; the return is a list with one entry per index group.
        #For example, for a group of indices named customer-metrics-*, a regex like '(.*-metrics).*' returns 'customer-metrics'
        indicesGroups = summarizeIndices(json_data, indicesPattern)

        #Sum the total size in bytes of each index group; the return is a dict mapping the first match group to its value
        indicesGroupSizes = collectIndicesGroupSizes(indicesGroups, json_data,
                                                     indicesPattern)

        #Metric es_group_indices_size
        metric = Metric('es_group_indices_size', 'Size of a group of indices',
                        'gauge')
        #Walk the indicesGroupSizes dict and add each entry to the metric
        for labelValue, sizeValue in indicesGroupSizes.items():
            metric.add_sample('es_group_indices_size',
                              value=sizeValue,
                              labels={'group': labelValue})

        #Expose the metric
        yield metric
Example #12
    def collect(self):
        cache_keys = {
            'total_csize': 'Compressed size',
            'total_size': 'Original size',
            'unique_csize': 'Deduplicated size'
        }
        repo_keys = {'last_modified': 'Last backup date'}

        for d in self.dirs:
            host = basename(d)
            data = json.loads(
                subprocess.check_output(['/usr/bin/borg', 'info', d,
                                         '--json']))
            stats = data['cache']['stats']
            for key, desc in cache_keys.items():
                mkey = f'borg_{key}'
                metric = Metric(mkey, desc, 'gauge')
                metric.add_sample(mkey,
                                  value=stats[key],
                                  labels={'host': host})
                yield metric

            repo = data['repository']
            for key, desc in repo_keys.items():
                mkey = f'borg_{key}'
                metric = Metric(mkey, desc, 'gauge')
                diff = datetime.now() - datetime.fromisoformat(repo[key])
                value = 0 if diff.days < 0 else diff.days
                metric.add_sample(mkey, value=value, labels={'host': host})
                yield metric
Example #13
    def collect(self):
        response = self._get_metrics()

        if self._exclude:
            self.filter_exclude()

        for i in self._metric_collect:
            metric = Metric(i['name'], i['name'], i['metric_type'])

            for m in i['collect']:
                labels = {}
                labels.update(self._labels)
                labels.update({'original_metric': m['metric_name']})
                if m.get('label'):
                    labels.update(m['label'])
                try:
                    metric.add_sample(i['name'],
                                      value=response[m['metric_name']],
                                      labels=labels)
                except KeyError:
                    pass
            if metric.samples:
                yield metric
Example #14
 def collect(self):
     # query the api
     r = requests.get(self.endpoint)
     request_time = r.elapsed.total_seconds()
     log.info('elapsed time -' + str(request_time))
     response = json.loads(r.text[r.text.index("(") + 1:r.text.rindex(")")])
     metric = Metric(
         'spotinstance_api_response_time',
         'Total time for the AWS Spot Instance API to respond.', 'summary')
     # add the response time as a metric
     metric.add_sample('spotinstance_api_response_time',
                       value=float(request_time),
                       labels={'name': 'AWS Spot Instance Pricing API'})
     yield metric
     metric = Metric('spotinstance', 'spot instance pricing', 'gauge')
     # each['region'] = us-east
     for each in response['config']['regions']:
          # each['instanceTypes'] = instance type groups; that['sizes'] = list of instance sizes
         for that in each['instanceTypes']:
             for it in that['sizes']:
                 if it['valueColumns'][0]['prices']['USD'] != 'N/A*':
                     metric.add_sample(
                         'spotinstance',
                         value=float(
                             it['valueColumns'][0]['prices']['USD']),
                         labels={
                             'region': each['region'],
                             'size': it['size'].replace('.', '_')
                         })
     yield metric
Example #15
 def collect(self):
     # query the api
     r = requests.get(self.endpoint)
     request_time = r.elapsed.total_seconds()
     log.info('elapsed time -' + str(request_time))
     response = json.loads(r.content.decode('UTF-8'))
     # setup the metric
     metric = Metric('coinmarketcap_response_time',
                     'Total time for the coinmarketcap API to respond.',
                     'summary')
     # add the response time as a metric
     metric.add_sample('coinmarketcap_response_time',
                       value=float(request_time),
                       labels={'name': 'coinmarketcap.com'})
     yield metric
     metric = Metric('coin_market', 'coinmarketcap metric values', 'gauge')
     for each in response:
         for that in [
                 'rank', 'price_usd', 'price_btc', '24h_volume_usd',
                 'market_cap_usd', 'available_supply', 'total_supply',
                 'percent_change_1h', 'percent_change_24h',
                 'percent_change_7d'
         ]:
             coinmarketmetric = '_'.join(['coin_market', that])
             if each[that] is not None:
                 metric.add_sample(coinmarketmetric,
                                   value=float(each[that]),
                                   labels={
                                       'id': each['id'],
                                       'name': each['name'],
                                       'symbol': each['symbol']
                                   })
     yield metric
Example #16
 def collect(self):
     # Fetch the JSON
     peer_info = merged_peer_info(self.wg)
     peer_metadata = get_peer_metadata()
     for iface in peer_info:
         metric = Metric(f"interface_info_{iface['iface']}",
                         'interface_information', 'summary')
         for peer in iface['peers']:
             peer.update(peer_metadata.get(peer['public_key'], {}))
             for k, v in peer.items():
                 if k not in [
                         'latency_ms', 'packet_loss', 'rx_bytes', 'tx_bytes'
                 ]:
                     continue
                 metric.add_sample(f"iface_information_{k}",
                                   value=str(v),
                                   labels={
                                       'hostname':
                                       os.environ.get(
                                           'SYNTROPY_AGENT_NAME',
                                           socket.gethostname()),
                                       'ifname':
                                       iface['iface'],
                                       'peer':
                                       peer['public_key'],
                                       'internal_ip':
                                       peer['internal_ip'],
                                       "device_id":
                                       peer.get('device_id'),
                                       "device_name":
                                       peer.get('device_name'),
                                       "device_public_ipv4":
                                       peer.get('device_public_ipv4')
                                   })
         yield metric
Example #17
    def collect(self):
        cache_keys = {
            "total_csize": "Compressed size",
            "total_size": "Original size",
            "unique_csize": "Deduplicated size",
        }
        repo_keys = {"last_modified": "Last backup date"}

        for d in self.dirs:
            host = basename(d)
            data = json.loads(
                subprocess.check_output(["/usr/bin/borg", "info", d, "--json"],
                                        input=b"y"))
            stats = data["cache"]["stats"]
            for key, desc in cache_keys.items():
                mkey = f"borg_{key}"
                metric = Metric(mkey, desc, "gauge")
                metric.add_sample(mkey,
                                  value=stats[key],
                                  labels={"host": host})
                yield metric

            repo = data["repository"]
            for key, desc in repo_keys.items():
                mkey = f"borg_{key}"
                metric = Metric(mkey, desc, "gauge")
                diff = datetime.now() - datetime.strptime(
                    repo[key], "%Y-%m-%dT%H:%M:%S.%f")
                # timedelta.seconds only holds the sub-day remainder and is
                # never negative; total_seconds() gives the true age
                value = max(0, int(diff.total_seconds()))
                metric.add_sample(mkey, value=value, labels={"host": host})
                yield metric
Example #18
    async def get_pricing(self):
        name = "get_pricing"
        start = time.time()
        print(name + ": Starting")

        try:
            resp = requests.get(
                "https://coinmarketcap.com/currencies/chia-network/")
            soup = BeautifulSoup(resp.content, features="lxml")
            usd_price = float(
                soup.find_all("div",
                              class_="priceValue___11gHJ")[0].text[1:].replace(
                                  ",", ""))
            vol_24hr = float(
                soup.find_all("div",
                              class_="statsValue___2iaoZ")[2].text[1:].replace(
                                  ",", ""))

            metric1 = Metric('chia_usd_price', 'Chia USD Price', "summary")
            metric1.add_sample('chia_usd_price', value=usd_price, labels={})
            metric2 = Metric('chia_24hr_volume_usd',
                             'Chia 24 Hour Volume Traded', "summary")
            metric2.add_sample('chia_24hr_volume_usd',
                               value=vol_24hr,
                               labels={})
        except Exception as e:
            print(name + ": Failed to run. Error follows")
            print(e)
            return []

        print(name + ": Done in " + str(time.time() - start) + " seconds")
        return [metric1, metric2]
Example #19
 def collect(self):
     # Fetch the JSON
     response = json.loads(
         requests.get(self._endpoint).content.decode('UTF-8'))
     # Metrics with labels for the documents loaded
     metric = Metric('svc_fps', 'Requests failed', 'gauge')
     metric.add_sample('svc_fps', value=response, labels={})
     yield metric
Example #20
 def collect(self):
     metric = Metric("hh", "help", 'histogram')
     # This is not sane, but it covers all the cases.
     metric.add_sample("hh_bucket", {"le": "1"}, 0, None, Exemplar({'a': 'b'}, 0.5))
     metric.add_sample("hh_bucket", {"le": "2"}, 0, None, Exemplar({'le': '7'}, 0.5, 12))
     metric.add_sample("hh_bucket", {"le": "3"}, 0, 123, Exemplar({'a': 'b'}, 2.5, 12))
     metric.add_sample("hh_bucket", {"le": "4"}, 0, None, Exemplar({'a': '\n"\\'}, 3.5))
     metric.add_sample("hh_bucket", {"le": "+Inf"}, 0, None, None)
     yield metric
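For reference, the positional arguments to add_sample in this example are (name, labels, value, timestamp, exemplar), so the first call above is equivalent to the keyword style used elsewhere on this page:

    metric.add_sample("hh_bucket", labels={"le": "1"}, value=0,
                      timestamp=None, exemplar=Exemplar({'a': 'b'}, 0.5))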
Example #21
    def collect(self):

        # get metric about control nodes
        urls = self._endpoint

        metric = Metric('contrail_status', '', 'gauge')

        # Fetch the JSON
        for j in range(len(urls)):

            response = json.loads(
                requests.get(urls[j]).content.decode('UTF-8'))
            json_all_node = response['value']

            # Add metric system_mem_usage_used

            # metric = Metric('contrail_status', '', 'gauge')
            for node in json_all_node:
                current_json_node = node['value']
                if ('NodeStatus' in current_json_node
                        and 'process_info' in current_json_node['NodeStatus']):
                    for proc in current_json_node['NodeStatus']['process_info']:
                        metric.add_sample(
                            'contrail_status',
                            value=1 if proc['process_state']
                            == 'PROCESS_STATE_RUNNING' else 0,
                            labels={
                                'process_name': proc['process_name'],
                                'process_state': proc['process_state'],
                                'last_start_time': proc['last_start_time'],
                                # escape the dot: as a regex, '.local' would
                                # strip any character followed by 'local'
                                'node': re.sub(r'\.local', '', node['name']),
                                'node_type': re.sub(r'[:-]', '_', self._node[j])
                            })
        yield metric
        # Per-node-type availability metric
        for j in range(len(urls)):

            response = json.loads(
                requests.get(urls[j]).content.decode('UTF-8'))
            json_all_node = response['value']
            metric = Metric(re.sub(r'[:-]', '_', self._node[j]),
                            'List of node available', 'gauge')
            for node in json_all_node:
                metric.add_sample(
                    re.sub(r'[:-]', '_', self._node[j]),
                    value=1,
                    labels={'config_host': node['name']})
            yield metric
Example #22
 def collect(self):
   with lock:
     # query the api
     response = self.client.quote()
     metric = Metric('coin_market_quote', 'coinmarketcap quote', 'gauge')
      currency = self.currency  # assumption: `currency` was undefined in the original; an attribute configured on the collector seems intended
      coinmarketmetric = f'coin_market_quote_{currency}'
      quote = response['data'][self.symbol]['quote'][currency]
     metric.add_sample(coinmarketmetric, value=float(quote['price']), labels={'symbol': self.symbol})
     yield metric
Example #23
 def _get_scrape_duration_metric(self, start_time):
     metric = Metric(
         'trafficserver_scrape_duration_seconds',
         'Time the Traffic Server scrape took, in seconds.',
         'gauge')
     metric.add_sample(
         'trafficserver_scrape_duration_seconds',
         value=time.time() - start_time,
         labels={})
     return metric
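Unlike the collect() generators elsewhere on this page, this helper returns the Metric instead of yielding it. A plausible caller (an assumption; the enclosing collect() is not shown) would time the scrape and yield the result:

    def collect(self):
        start_time = time.time()
        # ... gather and yield the Traffic Server metrics here ...
        yield self._get_scrape_duration_metric(start_time)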
Example #24
 def getMetrics(self, port):
     metrics = []
     data = {"fast": random.random(), "slow": random.random()}
     for index in range(100):
         metric = Metric("svcs_" + str(index) + "_" + str(port) + "_documents_loaded", 'Help text', 'gauge')
         for k, v in data.items():
             metric.add_sample("svcs_" + str(index) + "_" + str(port) + "_documents_loaded", value=v, labels={'repository': k})
         metrics.append(metric)
         #metrics.append(GaugeMetricFamily("svcs_"+str(index)+"_"+ str(port)+"_documents_loaded", 'Help text', value=random.random()))

     return metrics
Example #25
    def collect(self):
        packages = checkupdates()
        metric = Metric('arch_checkupdates', 'Arch Linux Packages out of date', 'gauge')
        metric.add_sample('arch_checkupdates', value=packages, labels={})
        yield metric

        security_issues = vulernablepackges()

        metric = Metric('arch_audit', 'Arch Audit Packages', 'gauge')
        metric.add_sample('arch_audit', value=security_issues, labels={})
        yield metric
Example #26
    def collect(self):
        bmp = BMP085(0x77)
        temp = bmp.readTemperature()
        pressure = bmp.readPressure()

        temp_metric = Metric('bmp180_temp', 'BMP180 temperature', 'gauge')
        temp_metric.add_sample('bmp180_temp', value=temp, labels={})
        yield temp_metric

        pressure_metric = Metric('bmp180_pressure', 'BMP180 pressure', 'gauge')
        pressure_metric.add_sample('bmp180_pressure', value=pressure, labels={})
        yield pressure_metric
Example #27
    def collect(self):
        start_time = datetime.datetime.now()
        ret = self.kube_metrics()
        end_time = datetime.datetime.now()
        total_time = (end_time - start_time).total_seconds()

        nodes = ret['nodes']
        pods = ret['pods']

        metrics_nodes_mem = Metric('kube_metrics_server_nodes_mem', 'Metrics Server Nodes Memory', 'gauge')
        metrics_nodes_cpu = Metric('kube_metrics_server_nodes_cpu', 'Metrics Server Nodes CPU', 'gauge')

        metrics_response_time = Metric('kube_metrics_server_response_time', 'Metrics Server API Response Time', 'gauge')
        metrics_response_time.add_sample('kube_metrics_server_response_time', value=total_time, labels={ 'api_url': '{}/metrics.k8s.io'.format(self.api_url) })
        yield metrics_response_time

        for node in nodes.get('items', []):
            node_instance = node['metadata']['name']
            node_cpu = node['usage']['cpu']
            node_cpu = node_cpu.translate(str.maketrans('', '', string.ascii_letters))
            node_mem = node['usage']['memory']
            node_mem = node_mem.translate(str.maketrans('', '', string.ascii_letters))

            metrics_nodes_mem.add_sample('kube_metrics_server_nodes_mem', value=int(node_mem), labels={ 'instance': node_instance })
            metrics_nodes_cpu.add_sample('kube_metrics_server_nodes_cpu', value=int(node_cpu), labels={ 'instance': node_instance })

        yield metrics_nodes_mem
        yield metrics_nodes_cpu

        metrics_pods_mem = Metric('kube_metrics_server_pods_mem', 'Metrics Server Pods Memory', 'gauge')
        metrics_pods_cpu = Metric('kube_metrics_server_pods_cpu', 'Metrics Server Pods CPU', 'gauge')

        for pod in pods.get('items', []):
            pod_name = pod['metadata']['name']
            pod_namespace = pod['metadata']['namespace']

            pod_container_mem = 0
            pod_container_cpu = 0
            pod_container_name = ""

            for container in pod['containers']:
                pod_container_name = container['name']
                pod_container_cpu = container['usage']['cpu']
                pod_container_cpu = pod_container_cpu.translate(str.maketrans('', '', string.ascii_letters))
                pod_container_mem = container['usage']['memory']
                pod_container_mem = pod_container_mem.translate(str.maketrans('', '', string.ascii_letters))

                if not any(name in self.names_blacklist for name in [pod_container_name, pod_name, pod_namespace]):
                    metrics_pods_mem.add_sample('kube_metrics_server_pods_mem', value=int(pod_container_mem), labels={ 'pod_name': pod_name, 'pod_namespace': pod_namespace, 'pod_container_name': pod_container_name })
                    metrics_pods_cpu.add_sample('kube_metrics_server_pods_cpu', value=int(pod_container_cpu), labels={ 'pod_name': pod_name, 'pod_namespace': pod_namespace, 'pod_container_name': pod_container_name })

        yield metrics_pods_mem
        yield metrics_pods_cpu
Example #28
 def _get_scrape_duration_metric(self, start_time):
     metric = Metric(
         "trafficserver_scrape_duration_seconds",
         "Time the Traffic Server scrape took, in seconds.",
         "gauge",
     )
     metric.add_sample(
         "trafficserver_scrape_duration_seconds",
         value=time.time() - start_time,
         labels={},
     )
     return metric
Example #29
    def collect(self):
        # define environment variables
        zabbix_exp_url = os.environ.get('ZABBIX_EXP_URL', 'http://localhost/')
        zabbix_exp_username = os.environ.get('ZABBIX_EXP_USERNAME', 'Admin')
        zabbix_exp_password = os.environ.get('ZABBIX_EXP_PASSWORD', 'zabbix')

        zapi = ZabbixAPI(zabbix_exp_url)
        zapi.login(zabbix_exp_username, zabbix_exp_password)

        # create a prometheus metric
        metric = Metric('zabbix_warning', 'Current Zabbix Warning Count',
                        'gauge')
        # Get a list of all issues (AKA tripped triggers)

        triggers = zapi.trigger.get(
            only_true=1,
            skipDependent=1,
            monitored=1,
            active=1,
            output='extend',
            expandDescription=1,
            selectHosts=['host'],
        )

        # Do another query to find out which issues are Unacknowledged
        unack_triggers = zapi.trigger.get(
            only_true=1,
            skipDependent=1,
            monitored=1,
            active=1,
            output='extend',
            expandDescription=1,
            selectHosts=['host'],
            withLastEventUnacknowledged=1,
        )

        unack_trigger_ids = [t['triggerid'] for t in unack_triggers]

        for t in triggers:
            t['unacknowledged'] = t['triggerid'] in unack_trigger_ids

        # Count triggers currently in problem state (value == 1)
        warn_cnt = sum(1 for t in triggers if int(t['value']) == 1)

        # append data to the metric
        metric.add_sample('zabbix_warning', value=int(warn_cnt), labels={})
        yield metric
Example #30
    def collect(self):
        print('Go to localhost')
        user1 = os.popen(
            "mpstat -P ALL 1 1 | awk '/Average:/ && $2 ~ /[a-z]/ {print $3}'"
        ).readline().strip()
        nice1 = os.popen(
            "mpstat -P ALL 1 1 | awk '/Average:/ && $2 ~ /[a-z]/ {print $4}'"
        ).readline().strip()
        syst = os.popen(
            "mpstat -P ALL 1 1 | awk '/Average:/ && $2 ~ /[a-z]/ {print $5}'"
        ).readline().strip()
        iowait1 = os.popen(
            "mpstat -P ALL 1 1 | awk '/Average:/ && $2 ~ /[a-z]/ {print $6}'"
        ).readline().strip()

        idle1 = os.popen(
            "mpstat -P ALL 1 1 | awk '/Average:/ && $2 ~ /[a-z]/ {print $12}'"
        ).readline().strip()
        metric = Metric('Utilization_at_user_level',
                        'Percentage of utilization at user level', 'summary')
        metric.add_sample('Utilization_at_user_level',
                          value=float(user1),
                          labels={})
        yield metric

        metric = Metric(
            'Utilization_at_user_level_nicepriority',
            'Percentage of utilization at user level with nice priority',
            'summary')
        metric.add_sample('Utilization_at_user_level_nicepriority',
                          value=float(nice1),
                          labels={})
        yield metric

        metric = Metric('Utilization_at_kernel_level',
                        'Percentage of utilization at kernel level', 'summary')
        metric.add_sample('Utilization_at_kernel_level',
                          value=float(syst),
                          labels={})
        yield metric

        metric = Metric('IO_Wait', 'Time for which CPU is waiting for IO',
                        'summary')
        metric.add_sample('IO_Wait',
                          value=float(iowait1),
                          labels={})
        yield metric

        metric = Metric('time_when_cpu_idle', 'Time for which Cpu was idle',
                        'summary')
        metric.add_sample('time_when_cpu_idle', value=float(idle1), labels={})
        yield metric
Example #31
    def collect(self):
        self.metrics = self._get_metrics()

        if self.metrics:
            for k, v in self.metrics.items():
                metric = Metric(k, k, 'gauge')
                labels = {}
                metric.add_sample(k, value=v, labels=labels)

                if metric.samples:
                    yield metric
Example #32
  def collect(self):
    Symbols = ['GOOG', 'CSCO', 'BABA', 'AAPL', 'IBM', 'GLOB']
    #Symbols = [ 'GOOG' ]

    strSymbols = cat_to_string(Symbols)
    JSp = GoogleFinanceAPI()
   
    if JSp.get(strSymbols):
        #JSp.Quotes2Stdout()  # show a little data, just for testing
        JSp.JsonQot2Obj()
        metric = Metric('stock_quotes', 'stock quotes last price', 'gauge')
        for quote in JSp.QuotesList:
            # Convert quotes to metric
            metric.add_sample('stock_quotes', value=float(quote.Last), labels={'symbol': quote.Symbol})
        yield metric	
Example #33
    def _parse_volume_metrics(self, data, volume):
        metric = Metric(
            'trafficserver_cache_avail_size_bytes_total',
            'Total cache available.',
            'gauge')
        metric.add_sample(
            'trafficserver_cache_avail_size_bytes_total',
            # bytes_total is the volume's capacity; the original read
            # bytes_used here, swapping the two statistics
            value=data[('proxy.process.cache.volume_{0}.'
                        'bytes_total').format(volume)],
            labels={'volume': str(volume)})
        yield metric

        metric = Metric(
            'trafficserver_cache_used_bytes_total',
            'Total cache used in bytes.',
            'gauge')
        metric.add_sample(
            'trafficserver_cache_used_bytes_total',
            value=data[('proxy.process.cache.volume_{0}.'
                        'bytes_used').format(volume)],
            labels={'volume': str(volume)})
        yield metric

        metric = Metric(
            'trafficserver_cache_operations_total',
            'Cache operation count.',
            'counter')
        for op in ('lookup', 'read', 'write', 'update', 'remove',
                   'evacuate', 'scan', 'read_busy'):
            for result in ('success', 'failure'):
                k = 'proxy.process.cache.volume_{volume}.{op}.{result}'.format(
                    volume=volume, op=op, result=result)
                metric.add_sample(
                    'trafficserver_cache_operations_total',
                    value=data[k],
                    labels={'volume': str(volume),
                            'operation': op,
                            'result': result})
        yield metric
Example #34
    def parse_metrics(self, data):
        """Generator for trafficserver metrics."""
        # Counter for server restarts
        metric = Metric(
            'trafficserver_restart_count',
            'Count of traffic_server restarts.',
            'counter')
        metric.add_sample(
            'trafficserver_restart_count',
            value=data['proxy.node.restarts.proxy.restart_count'],
            labels={})
        yield metric

        #
        # HTTP
        #
        # Connections
        metric = Metric(
            'trafficserver_connections_total',
            'Connection count.',
            'counter')
        metric.add_sample(
            'trafficserver_connections_total',
            value=data['proxy.process.http.total_client_connections'],
            labels={'source': 'client',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_connections_total',
            value=data['proxy.process.http.total_server_connections'],
            labels={'source': 'server',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_connections_total',
            value=data['proxy.process.http.total_parent_proxy_connections'],
            labels={'source': 'parent_proxy',
                    'protocol': 'http'})
        yield metric

        # Incoming requests
        metric = Metric(
            'trafficserver_requests_incoming',
            'Incoming requests.',
            'gauge')
        metric.add_sample(
            'trafficserver_requests_incoming',
            value=data['proxy.process.http.incoming_requests'],
            labels={'protocol': 'http'})
        yield metric

        # Client aborts
        metric = Metric(
            'trafficserver_error_client_aborts_total',
            'Client aborts.',
            'counter')
        metric.add_sample(
            'trafficserver_error_client_aborts_total',
            value=data['proxy.process.http.err_client_abort_count_stat'],
            labels={'protocol': 'http'})
        yield metric

        # Connect fails
        metric = Metric(
            'trafficserver_connect_failures_total',
            'Connect failures.',
            'counter')
        metric.add_sample(
            'trafficserver_connect_failures_total',
            value=data['proxy.process.http.err_connect_fail_count_stat'],
            labels={'protocol': 'http'})
        yield metric

        # Transaction count
        metric = Metric(
            'trafficserver_transactions_total',
            'Total transactions.',
            'counter')
        metric.add_sample(
            'trafficserver_transactions_total',
            value=data[('proxy.node.http.'
                        'user_agents_total_transactions_count')],
            labels={'source': 'user_agent',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_transactions_total',
            value=data[('proxy.node.http.'
                        'origin_server_total_transactions_count')],
            labels={'source': 'origin_server',
                    'protocol': 'http'})
        yield metric

        # Transaction time spent, total
        metric = Metric(
            'trafficserver_transactions_time_ms_total',
            'Total transaction time (ms).',
            'counter')
        metric.add_sample(
            'trafficserver_transactions_time_ms_total',
            value=data['proxy.process.http.total_transactions_time'],
            labels={})
        yield metric

        # Transaction time spent, hits
        metric = Metric(
            'trafficserver_hit_transaction_time_ms_total',
            'Total cache hit transaction time (ms).',
            'counter')
        metric.add_sample(
            'trafficserver_hit_transaction_time_ms_total',
            value=data['proxy.process.http.transaction_totaltime.hit_fresh'],
            labels={'state': 'fresh',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_hit_transaction_time_ms_total',
            value=data[('proxy.process.http.transaction_totaltime.'
                        'hit_revalidated')],
            labels={'state': 'revalidated',
                    'protocol': 'http'})
        yield metric

        # Transaction time spent, misses
        metric = Metric(
            'trafficserver_miss_transaction_time_ms_total',
            'Total cache miss transaction time (ms).',
            'counter')
        metric.add_sample(
            'trafficserver_miss_transaction_time_ms_total',
            value=data['proxy.process.http.transaction_totaltime.miss_cold'],
            labels={'state': 'cold',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_miss_transaction_time_ms_total',
            value=data[('proxy.process.http.transaction_totaltime.'
                        'miss_not_cacheable')],
            labels={'state': 'not_cacheable',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_miss_transaction_time_ms_total',
            value=data[('proxy.process.http.transaction_totaltime.'
                        'miss_changed')],
            labels={'state': 'changed',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_miss_transaction_time_ms_total',
            value=data[('proxy.process.http.transaction_totaltime.'
                        'miss_client_no_cache')],
            labels={'state': 'no_cache',
                    'protocol': 'http'})
        yield metric

        # Transaction time spent, errors
        metric = Metric(
            'trafficserver_error_transaction_time_ms_total',
            'Total cache error transaction time (ms).',
            'counter')
        metric.add_sample(
            'trafficserver_error_transaction_time_ms_total',
            value=data[('proxy.process.http.transaction_totaltime.errors.'
                        'aborts')],
            labels={'state': 'abort',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_error_transaction_time_ms_total',
            value=data[('proxy.process.http.transaction_totaltime.errors.'
                        'possible_aborts')],
            labels={'state': 'possible_abort',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_error_transaction_time_ms_total',
            value=data[('proxy.process.http.transaction_totaltime.errors.'
                        'connect_failed')],
            labels={'state': 'connect_failed',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_error_transaction_time_ms_total',
            value=data[('proxy.process.http.transaction_totaltime.errors.'
                        'other')],
            labels={'state': 'other',
                    'protocol': 'http'})
        yield metric

        # Transaction time spent, other
        metric = Metric(
            'trafficserver_other_transaction_time_ms_total',
            'Total other/unclassified transaction time (ms).',
            'counter')
        metric.add_sample(
            'trafficserver_other_transaction_time_ms_total',
            value=data[('proxy.process.http.transaction_totaltime.other.'
                        'unclassified')],
            labels={'state': 'unclassified',
                    'protocol': 'http'})
        yield metric

        # Transaction count, hits
        metric = Metric(
            'trafficserver_transaction_hits_total',
            'Transaction hit counts.',
            'counter')
        metric.add_sample(
            'trafficserver_transaction_hits_total',
            value=data[('proxy.process.http.transaction_counts.'
                        'hit_fresh')],
            labels={'state': 'fresh',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_transaction_hits_total',
            value=data[('proxy.process.http.transaction_counts.'
                        'hit_revalidated')],
            labels={'state': 'revalidated',
                    'protocol': 'http'})
        # Zero labels (misses)
        metric.add_sample(
            'trafficserver_transaction_hits_total',
            value=0,
            labels={'state': 'cold',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_transaction_hits_total',
            value=0,
            labels={'state': 'not_cacheable',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_transaction_hits_total',
            value=0,
            labels={'state': 'changed',
                    'protocol': 'http'})
        yield metric

        # Transaction count, misses
        metric = Metric(
            'trafficserver_transaction_misses_total',
            'Transaction miss counts.',
            'counter')
        metric.add_sample(
            'trafficserver_transaction_misses_total',
            value=data[('proxy.process.http.transaction_counts.'
                        'miss_cold')],
            labels={'state': 'cold',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_transaction_misses_total',
            value=data[('proxy.process.http.transaction_counts.'
                        'miss_not_cacheable')],
            labels={'state': 'not_cacheable',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_transaction_misses_total',
            value=data[('proxy.process.http.transaction_counts.'
                        'miss_changed')],
            labels={'state': 'changed',
                    'protocol': 'http'})
        # Zero labels (hits)
        metric.add_sample(
            'trafficserver_transaction_misses_total',
            value=0,
            labels={'state': 'fresh',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_transaction_misses_total',
            value=0,
            labels={'state': 'revalidated',
                    'protocol': 'http'})
        yield metric

        # Transaction count, errors
        metric = Metric(
            'trafficserver_transaction_errors_total',
            'Transaction error counts.',
            'counter')
        metric.add_sample(
            'trafficserver_transaction_errors_total',
            value=data[('proxy.process.http.transaction_counts.errors.'
                        'aborts')],
            labels={'state': 'abort',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_transaction_errors_total',
            value=data[('proxy.process.http.transaction_counts.errors.'
                        'possible_aborts')],
            labels={'state': 'possible_abort',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_transaction_errors_total',
            value=data[('proxy.process.http.transaction_counts.errors.'
                        'connect_failed')],
            labels={'state': 'connect_failed',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_transaction_errors_total',
            value=data[('proxy.process.http.transaction_counts.errors.'
                        'other')],
            labels={'state': 'other',
                    'protocol': 'http'})
        yield metric

        # Transaction count, others
        metric = Metric(
            'trafficserver_transaction_others_total',
            'Transaction other/unclassified counts.',
            'counter')
        metric.add_sample(
            'trafficserver_transaction_others_total',
            value=data[('proxy.process.http.transaction_counts.other.'
                        'unclassified')],
            labels={'state': 'unclassified',
                    'protocol': 'http'})
        yield metric

        # HTTP Responses
        metric = Metric(
            'trafficserver_responses_total',
            'Response count.',
            'counter')
        for code in TS_RESPONSE_CODES:
            key = 'proxy.process.http.{code}_responses'.format(code=code)
            metric.add_sample(
                'trafficserver_responses_total',
                value=data[key],
                labels={'code': code,
                        'protocol': 'http'})
        yield metric

        # HTTP Requests
        metric = Metric(
            'trafficserver_requests_total',
            'Request count.',
            'counter')
        for method in HTTP_VERBS_LOWER:
            key = 'proxy.process.http.{method}_requests'.format(method=method)
            metric.add_sample(
                'trafficserver_requests_total',
                value=data[key],
                labels={'method': method,
                        'protocol': 'http'})
        yield metric

        # Invalid requests
        metric = Metric(
            'trafficserver_client_requests_invalid_total',
            'Invalid client requests.',
            'counter')
        metric.add_sample(
            'trafficserver_client_requests_invalid_total',
            value=data['proxy.process.http.invalid_client_requests'],
            labels={'protocol': 'http'})
        yield metric

        # Requests without Host header
        metric = Metric(
            'trafficserver_client_requests_missing_host_hdr_total',
            'Client requests missing host header.',
            'counter')
        metric.add_sample(
            'trafficserver_client_requests_missing_host_hdr_total',
            value=data['proxy.process.http.missing_host_hdr'],
            labels={'protocol': 'http'})
        yield metric

        # Request size
        metric = Metric(
            'trafficserver_request_size_bytes_total',
            'Request size in bytes.',
            'counter')
        metric.add_sample(
            'trafficserver_request_size_bytes_total',
            value=data['proxy.node.http.user_agent_total_request_bytes'],
            labels={'source': 'user_agent',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_request_size_bytes_total',
            value=data['proxy.node.http.origin_server_total_request_bytes'],
            labels={'source': 'origin_server',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_request_size_bytes_total',
            value=data['proxy.node.http.parent_proxy_total_request_bytes'],
            labels={'source': 'parent_proxy',
                    'protocol': 'http'})
        yield metric

        # Response size
        metric = Metric(
            'trafficserver_response_size_bytes_total',
            'Response size in bytes.',
            'counter')
        metric.add_sample(
            'trafficserver_response_size_bytes_total',
            value=data['proxy.node.http.user_agent_total_response_bytes'],
            labels={'source': 'user_agent',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_response_size_bytes_total',
            value=data['proxy.node.http.origin_server_total_response_bytes'],
            labels={'source': 'origin_server',
                    'protocol': 'http'})
        metric.add_sample(
            'trafficserver_response_size_bytes_total',
            value=data['proxy.node.http.parent_proxy_total_response_bytes'],
            labels={'source': 'parent_proxy',
                    'protocol': 'http'})
        yield metric

        #
        # Cache
        #
        # Gather all cache volumes for cache statistics
        volumes = set()
        for key in data:
            if key.startswith('proxy.process.cache.volume_'):
                m = CACHE_VOLUMES.match(key)
                volumes.add(int(m.group(1)))

        # Create all cache volume metrics
        for volume in volumes:
            for metric in self._parse_volume_metrics(data, volume):
                yield metric

        metric = Metric(
            'trafficserver_ram_cache_hits_total',
            'RAM cache hit count.',
            'counter')
        metric.add_sample(
            'trafficserver_ram_cache_hits_total',
            value=data['proxy.process.cache.ram_cache.hits'],
            # the RAM cache stat is global, not per-volume; the original
            # label relied on the leftover loop variable `volume`
            labels={})
        yield metric

        metric = Metric(
            'trafficserver_ram_cache_misses_total',
            'RAM cache miss count.',
            'counter')
        metric.add_sample(
            'trafficserver_ram_cache_misses_total',
            value=data['proxy.process.cache.ram_cache.misses'],
            labels={})
        yield metric

        metric = Metric(
            'trafficserver_ram_cache_avail_size_bytes_total',
            'RAM cache available in bytes.',
            'gauge')
        metric.add_sample(
            'trafficserver_ram_cache_avail_size_bytes_total',
            value=data['proxy.process.cache.ram_cache.total_bytes'],
            labels={})
        yield metric

        metric = Metric(
            'trafficserver_ram_cache_used_bytes_total',
            'RAM cache used in bytes.',
            'gauge')
        metric.add_sample(
            'trafficserver_ram_cache_used_bytes_total',
            value=data['proxy.process.cache.ram_cache.bytes_used'],
            labels={})
        yield metric
Example #35
 def collect(self):
     metric = Metric("nonnumber", "Non number", 'untyped')
     metric.add_sample("nonnumber", {}, MyNumber())
     yield metric