Ejemplo n.º 1
0
    def test_duplicate_metrics_raises(self):
        """Registering a metric whose name (or any auto-generated sample
        suffix such as _created/_sum/_count/_bucket) collides with an
        already-registered collector must raise ValueError."""
        registry = CollectorRegistry()
        Counter('c_total', 'help', registry=registry)
        self.assertRaises(ValueError, Counter, 'c_total', 'help', registry=registry)
        self.assertRaises(ValueError, Gauge, 'c_total', 'help', registry=registry)
        self.assertRaises(ValueError, Gauge, 'c_created', 'help', registry=registry)

        Gauge('g_created', 'help', registry=registry)
        self.assertRaises(ValueError, Gauge, 'g_created', 'help', registry=registry)
        # A Counter named 'g' would emit 'g_created', which is already taken.
        self.assertRaises(ValueError, Counter, 'g', 'help', registry=registry)

        Summary('s', 'help', registry=registry)
        self.assertRaises(ValueError, Summary, 's', 'help', registry=registry)
        self.assertRaises(ValueError, Gauge, 's_created', 'help', registry=registry)
        self.assertRaises(ValueError, Gauge, 's_sum', 'help', registry=registry)
        self.assertRaises(ValueError, Gauge, 's_count', 'help', registry=registry)
        # We don't currently expose quantiles, but let's prevent future
        # clashes anyway.
        self.assertRaises(ValueError, Gauge, 's', 'help', registry=registry)

        Histogram('h', 'help', registry=registry)
        self.assertRaises(ValueError, Histogram, 'h', 'help', registry=registry)
        # Clashes against various suffixes.
        self.assertRaises(ValueError, Summary, 'h', 'help', registry=registry)
        self.assertRaises(ValueError, Gauge, 'h_count', 'help', registry=registry)
        self.assertRaises(ValueError, Gauge, 'h_sum', 'help', registry=registry)
        self.assertRaises(ValueError, Gauge, 'h_bucket', 'help', registry=registry)
        self.assertRaises(ValueError, Gauge, 'h_created', 'help', registry=registry)
        # The name of the histogram itself isn't taken.
        Gauge('h', 'help', registry=registry)

        Info('i', 'help', registry=registry)
        self.assertRaises(ValueError, Gauge, 'i_info', 'help', registry=registry)
Ejemplo n.º 2
0
    def test_timer_not_observable(self):
        """time() on a labelled metric without label values must raise.

        Bug fix: the original try/except silently passed when no exception
        was raised at all, so a regression in the missing-label check would
        go unnoticed. assertRaises makes the expectation explicit.
        """
        g = Gauge('test', 'help', labelnames=('label',), registry=self.registry)

        with self.assertRaises(ValueError) as cm:
            g.time()
        self.assertIn('missing label values', str(cm.exception))
Ejemplo n.º 3
0
 def test_gauge_min(self):
     """With multiprocess_mode='min' the smallest value across PIDs wins."""
     gauge_a = Gauge('g', 'help', registry=None, multiprocess_mode='min')
     # Simulate a second process by switching the value-class PID provider.
     core._ValueClass = core._MultiProcessValue(lambda: 456)
     gauge_b = Gauge('g', 'help', registry=None, multiprocess_mode='min')
     self.assertEqual(0, self.registry.get_sample_value('g'))
     gauge_a.set(1)
     gauge_b.set(2)
     # min(1, 2) across the two processes.
     self.assertEqual(1, self.registry.get_sample_value('g'))
Ejemplo n.º 4
0
 def test_gauge_max(self):
     """With multiprocess_mode='max' the largest value across PIDs wins."""
     gauge_a = Gauge('g', 'help', registry=None, multiprocess_mode='max')
     # Simulate a second process by switching the value-class PID provider.
     values.ValueClass = MultiProcessValue(lambda: 456)
     gauge_b = Gauge('g', 'help', registry=None, multiprocess_mode='max')
     self.assertEqual(0, self.registry.get_sample_value('g'))
     gauge_a.set(1)
     gauge_b.set(2)
     # max(1, 2) across the two processes.
     self.assertEqual(2, self.registry.get_sample_value('g'))
Ejemplo n.º 5
0
class ZpoolCollector(object):
    """
    'zpool status' checker
    """
    # number of seconds the data collection is allowed to take
    max_time_to_run = 4
    zpool_collector_timeouts = Counter('solaris_exporter_zpool_timeouts',
                                       'timeouts')
    zpool_collector_errors = Counter(
        'solaris_exporter_zpool_errors',
        'Number of times when collector ran with errors')
    zpool_collector_run_time = Gauge('solaris_exporter_zpool_processing',
                                     'Time spent processing request')

    def collect(self):
        """Yield a gauge counting FAILED/DEGRADED lines in 'zpool status'."""
        with self.zpool_collector_run_time.time():
            output, task_return_code, task_timeouted = run_shell_command(
                '/usr/sbin/zpool status', self.max_time_to_run)
            if task_return_code != 0 or task_timeouted is not False:
                # Command failed or timed out: count the error, emit nothing.
                self.zpool_collector_errors.inc()
                if task_timeouted:
                    self.zpool_collector_timeouts.inc()
                return
            zpool = GaugeMetricFamily("solaris_exporter_zpool_faults",
                                      'faults in zpool status',
                                      labels=['host'])
            bad_states = ('FAILED', 'DEGRADED')
            faults = sum(
                1 for raw_line in output.splitlines()
                if any(state in raw_line.strip() for state in bad_states))
            zpool.add_metric([host_name], float(faults))
            yield zpool
Ejemplo n.º 6
0
class FmadmCollector(object):
    """
    'fmadm faulty' checker
    """
    # number of seconds the data collection is allowed to take
    max_time_to_run = 15
    fmadm_collector_timeouts = Counter('solaris_exporter_fmadm_timeouts',
                                       'timeouts')
    fmadm_collector_errors = Counter(
        'solaris_exporter_fmadm_errors',
        'Number of times when collector ran with errors')
    fmadm_collector_run_time = Gauge('solaris_exporter_fmadm_processing',
                                     'Time spent processing request')

    def collect(self):
        """Yield a gauge counting fault records ('TIME' header lines) in
        'fmadm faulty' output."""
        with self.fmadm_collector_run_time.time():
            output, task_return_code, task_timeouted = run_shell_command(
                '/usr/bin/pfexec /usr/sbin/fmadm faulty', self.max_time_to_run)
            if task_return_code != 0 or task_timeouted is not False:
                # Command failed or timed out: count the error, emit nothing.
                self.fmadm_collector_errors.inc()
                if task_timeouted:
                    self.fmadm_collector_timeouts.inc()
                return
            fmadm = GaugeMetricFamily("solaris_exporter_fmadm_faults",
                                      'faults in fmadm faulty',
                                      labels=['host'])
            faults = sum(1 for raw_line in output.splitlines()
                         if raw_line.strip().startswith('TIME'))
            fmadm.add_metric([host_name], float(faults))
            yield fmadm
Ejemplo n.º 7
0
class SVCSCollector(object):
    """
    'svcs -x' checker
    """
    # number of seconds the data collection is allowed to take
    max_time_to_run = 4
    svcs_x_collector_timeouts = Counter('solaris_exporter_svcs_x_timeouts',
                                        'timeouts')
    svcs_x_collector_errors = Counter(
        'solaris_exporter_svcs_x_errors',
        'Number of times when collector ran' + ' with errors')
    svcs_x_collector_run_time = Gauge('solaris_exporter_svcs_x_processing',
                                      'Time spent processing request')

    def collect(self):
        """Yield a gauge with the number of failed services from 'svcs -x'.

        Bug fix: the original yielded 'svcs_x' after the if/else, so a
        failed or timed-out command raised NameError ('svcs_x' unbound).
        The metric is now yielded only on the success path, matching the
        other collectors in this file.
        """
        with self.svcs_x_collector_run_time.time():
            output, task_return_code, task_timeouted = run_shell_command(
                '/usr/bin/svcs -x', self.max_time_to_run)
            if task_return_code == 0 and task_timeouted is False:
                svcs_x = GaugeMetricFamily(
                    "solaris_exporter_svcs_x_failed_services",
                    'failed services counter in svcs -x',
                    labels=['host'])
                # Each failed service is reported as an FMRI 'svc:/...' line.
                svcs_fail = sum(1 for line in output.splitlines()
                                if line.strip().startswith('svc:'))
                svcs_x.add_metric([host_name], float(svcs_fail))
                yield svcs_x
            else:
                self.svcs_x_collector_errors.inc()
                if task_timeouted:
                    self.svcs_x_collector_timeouts.inc()
Ejemplo n.º 8
0
class MemCollector(object):
    """
    Memory and SWAP Stats
    """
    mem_collector_run_time = Gauge('solaris_exporter_MemCollector_processing',
                                   'Time spent processing request')

    def collect(self):
        """Yield psutil virtual-memory and swap counters, in bytes."""
        with self.mem_collector_run_time.time():
            worker_stat_mem = GaugeMetricFamily(
                'solaris_exporter_memory_usage_bytes',
                'python psutil counters, Memory usage in bytes.',
                labels=['host', 'type', 'counter'])
            ram = psutil.virtual_memory()
            swap = psutil.swap_memory()
            # (type, counter, value) triples; emitted in a single loop.
            samples = (
                ('virtual', 'used', ram.used),
                ('virtual', 'available', ram.available),
                ('virtual', 'total', ram.total),
                ('virtual', 'free', ram.free),
                ('swap', 'total', swap.total),
                ('swap', 'used', swap.used),
                ('swap', 'free', swap.free),
                ('swap', 'sin', swap.sin),
                ('swap', 'sout', swap.sout),
            )
            for mem_type, counter, value in samples:
                worker_stat_mem.add_metric([host_name, mem_type, counter],
                                           value)
        yield worker_stat_mem
Ejemplo n.º 9
0
    def test_timer_not_observable(self):
        """Using time() as a context manager on a labelled metric without
        label values must be rejected."""
        g = Gauge('test', 'help', labelnames=('label',), registry=self.registry)

        def use_timer():
            with g.time():
                pass

        assert_not_observable(use_timer)
Ejemplo n.º 10
0
 def test_gauge_livesum(self):
     """livesum adds values across live processes and drops dead ones."""
     gauge_a = Gauge('g', 'help', registry=None, multiprocess_mode='livesum')
     # Simulate a second process (pid 456) creating the same gauge.
     values.ValueClass = MultiProcessValue(lambda: 456)
     gauge_b = Gauge('g', 'help', registry=None, multiprocess_mode='livesum')
     self.assertEqual(0, self.registry.get_sample_value('g'))
     gauge_a.set(1)
     gauge_b.set(2)
     self.assertEqual(3, self.registry.get_sample_value('g'))
     # Kill pid 123; its contribution disappears from the live sum.
     mark_process_dead(123, os.environ['PROMETHEUS_MULTIPROC_DIR'])
     self.assertEqual(2, self.registry.get_sample_value('g'))
Ejemplo n.º 11
0
 def test_gauge_livesum(self):
     """livesum adds values across live processes and drops dead ones."""
     gauge_a = Gauge('g', 'help', registry=None, multiprocess_mode='livesum')
     # Simulate a second process by switching the value-class PID provider.
     core._ValueClass = core._MultiProcessValue(lambda: 456)
     gauge_b = Gauge('g', 'help', registry=None, multiprocess_mode='livesum')
     self.assertEqual(0, self.registry.get_sample_value('g'))
     gauge_a.set(1)
     gauge_b.set(2)
     self.assertEqual(3, self.registry.get_sample_value('g'))
     # Kill pid 123; its contribution disappears from the live sum.
     mark_process_dead(123, os.environ['prometheus_multiproc_dir'])
     self.assertEqual(2, self.registry.get_sample_value('g'))
    def test_aggregates_live_and_archived_metrics(self):
        """Archived metric values must merge with values from live processes,
        and live-only gauges must vanish once their process dies."""
        pid = 456
        values.ValueClass = MultiProcessValue(lambda: pid)

        def files():
            # Sorted list of the per-process .db files in the multiproc dir.
            fs = os.listdir(os.environ['prometheus_multiproc_dir'])
            fs.sort()
            return fs

        c1 = Counter('c1', 'c1', registry=None)
        c1.inc(1)
        self.assertIn('counter_456.db', files())

        # Archiving folds the per-process file into the aggregate store.
        archive_metrics()
        self.assertNotIn('counter_456.db', files())
        self.assertEqual(1, self.registry.get_sample_value('c1_total'))

        pid = 789
        values.ValueClass = MultiProcessValue(lambda: pid)
        c1 = Counter('c1', 'c1', registry=None)
        c1.inc(2)
        g1 = Gauge('g1', 'g1', registry=None, multiprocess_mode="liveall")
        g1.set(5)
        self.assertIn('counter_789.db', files())
        # Pretend that pid 789 is live
        archive_metrics(aggregate_only=True)

        # The live counter should be merged with the archived counter, and the
        # liveall gauge should be included
        self.assertIn('counter_789.db', files())
        self.assertIn('gauge_liveall_789.db', files())
        self.assertEqual(3, self.registry.get_sample_value('c1_total'))
        self.assertEqual(
            5, self.registry.get_sample_value('g1', labels={u'pid': u'789'}))
        # Now pid 789 is dead
        archive_metrics()

        # The formerly live counter's value should be archived, and the
        # liveall gauge should be removed completely
        self.assertNotIn('counter_789.db', files())
        self.assertNotIn('gauge_liveall_789.db', files())
        self.assertEqual(3, self.registry.get_sample_value('c1_total'))
        self.assertEqual(
            None, self.registry.get_sample_value('g1', labels={u'pid':
                                                               u'789'}))
Ejemplo n.º 13
0
    def __init__(self, sonar_client: SonarQubeClient):
        """Discover supported SonarQube metrics and create one Gauge each.

        Args:
            sonar_client: API client used to fetch the metric catalogue.

        Fixes: the 'labels' tuple was defined but never used (the label
        names were duplicated inline); it is now the single source for both
        Gauge variants. Also corrects the 'Intitializing' log typo.
        """
        self._sonar_client = sonar_client
        self._cached_metrics = []

        # initialize gauges
        logging.info("Initializing...")
        self._metrics = {}
        raw_metrics = self._sonar_client.get_metrics()["metrics"]
        for raw_metric in raw_metrics:
            metric = Metric()
            for supported_m in CONF.supported_keys:
                if "domain" in raw_metric and raw_metric[
                        "domain"] == supported_m["domain"] and raw_metric[
                            "key"] in supported_m["keys"]:
                    metric.domain = raw_metric["domain"]
                    metric.key = raw_metric["key"]
                    metric.type = raw_metric["type"]
                    # Fall back to the display name when no description exists.
                    if "description" in raw_metric:
                        metric.description = raw_metric["description"]
                    else:
                        metric.description = raw_metric["name"]
                    # NOTE: the "tranformKeys"/"tranform" spelling is part of
                    # the config schema and Metric attribute names - keep it.
                    if "tranformKeys" in supported_m and raw_metric[
                            "key"] in supported_m["tranformKeys"].keys():
                        metric.tranform = True
                        metric.tranform_map = supported_m["tranformKeys"][
                            raw_metric["key"]]
                    self._metrics[metric.key] = metric
        self._queried_metrics = str()
        self._gauges = {}
        # Base label set; transformed metrics additionally expose the raw
        # value as a "value" label.
        labels = ("id", "key", "name", "domain", "type")
        for key, m in self._metrics.items():
            if m.tranform:
                self._gauges[m.key] = Gauge(name="sonar_{}".format(m.key),
                                            documentation=m.description,
                                            labelnames=labels + ("value",))
            else:
                self._gauges[m.key] = Gauge(name="sonar_{}".format(m.key),
                                            documentation=m.description,
                                            labelnames=labels)
            # Build the comma-separated key list passed to the measures API.
            self._queried_metrics = "{},{}".format(m.key,
                                                   self._queried_metrics)
        logging.info("Initialized %s metrics." % len(self._metrics.keys()))
Ejemplo n.º 14
0
 def test_gauge_max(self):
     """multiprocess_mode='max' reports the largest value across PIDs."""
     self.pid = 123
     first = Gauge('gmax', 'help', registry=None, multiprocess_mode='max')
     self.pid = 456
     second = Gauge('gmax', 'help', registry=None, multiprocess_mode='max')
     self.assertEqual(0, self.registry.get_sample_value('gmax', {'hostname': hostname}))
     self.pid = 123
     first.set(1)
     self.pid = 456
     second.set(2)
     # max(1, 2) across the two simulated processes.
     self.assertEqual(2, self.registry.get_sample_value('gmax', {'hostname': hostname}))
Ejemplo n.º 15
0
 def test_gauge_all(self):
     """Default mode keeps one 'g' sample per pid, even after death."""
     gauge_a = Gauge('g', 'help', registry=None)
     core._ValueClass = core._MultiProcessValue(lambda: 456)
     gauge_b = Gauge('g', 'help', registry=None)
     self.assertEqual(0, self.registry.get_sample_value('g', {'pid': '123'}))
     self.assertEqual(0, self.registry.get_sample_value('g', {'pid': '456'}))
     gauge_a.set(1)
     gauge_b.set(2)
     # Marking 123 dead must not erase its per-pid sample in 'all' mode.
     mark_process_dead(123, os.environ['prometheus_multiproc_dir'])
     self.assertEqual(1, self.registry.get_sample_value('g', {'pid': '123'}))
     self.assertEqual(2, self.registry.get_sample_value('g', {'pid': '456'}))
Ejemplo n.º 16
0
 def test_unregister_works(self):
     """Unregistering a Summary frees its sample names for reuse."""
     registry = CollectorRegistry()
     s = Summary('s', 'help', registry=registry)
     # While the Summary is registered, 's_count' is a claimed sample name.
     self.assertRaises(ValueError, Gauge, 's_count', 'help', registry=registry)
     registry.unregister(s)
     # After unregistering, the same name is accepted without error.
     Gauge('s_count', 'help', registry=registry)
Ejemplo n.º 17
0
    def test_reset_registry_with_labels(self):
        """registry.reset() must zero every sample of every metric type,
        including labelled children and all histogram bucket/sum/count
        series."""
        registry = CollectorRegistry()

        gauge = Gauge('g', 'help', ['l'], registry=registry)
        gauge.labels('a').inc()
        self.assertEqual(1, registry.get_sample_value('g', {'l': 'a'}))

        counter = Counter('c_total', 'help', ['l'], registry=registry)
        counter.labels('a').inc()
        self.assertEqual(1, registry.get_sample_value('c_total', {'l': 'a'}))

        summary = Summary('s', 'help', ['l'], registry=registry)
        summary.labels('a').observe(10)
        self.assertEqual(1, registry.get_sample_value('s_count', {'l': 'a'}))
        self.assertEqual(10, registry.get_sample_value('s_sum', {'l': 'a'}))

        histogram = Histogram('h', 'help', ['l'], registry=registry)
        histogram.labels('a').observe(2)
        # 2 falls in the (1.0, 2.5] bucket; cumulative buckets above it all
        # read 1.
        self.assertEqual(0, registry.get_sample_value('h_bucket', {'le': '1.0', 'l': 'a'}))
        self.assertEqual(1, registry.get_sample_value('h_bucket', {'le': '2.5', 'l': 'a'}))
        self.assertEqual(1, registry.get_sample_value('h_bucket', {'le': '5.0', 'l': 'a'}))
        self.assertEqual(1, registry.get_sample_value('h_bucket', {'le': '+Inf', 'l': 'a'}))
        self.assertEqual(1, registry.get_sample_value('h_count', {'l': 'a'}))
        self.assertEqual(2, registry.get_sample_value('h_sum', {'l': 'a'}))

        # Every sample above must read zero after the reset.
        registry.reset()

        self.assertEqual(0, registry.get_sample_value('g', {'l': 'a'}))

        self.assertEqual(0, registry.get_sample_value('c_total', {'l': 'a'}))

        self.assertEqual(0, registry.get_sample_value('s_count', {'l': 'a'}))
        self.assertEqual(0, registry.get_sample_value('s_sum', {'l': 'a'}))

        self.assertEqual(0, registry.get_sample_value('h_bucket', {'le': '1.0', 'l': 'a'}))
        self.assertEqual(0, registry.get_sample_value('h_bucket', {'le': '2.5', 'l': 'a'}))
        self.assertEqual(0, registry.get_sample_value('h_bucket', {'le': '5.0', 'l': 'a'}))
        self.assertEqual(0, registry.get_sample_value('h_bucket', {'le': '+Inf', 'l': 'a'}))
        self.assertEqual(0, registry.get_sample_value('h_count', {'l': 'a'}))
        self.assertEqual(0, registry.get_sample_value('h_sum', {'l': 'a'}))
Ejemplo n.º 18
0
 def test_gauge_livesum(self):
     """Distributed livesum drops the contribution of a dead pid."""
     self.pid = 123
     first = Gauge('gls', 'help', registry=None, multiprocess_mode='livesum')
     self.pid = 456
     second = Gauge('gls', 'help', registry=None, multiprocess_mode='livesum')
     self.assertEqual(0, self.registry.get_sample_value('gls', {'hostname': hostname}))
     self.pid = 123
     first.set(1)
     self.pid = 456
     second.set(2)
     self.assertEqual(3, self.registry.get_sample_value('gls', {'hostname': hostname}))
     from prometheus_client.distributed import mark_distributed_process_dead
     # Kill pid 123; only pid 456's value remains in the live sum.
     mark_distributed_process_dead(123)
     self.assertEqual(2, self.registry.get_sample_value('gls', {'hostname': hostname}))
Ejemplo n.º 19
0
 def test_gauge_last(self):
     """multiprocess_mode='last' keeps the most recently written value."""
     self.pid = 123
     gauge = Gauge('g1last', 'help', registry=None, multiprocess_mode='last')
     gauge.set(1)
     # A later write from another pid overwrites the sample.
     self.pid = 456
     gauge.set(2)
     self.assertEqual(2, self.registry.get_sample_value('g1last'))
Ejemplo n.º 20
0
    def test_collect(self):
        """Multiprocess collection: counters merge across pids, gauges keep a
        per-pid sample, histograms accumulate into shared buckets."""
        pid = 0
        # The value class reads the closure, so rebinding 'pid' below
        # simulates writes from a different process.
        core._ValueClass = core._MultiProcessValue(lambda: pid)
        labels = dict((i, i) for i in 'abcd')

        def add_label(key, value):
            # Copy of the base labels with one extra/overridden entry.
            l = labels.copy()
            l[key] = value
            return l

        c = Counter('c', 'help', labelnames=labels.keys(), registry=None)
        g = Gauge('g', 'help', labelnames=labels.keys(), registry=None)
        h = Histogram('h', 'help', labelnames=labels.keys(), registry=None)

        c.labels(**labels).inc(1)
        g.labels(**labels).set(1)
        h.labels(**labels).observe(1)

        # Switch to a second simulated process and write again.
        pid = 1

        c.labels(**labels).inc(1)
        g.labels(**labels).set(1)
        h.labels(**labels).observe(5)

        metrics = dict((m.name, m) for m in self.collector.collect())

        # Counter: the two increments merge into a single sample.
        self.assertEqual(
            metrics['c'].samples, [Sample('c_total', labels, 2.0)]
        )
        # Gauge: one sample per pid, distinguished by the 'pid' label.
        metrics['g'].samples.sort(key=lambda x: x[1]['pid'])
        self.assertEqual(metrics['g'].samples, [
            Sample('g', add_label('pid', '0'), 1.0),
            Sample('g', add_label('pid', '1'), 1.0),
        ])

        # Histogram: observations 1 and 5 land in the le=1.0 and le=5.0
        # buckets; cumulative counts follow.
        metrics['h'].samples.sort(
            key=lambda x: (x[0], float(x[1].get('le', 0)))
        )
        expected_histogram = [
            Sample('h_bucket', add_label('le', '0.005'), 0.0),
            Sample('h_bucket', add_label('le', '0.01'), 0.0),
            Sample('h_bucket', add_label('le', '0.025'), 0.0),
            Sample('h_bucket', add_label('le', '0.05'), 0.0),
            Sample('h_bucket', add_label('le', '0.075'), 0.0),
            Sample('h_bucket', add_label('le', '0.1'), 0.0),
            Sample('h_bucket', add_label('le', '0.25'), 0.0),
            Sample('h_bucket', add_label('le', '0.5'), 0.0),
            Sample('h_bucket', add_label('le', '0.75'), 0.0),
            Sample('h_bucket', add_label('le', '1.0'), 1.0),
            Sample('h_bucket', add_label('le', '2.5'), 1.0),
            Sample('h_bucket', add_label('le', '5.0'), 2.0),
            Sample('h_bucket', add_label('le', '7.5'), 2.0),
            Sample('h_bucket', add_label('le', '10.0'), 2.0),
            Sample('h_bucket', add_label('le', '+Inf'), 2.0),
            Sample('h_count', labels, 2.0),
            Sample('h_sum', labels, 6.0),
        ]

        self.assertEqual(metrics['h'].samples, expected_histogram)
Ejemplo n.º 21
0
 def test_gauge_all(self):
     """Each pid contributes its own 'g' sample; death does not remove it."""
     gauge_a = Gauge('g', 'help', registry=None)
     # Simulate a second process by switching the value-class PID provider.
     values.ValueClass = MultiProcessValue(lambda: 456)
     gauge_b = Gauge('g', 'help', registry=None)
     self.assertEqual(0,
                      self.registry.get_sample_value('g', {'pid': '123'}))
     self.assertEqual(0,
                      self.registry.get_sample_value('g', {'pid': '456'}))
     gauge_a.set(1)
     gauge_b.set(2)
     # Marking 123 dead must not erase its per-pid sample in 'all' mode.
     mark_process_dead(123)
     self.assertEqual(1,
                      self.registry.get_sample_value('g', {'pid': '123'}))
     self.assertEqual(2,
                      self.registry.get_sample_value('g', {'pid': '456'}))
Ejemplo n.º 22
0
class FCinfoCollector(object):
    """
    FC links Multipath
    """
    # number of seconds the data collection is allowed to take
    max_time_to_run = 4
    fc_lun_collector_timeouts = Counter('solaris_exporter_fc_paths_timeouts',
                                        'timeouts')
    fc_lun_collector_errors = Counter(
        'solaris_exporter_fc_paths_errors',
        'Number of times when collector ran' + ' with errors')
    fc_lun_collector_run_time = Gauge('solaris_exporter_fc_paths_processing',
                                      'Time spent processing request')

    def collect(self):
        """Yield total/active FC path counts per LUN from 'mpathadm list lu'.

        Bug fix: 'device' is now initialized before the parsing loop, so a
        path-count line appearing before any '/dev/rdsk/...' line no longer
        raises NameError; such counts are filed under "unknown" and skipped
        when metrics are emitted, as before.
        """
        with self.fc_lun_collector_run_time.time():
            output, task_return_code, task_timeouted = run_shell_command(
                '/usr/sbin/mpathadm list lu', self.max_time_to_run)
            if task_return_code == 0 and task_timeouted is False:
                lines = output.splitlines()
                fc_lun = GaugeMetricFamily("solaris_exporter_fc_paths",
                                           '/usr/sbin/mpathadm list lu',
                                           labels=['device', 'stat', 'host'])
                fc_total_paths = {}
                fc_active_paths = {}
                device = "unknown"  # until a '/dev/rdsk/...' line is seen
                for line in lines:
                    content = line.strip()
                    if '/dev/rdsk/' in content:
                        # Strip the path prefix and the trailing 's2' slice.
                        device = re.sub(r'/dev/rdsk/(.*)s2', r'\1', content)
                    elif 'Total Path Count' in content:
                        content = content.split(':')
                        fc_total_paths[device] = content[1]
                    elif 'Operational Path Count:' in content:
                        content = content.split(':')
                        fc_active_paths[device] = content[1]
                    else:
                        # Any other line breaks the association with the
                        # previously seen device.
                        device = "unknown"
                for device in fc_total_paths.keys():
                    if device == "unknown":
                        continue
                    fc_lun.add_metric([device, 'active', host_name],
                                      float(fc_active_paths.get(device, 0)))
                    fc_lun.add_metric([device, 'total', host_name],
                                      float(fc_total_paths.get(device, 0)))
                yield fc_lun
            else:
                self.fc_lun_collector_errors.inc()
                if task_timeouted:
                    self.fc_lun_collector_timeouts.inc()
 def test_gauge_all(self):
     """Per-pid samples survive archiving and marking a process dead."""
     values.ValueClass = MultiProcessValue(lambda: 123)
     gauge_a = Gauge('g', 'help', registry=None, multiprocess_mode='all')
     values.ValueClass = MultiProcessValue(lambda: 456)
     gauge_b = Gauge('g', 'help', registry=None, multiprocess_mode='all')
     self.assertEqual(0,
                      self.registry.get_sample_value('g', {'pid': '123'}))
     self.assertEqual(0,
                      self.registry.get_sample_value('g', {'pid': '456'}))
     gauge_a.set(1)
     gauge_b.set(2)
     archive_metrics()
     # In 'all' mode a dead pid's sample is retained, not dropped.
     mark_process_dead(123, os.environ['prometheus_multiproc_dir'])
     self.assertEqual(1,
                      self.registry.get_sample_value('g', {'pid': '123'}))
     self.assertEqual(2,
                      self.registry.get_sample_value('g', {'pid': '456'}))
Ejemplo n.º 24
0
class DiskSpaceCollector(object):
    """
    Disk space stats
    Note that UFS inode info is NOT collected.
    """
    disk_space_collector_run_time = Gauge('solaris_exporter_diskspace_worker',
                                          'Time spent processing request')

    def collect(self):
        """Yield used/total/free/percent space for mounted zfs/ufs filesystems."""
        with self.disk_space_collector_run_time.time():
            worker_stat_space = GaugeMetricFamily(
                'solaris_exporter_diskspace_usage_bytes',
                'python psutil counters, diskspace usage in bytes.',
                labels=[
                    'host',
                    'statistic',
                    'mountpoint',
                    'device',
                    'fstype',
                ])

            # disk_partitions = my_disk_partitions(all=False)   # rewritten due to bug: https://github.com/giampaolo/psutil/issues/1674
            for device, mountpoint, fstype, opts in cext.disk_partitions():
                if fstype not in ('zfs', 'ufs'):
                    continue
                if '/VARSHARE' in device:
                    continue
                try:
                    spaceinfo = psutil.disk_usage(mountpoint)
                except OSError:
                    # Mountpoint vanished or is inaccessible; skip it.
                    continue
                stats = (
                    ('used', spaceinfo.used),
                    ('total', spaceinfo.total),
                    ('free', spaceinfo.free),
                    ('percent', spaceinfo.percent),
                )
                for statistic, value in stats:
                    worker_stat_space.add_metric(
                        [host_name, statistic, mountpoint, device, fstype],
                        value)
        yield worker_stat_space
Ejemplo n.º 25
0
class CpuTimeCollector(object):
    """
    CPU time may be translated in percent later
    """
    cpu_time_collector_run_time = Gauge('solaris_exporter_cpu_time_processing',
                                        'Time spent processing request')

    def collect(self):
        """Yield cumulative CPU time counters (user/system/idle/iowait).

        Bug fix: the iowait sample was labelled 'oiwait' (typo); it is now
        exported as 'iowait'. NOTE(review): dashboards querying the old
        label value must be updated.
        """
        with self.cpu_time_collector_run_time.time():
            worker_stat_cpu_time = CounterMetricFamily(
                'solaris_exporter_cpu_time',
                'python psutil counters, CPU usage time.',
                labels=['host', 'statistic'])
            cpuinfo = psutil.cpu_times(percpu=False)
            worker_stat_cpu_time.add_metric([host_name, 'user'], cpuinfo.user)
            worker_stat_cpu_time.add_metric([host_name, 'system'],
                                            cpuinfo.system)
            worker_stat_cpu_time.add_metric([host_name, 'idle'], cpuinfo.idle)
            worker_stat_cpu_time.add_metric([host_name, 'iowait'],
                                            cpuinfo.iowait)
        yield worker_stat_cpu_time
Ejemplo n.º 26
0
class CpuLoadCollector(object):
    """
    CPU load average 1, 5, 15 min, cpu count
    """
    cpu_load_collector_run_time = Gauge('solaris_exporter_cpu_load_processing',
                                        'Time spent processing request')

    def collect(self):
        """Yield 1/5/15-minute load averages and the virtual CPU count.

        Bug fix: the 5-minute sample was labelled 'load5m  ' (with trailing
        spaces); it is now exported as 'load5m', consistent with the other
        label values. NOTE(review): queries matching the old padded label
        must be updated.
        """
        with self.cpu_load_collector_run_time.time():
            worker_stat_cpu_load = GaugeMetricFamily(
                'solaris_exporter_cpu_load',
                'python psutil counters, system load avg.',
                labels=['host', 'statistic'])
            load1, load5, load15 = os.getloadavg()
            worker_stat_cpu_load.add_metric([host_name, 'load1m'], load1)
            worker_stat_cpu_load.add_metric([host_name, 'load5m'], load5)
            worker_stat_cpu_load.add_metric([host_name, 'load15m'], load15)
            # Number of virtual CPUs, derived from per-CPU percent samples.
            vcpu_count = len(psutil.cpu_percent(interval=None, percpu=True))
            worker_stat_cpu_load.add_metric([host_name, 'vcpu'], vcpu_count)
        yield worker_stat_cpu_load
Ejemplo n.º 27
0
    def buckets_to_metrics(self, metric_name, buckets):
        """Convert a raw libmedida bucket histogram into Prometheus series.

        Emits <metric_name>_count and <metric_name>_sum counters plus a
        <metric_name>_bucket gauge carrying an extra 'le' label, mirroring
        the native Prometheus histogram layout.

        Args:
            metric_name: base name for the generated series.
            buckets: raw dict with 'boundary_unit', 'type' and a 'buckets'
                list whose entries hold 'boundary', 'count' and 'sum'.
        """
        # Converts raw bucket metric into sorted list of buckets
        unit = buckets['boundary_unit']
        description = 'libmedida metric type: ' + buckets['type']
        c = Counter(metric_name + '_count',
                    description,
                    self.label_names,
                    registry=self.registry)
        s = Counter(metric_name + '_sum',
                    description,
                    self.label_names,
                    registry=self.registry)
        g = Gauge(metric_name + '_bucket',
                  description,
                  self.label_names + ['le'],
                  registry=self.registry)

        # Normalize boundaries to seconds so buckets sort correctly.
        measurements = []
        for bucket in buckets['buckets']:
            measurements.append({
                'boundary':
                self.duration_to_seconds(bucket['boundary'], unit),
                'count':
                bucket['count'],
                'sum':
                bucket['sum']
            })
        count = 0
        for m in sorted(measurements, key=lambda i: i['boundary']):
            # Buckets from core contain only values from their respective ranges.
            # Prometheus expects "le" buckets to be cummulative so we need some extra math
            count += m['count']
            c.labels(*self.labels).inc(m['count'])
            s.labels(*self.labels).inc(self.duration_to_seconds(
                m['sum'], unit))
            # Treat buckets larger than 30d as infinity
            if float(m['boundary']) > 30 * 86400:
                g.labels(*self.labels + ['+Inf']).inc(count)
            else:
                g.labels(*self.labels + [m['boundary']]).inc(count)
Ejemplo n.º 28
0
class TinvestCollector(object):
    """Prometheus collector exposing Tinkoff Invest (tinvest) account data.

    Scrapes positions, currency balances, ETFs and recent operations through
    the tinvest ``SyncClient`` and publishes them as gauges/info metrics.

    NOTE(review): the registry and every metric below are CLASS attributes,
    so all instances of this collector share one registry and one set of
    metrics — confirm only a single instance is ever created.
    """
    # Broker account whose portfolio and operations are scraped.
    account_id: str
    # Synchronous tinvest API client, created in __init__.
    client: SyncClient
    # Shared registry that all metrics below register into (class-level).
    registry: CollectorRegistry = CollectorRegistry()

    # --- per-position metrics -------------------------------------------
    position_average_price: Gauge = Gauge("position_average_price",
                                          "Average position price",
                                          labelnames=["name", "currency"],
                                          registry=registry)
    position_average_price_no_nkd: Gauge = Gauge(
        "position_average_price_no_nkd",
        "Average position price",
        labelnames=["name", "currency"],
        registry=registry)
    position_last_price: Gauge = Gauge("position_last_price",
                                       "Position last price",
                                       labelnames=["name"],
                                       registry=registry)
    position_close_price: Gauge = Gauge("position_close_price",
                                        "Position close price",
                                        labelnames=["name"],
                                        registry=registry)
    position_expected_yield: Gauge = Gauge("position_expected_yield",
                                           "Expected yield",
                                           labelnames=["name", "currency"],
                                           registry=registry)
    position_balance: Gauge = Gauge("position_balance",
                                    "Balance",
                                    labelnames=["name"],
                                    registry=registry)
    position_info: Info = Info("position",
                               "Position information",
                               labelnames=["name"],
                               registry=registry)
    position_lots: Gauge = Gauge("position_lots",
                                 "Lots count",
                                 labelnames=["name"],
                                 registry=registry)

    # --- currency metrics ------------------------------------------------
    currency_balance: Gauge = Gauge("currency_balance",
                                    "Currency balance",
                                    labelnames=["name"],
                                    registry=registry)

    # --- ETF metrics ------------------------------------------------------
    etf_info: Info = Info("etf",
                          "ETF information",
                          labelnames=["name"],
                          registry=registry)
    etf_last_price: Gauge = Gauge("etf_last_price",
                                  "ETF last price",
                                  labelnames=["name"],
                                  registry=registry)

    # --- operation metrics (incremented, never reset — see collect()) ----
    operation_commission: Gauge = Gauge(
        "operation_commission",
        "Operations commission",
        labelnames=["figi", "currency", "instrument_type", "operation_type"],
        registry=registry)
    operation_broker_commission: Gauge = Gauge(
        "operation_broker_commission",
        "Broker commission",
        labelnames=["figi", "currency", "instrument_type"],
        registry=registry)
    operation_coupon: Gauge = Gauge(
        "operation_coupon",
        "Coupon payments sum",
        labelnames=["figi", "currency", "instrument_type"],
        registry=registry)
    operation_dividend: Gauge = Gauge(
        "operation_dividend",
        "Dividend payments sum",
        labelnames=["figi", "currency", "instrument_type"],
        registry=registry)

    def __init__(self, token: str, account_id: str):
        """Create a client for the real (non-sandbox) API for *account_id*."""
        self.client = SyncClient(token, use_sandbox=False)
        self.account_id = account_id

    def collect(self):
        """Refresh all metric families from the API and yield them.

        NOTE(review): the operation_* gauges use inc() and are never reset
        between scrapes, so they accumulate across collect() calls — combined
        with the overlapping 1-minute window in __get_operations this may
        double-count operations; verify intended.
        """
        self.__generate_positions_metrics()
        self.__generate_currencies_metrics()
        self.__generate_etfs_metrics()
        self.__generate_operations_metrics()

        for metric in self.registry.collect():
            yield metric

    def __generate_positions_metrics(self):
        """Populate per-position price, yield, balance, lots and info metrics."""
        for position in self.__get_positions():
            # Bonds expose a price without accrued interest (no NKD); prefer
            # it when present, otherwise fall back to the plain average price.
            if position.average_position_price_no_nkd is not None:
                self.position_average_price_no_nkd.labels(position.name,
                                                          position.average_position_price_no_nkd.currency.name). \
                    set(position.average_position_price_no_nkd.value)
                currency = position.average_position_price_no_nkd.currency.name
            else:
                self.position_average_price.labels(
                    position.name, position.average_position_price.currency.name). \
                    set(position.average_position_price.value)
                currency = position.average_position_price.currency.name

            self.position_expected_yield.labels(position.name, position.expected_yield.currency.name). \
                set(position.expected_yield.value)

            self.position_balance.labels(position.name).set(position.balance)

            # One orderbook request per position to obtain last/close prices.
            orders = self.client.get_market_orderbook(position.figi, 0)
            self.position_last_price.labels(position.name).set(
                orders.payload.last_price)
            self.position_close_price.labels(position.name).set(
                orders.payload.close_price)

            self.position_info.labels(position.name).info({
                "currency":
                currency,
                "blocked":
                position.blocked or "0",
                "figi":
                position.figi,
                "instrument_type":
                position.instrument_type,
                "isin":
                position.isin or "",
                "ticker":
                position.ticker,
            })
            self.position_lots.labels(position.name).set(position.lots)

    def __generate_currencies_metrics(self):
        """Publish the balance of each portfolio currency."""
        for currency in self.__get_currencies():
            self.currency_balance.labels(currency.currency.name).set(
                currency.balance)

    def __generate_etfs_metrics(self):
        """Publish last price and static info for every market ETF."""
        for etf in self.__get_etfs():
            orders = self.client.get_market_orderbook(etf.figi, 0)
            self.etf_last_price.labels(etf.name).set(orders.payload.last_price)

            self.etf_info.labels(etf.name).info({
                "currency":
                etf.currency.name,
                "figi":
                etf.figi,
                "isin":
                etf.isin,
                "ticker":
                etf.ticker,
                "trade_status":
                orders.payload.trade_status.name,
            })

    def __generate_operations_metrics(self):
        """Accumulate commissions, coupons and dividends from recent operations."""
        for operation in self.__get_operations():
            if operation.commission is not None:
                self.operation_commission.labels(operation.figi, operation.commission.currency.name,
                                                 operation.instrument_type.name, operation.operation_type.name). \
                    inc(float(operation.commission.value))

            if operation.operation_type.name == 'broker_commission':
                self.operation_broker_commission.labels(operation.figi, operation.currency.name,
                                                        operation.instrument_type.name). \
                    inc(float(operation.payment))
            elif operation.operation_type.name == 'coupon':
                self.operation_coupon.labels(operation.figi, operation.currency.name, operation.instrument_type.name). \
                    inc(float(operation.payment))
            elif operation.operation_type.name == 'dividend':
                self.operation_dividend.labels(operation.figi, operation.currency.name,
                                               operation.instrument_type.name). \
                    inc(float(operation.payment))

    def __get_positions(self):
        """Return the account's portfolio positions."""
        portfolio = self.client.get_portfolio(self.account_id)
        return portfolio.payload.positions

    def __get_currencies(self):
        """Return the account's currency balances."""
        currencies = self.client.get_portfolio_currencies(self.account_id)
        return currencies.payload.currencies

    def __get_etfs(self):
        """Return all ETF instruments available on the market."""
        etfs = self.client.get_market_etfs()
        return etfs.payload.instruments

    def __get_operations(self):
        """Return operations from the last minute.

        NOTE(review): datetime.now() is naive (local time) — confirm the API
        expects local timestamps, and that the fixed 1-minute window matches
        the scrape interval (shorter intervals double-count, longer ones miss
        operations).
        """
        operations = self.client.get_operations(
            datetime.now() - timedelta(minutes=1),
            datetime.now(),
            broker_account_id=self.account_id)
        return operations.payload.operations
Ejemplo n.º 29
0
 def setUp(self):
     """Give each test an isolated registry holding one gauge named 'g'."""
     registry = CollectorRegistry()
     self.registry = registry
     self.gauge = Gauge('g', 'help', registry=registry)
Ejemplo n.º 30
0
class TestGauge(unittest.TestCase):
    def setUp(self):
        """Give each test an isolated registry holding one gauge named 'g'."""
        registry = CollectorRegistry()
        self.registry = registry
        self.gauge = Gauge('g', 'help', registry=registry)

    def test_gauge(self):
        """inc/dec/set must be reflected in the registry's sample value."""
        sample = lambda: self.registry.get_sample_value('g')
        self.assertEqual(0, sample())
        self.gauge.inc()
        self.assertEqual(1, sample())
        self.gauge.dec(3)
        self.assertEqual(-2, sample())
        self.gauge.set(9)
        self.assertEqual(9, sample())

    def test_inprogress_function_decorator(self):
        """track_inprogress() must bump the gauge only while the decorated
        function runs, and must preserve the wrapped function's signature."""
        self.assertEqual(0, self.registry.get_sample_value('g'))

        @self.gauge.track_inprogress()
        def f():
            self.assertEqual(1, self.registry.get_sample_value('g'))

        # inspect.getargspec was removed in Python 3.11; getfullargspec is the
        # supported replacement.  Check the same four fields the old
        # assertion covered.
        spec = inspect.getfullargspec(f)
        self.assertEqual(([], None, None, None),
                         (spec.args, spec.varargs, spec.varkw, spec.defaults))

        f()
        self.assertEqual(0, self.registry.get_sample_value('g'))

    def test_inprogress_block_decorator(self):
        """track_inprogress() as a context manager bumps the gauge inside the
        block and restores it on exit."""
        sample = lambda: self.registry.get_sample_value('g')
        self.assertEqual(0, sample())
        with self.gauge.track_inprogress():
            self.assertEqual(1, sample())
        self.assertEqual(0, sample())

    def test_gauge_function(self):
        """set_function makes the gauge report the callback's value and
        ignore direct mutation."""
        backing = {}
        self.gauge.set_function(lambda: len(backing))
        sample = lambda: self.registry.get_sample_value('g')
        self.assertEqual(0, sample())
        # Direct writes are ignored once a callback is installed.
        self.gauge.inc()
        self.assertEqual(0, sample())
        backing['a'] = None
        self.assertEqual(1, sample())

    def test_time_function_decorator(self):
        """time() must record the decorated function's duration in the gauge
        and keep the wrapped function's signature intact."""
        self.assertEqual(0, self.registry.get_sample_value('g'))

        @self.gauge.time()
        def f():
            time.sleep(.001)

        # inspect.getargspec was removed in Python 3.11; check the same four
        # fields via getfullargspec instead.
        spec = inspect.getfullargspec(f)
        self.assertEqual(([], None, None, None),
                         (spec.args, spec.varargs, spec.varkw, spec.defaults))

        f()
        self.assertNotEqual(0, self.registry.get_sample_value('g'))

    def test_function_decorator_multithread(self):
        """The time() decorator must record each call's own duration even
        when decorated calls overlap across threads."""
        self.assertEqual(0, self.registry.get_sample_value('g'))
        workers = 2
        pool = ThreadPoolExecutor(max_workers=workers)

        @self.gauge.time()
        def f(duration):
            time.sleep(duration)

        # Start two overlapping timed calls.  By the time we sample below,
        # the first (1s) call has finished; the second (2s) is still running
        # and must not corrupt the first call's recorded duration.
        expected_duration = 1
        pool.submit(f, expected_duration)
        time.sleep(0.7 * expected_duration)
        pool.submit(f, expected_duration * 2)
        time.sleep(expected_duration)

        # Allow some scheduler slack below the nominal duration.
        rounding_coefficient = 0.9
        adjusted_expected_duration = expected_duration * rounding_coefficient
        self.assertLess(adjusted_expected_duration,
                        self.registry.get_sample_value('g'))
        pool.shutdown(wait=True)

    def test_time_block_decorator(self):
        """time() used as a context manager stores the elapsed time."""
        sample = lambda: self.registry.get_sample_value('g')
        self.assertEqual(0, sample())
        with self.gauge.time():
            time.sleep(.001)
        self.assertNotEqual(0, sample())