示例#1
0
    def _register(self):
        """Register this gauge with the global REGISTRY.

        If a gauge with the same name is already registered (which
        usually only happens in tests, where gauges get re-created),
        the old one is unregistered first so registration cannot fail
        with a duplicate-name error.
        """
        # Membership test on the dict directly; `.keys()` is redundant.
        if self.name in all_gauges:
            # Lazy %-args: the message is only formatted if actually emitted.
            logger.warning("%s already registered, reregistering", self.name)
            REGISTRY.unregister(all_gauges.pop(self.name))

        REGISTRY.register(self)
        all_gauges[self.name] = self
示例#2
0
def register_cache(cache_type, cache_name, cache):
    """Register a cache object for Prometheus metric collection.

    Args:
        cache_type (str): kind of cache (e.g. "response_cache"); selects
            which family of gauges is updated at collection time.
        cache_name (str): name of the cache (used as the metric label)
        cache (object): the cache itself; must support ``len()``

    Returns:
        CacheMetric: an object which provides inc_{hits,misses,evictions}
            methods
    """

    # Check if the metric is already registered. Unregister it, if so.
    # This usually happens during tests, as at runtime these caches are
    # effectively singletons.
    metric_name = "cache_%s_%s" % (cache_type, cache_name)
    if metric_name in collectors_by_name:
        REGISTRY.unregister(collectors_by_name[metric_name])

    class CacheMetric(object):

        hits = 0
        misses = 0
        evicted_size = 0

        def inc_hits(self):
            self.hits += 1

        def inc_misses(self):
            self.misses += 1

        def inc_evictions(self, size=1):
            self.evicted_size += size

        def describe(self):
            return []

        def collect(self):
            try:
                if cache_type == "response_cache":
                    response_cache_size.labels(cache_name).set(len(cache))
                    response_cache_hits.labels(cache_name).set(self.hits)
                    response_cache_evicted.labels(cache_name).set(self.evicted_size)
                    response_cache_total.labels(cache_name).set(self.hits + self.misses)
                else:
                    cache_size.labels(cache_name).set(len(cache))
                    cache_hits.labels(cache_name).set(self.hits)
                    cache_evicted.labels(cache_name).set(self.evicted_size)
                    cache_total.labels(cache_name).set(self.hits + self.misses)
            except Exception as e:
                # logger.warn() is deprecated; logger.warning() is the
                # supported spelling.
                logger.warning("Error calculating metrics for %s: %s",
                               cache_name, e)
                raise

            # collect() must be a generator for prometheus_client; the real
            # values were pushed to the module-level gauges above.
            yield GaugeMetricFamily("__unused", "")

    metric = CacheMetric()
    REGISTRY.register(metric)
    caches_by_name[cache_name] = cache
    collectors_by_name[metric_name] = metric
    return metric
示例#3
0
def register_cache(cache_type, cache_name, cache, collect_callback=None):
    """Register a cache object for metric collection.

    Args:
        cache_type (str): kind of cache; selects which family of gauges is
            updated at collection time.
        cache_name (str): name of the cache
        cache (object): cache itself; must support ``len()``
        collect_callback (callable|None): if not None, a function which is called during
            metric collection to update additional metrics.

    Returns:
        CacheMetric: an object which provides inc_{hits,misses,evictions} methods
    """

    # Check if the metric is already registered. Unregister it, if so.
    # This usually happens during tests, as at runtime these caches are
    # effectively singletons.
    metric_name = "cache_%s_%s" % (cache_type, cache_name)
    if metric_name in collectors_by_name:
        REGISTRY.unregister(collectors_by_name[metric_name])

    class CacheMetric(object):

        hits = 0
        misses = 0
        evicted_size = 0

        def inc_hits(self):
            self.hits += 1

        def inc_misses(self):
            self.misses += 1

        def inc_evictions(self, size=1):
            self.evicted_size += size

        def describe(self):
            return []

        def collect(self):
            try:
                if cache_type == "response_cache":
                    response_cache_size.labels(cache_name).set(len(cache))
                    response_cache_hits.labels(cache_name).set(self.hits)
                    response_cache_evicted.labels(cache_name).set(
                        self.evicted_size)
                    response_cache_total.labels(cache_name).set(self.hits +
                                                                self.misses)
                else:
                    cache_size.labels(cache_name).set(len(cache))
                    cache_hits.labels(cache_name).set(self.hits)
                    cache_evicted.labels(cache_name).set(self.evicted_size)
                    cache_total.labels(cache_name).set(self.hits + self.misses)
                if collect_callback:
                    collect_callback()
            except Exception as e:
                # logger.warn() is deprecated; logger.warning() is the
                # supported spelling.
                logger.warning("Error calculating metrics for %s: %s",
                               cache_name, e)
                raise

            # collect() must be a generator for prometheus_client; the real
            # values were pushed to the module-level gauges above.
            yield GaugeMetricFamily("__unused", "")

    metric = CacheMetric()
    REGISTRY.register(metric)
    caches_by_name[cache_name] = cache
    collectors_by_name[metric_name] = metric
    return metric
    def test_collector_metrics(self):
        """Integration test: exercise every collector listed in tests/metrics.yaml.

        For each collector the Vrops API layer is fully mocked, a prometheus
        HTTP server is started in a daemon thread on a fresh port, scraped
        over HTTP, and the set of produced metric names is compared in both
        directions against the expected list from the yaml file (nothing
        extra, nothing missing).
        """
        self.metrics_yaml = yaml_read('tests/metrics.yaml')
        self.collector_config = yaml_read(os.environ['CONFIG'])
        self.target = os.getenv('TARGET')
        # Dummy token; nothing here talks to a real API.
        self.token = '2ed214d523-235f-h283-4566-6sf356124fd62::f234234-234'
        # every collector got to be tested in here
        self.random_prometheus_port = random.randrange(9000, 9700, 1)
        print("chosen testport: " + str(self.random_prometheus_port))

        # Mock the token/adapter discovery layer.
        BaseCollector.get_target_tokens = MagicMock(
            return_value={self.target: self.token})
        Vrops.get_token = MagicMock(return_value=(
            "2ed214d523-235f-h283-4566-6sf356124fd62::f234234-234", 200))

        vc = Vcenter(target=self.target, token=self.token)
        vc.name = "vcenter1"
        vc.uuid = "3628-93a1-56e84634050814"

        nsxt_adapter1 = NSXTAdapterInstance(target=self.target,
                                            token=self.token)
        nsxt_adapter2 = NSXTAdapterInstance(target=self.target,
                                            token=self.token)
        nsxt_adapter1.name = "nsxt_adapter1"
        nsxt_adapter2.name = "nsxt_adapter2"
        nsxt_adapter1.uuid = nsxt_adapter2.uuid = "3628-93a1-56e84634050814"

        Vrops.get_adapter = MagicMock(return_value=None)
        Vrops.get_vcenter_adapter = MagicMock(return_value=([vc][0]))
        Vrops.get_nsxt_adapter = MagicMock(
            return_value=([nsxt_adapter1, nsxt_adapter2]))

        # test tool get_resources to create resource objects
        nsxt1 = NSXTManagementCluster()
        nsxt2 = NSXTManagementCluster()
        nsxt3 = NSXTManagementCluster()
        nsxt1.name = "nsxt_mgmt_cluster1"
        nsxt2.name = "nsxt_mgmt_cluster2"
        nsxt3.name = "nsxt_mgmt_cluster3"
        nsxt1.uuid = "5628-9ba1-55e847050815"
        nsxt2.uuid = "3628-93a1-56e84634050814"
        nsxt3.uuid = "7422-91h7-52s842060815"
        nsxt1.resourcekind = nsxt2.resourcekind = nsxt3.resourcekind = "ManagementCluster"
        nsxt1.parent = nsxt2.parent = nsxt3.parent = "3628-93a1-56e84634050814"

        dc1 = Datacenter()
        dc2 = Datacenter()
        dc3 = Datacenter()
        dc1.name = "datacenter1"
        dc2.name = "datacenter2"
        dc3.name = "datacenter3"
        dc1.uuid = "3628-93a1-56e84634050814"
        dc2.uuid = "5628-9ba1-55e847050815"
        dc3.uuid = "7422-91h7-52s842060815"
        dc1.resourcekind = dc2.resourcekind = dc3.resourcekind = "Datacenter"
        dc1.parent = dc2.parent = dc3.parent = "3628-93a1-56e84634050814"

        cl1 = Cluster()
        cl2 = Cluster()
        cl3 = Cluster()
        cl1.name = "cluster1"
        cl2.name = "cluster2"
        cl3.name = "cluster3"
        cl1.uuid = "3628-93a1-56e84634050814"
        cl2.uuid = "5628-9ba1-55e847050815"
        cl3.uuid = "7422-91h7-52s842060815"
        cl1.resourcekind = cl2.resourcekind = cl3.resourcekind = "ClusterComputeResource"
        cl1.parent = cl2.parent = cl3.parent = "3628-93a1-56e84634050814"

        ds1 = Datastore()
        ds2 = Datastore()
        ds3 = Datastore()
        ds1.name = "vmfs_vc-w-0_p_ssd_bb091_001"
        ds2.name = "eph-bb112-1"
        ds3.name = "B121_Management_DS03"
        ds1.uuid = "3628-93a1-56e84634050814"
        ds2.uuid = "5628-9ba1-55e847050815"
        ds3.uuid = "7422-91h7-52s842060815"
        ds1.type = "vmfs_p_ssd"
        ds2.type = "ephemeral"
        ds3.type = "Management"
        ds1.resourcekind = ds2.resourcekind = ds3.resourcekind = "Datastore"
        ds1.parent = ds2.parent = ds3.parent = "7422-91h7-52s842060815"

        hs1 = Host()
        hs2 = Host()
        hs3 = Host()
        hs1.name = "hostsystem1"
        hs2.name = "hostsystem2"
        hs3.name = "hostsystem3"
        hs1.uuid = "3628-93a1-56e84634050814"
        hs2.uuid = "5628-9ba1-55e847050815"
        hs3.uuid = "7422-91h7-52s842060815"
        hs1.resourcekind = hs2.resourcekind = hs3.resourcekind = "HostSystem"
        hs1.parent = hs2.parent = hs3.parent = "7422-91h7-52s842060815"

        vm1 = VirtualMachine()
        vm2 = VirtualMachine()
        vm3 = VirtualMachine()
        vm1.name = "vm1"
        vm2.name = "vm2"
        vm3.name = "vm3"
        vm1.uuid = "3628-93a1-56e84634050814"
        vm2.uuid = "5628-9ba1-55e847050815"
        vm3.uuid = "7422-91h7-52s842060815"
        vm1.resourcekind = vm2.resourcekind = vm3.resourcekind = "VirtualMachine"
        vm1.parent = vm2.parent = vm3.parent = "7422-91h7-52s842060815"

        Vrops.get_nsxt_mgmt_cluster = MagicMock(
            return_value=[nsxt1, nsxt2, nsxt3])
        Vrops.get_datacenter = MagicMock(return_value=[dc1, dc2, dc3])
        Vrops.get_cluster = MagicMock(return_value=[cl1, cl2, cl3])
        Vrops.get_datastores = MagicMock(return_value=[ds1, ds2, ds3])
        Vrops.get_hosts = MagicMock(return_value=[hs1, hs2, hs3])
        Vrops.get_vms = MagicMock(return_value=[vm1, vm2, vm3])

        Vrops.get_latest_stat = MagicMock(return_value=1)
        Vrops.get_property = MagicMock(return_value="test_property")
        Vrops.get_project_ids = MagicMock(
            return_value=[{
                "3628-93a1-56e84634050814": "0815"
            }, {
                "7422-91h7-52s842060815": "0815"
            }, {
                "5628-9ba1-55e847050815": "internal"
            }])
        # InventoryBuilder runs in a daemon thread so it dies with the test.
        thread = Thread(target=InventoryBuilder,
                        args=('./tests/test.json', 8000, 180, 300))
        thread.daemon = True
        thread.start()

        # Each collector module is imported dynamically and exercised in turn.
        for collector in self.metrics_yaml.keys():
            print("\nTesting " + collector)
            class_module = importlib.import_module(f'collectors.{collector}')
            collector_instance = class_module.__getattribute__(collector)()

            if "Stats" in collector:
                # Fabricate three stat samples per configured metric key.
                multiple_metrics_generated = list()
                for metric in self.collector_config[collector]:
                    multiple_metrics_generated.append({
                        "resourceId": "3628-93a1-56e84634050814",
                        "stat-list": {
                            "stat": [{
                                "timestamps": [1582797716394],
                                "statKey": {
                                    "key": metric['key']
                                },
                                "data": [88.0]
                            }]
                        }
                    })
                    multiple_metrics_generated.append({
                        "resourceId": "3628-93a1-56e84634050814",
                        "stat-list": {
                            "stat": [{
                                "timestamps": [1582797716394],
                                "statKey": {
                                    "key": metric['key']
                                },
                                "data": [44.0]
                            }]
                        }
                    })
                    multiple_metrics_generated.append({
                        "resourceId": "3628-93a1-56e84634050814",
                        "stat-list": {
                            "stat": [{
                                "timestamps": [1582797716394],
                                "statKey": {
                                    "key": metric['key']
                                },
                                "data": [55.0]
                            }]
                        }
                    })
                Vrops.get_latest_stats_multiple = MagicMock(
                    return_value=(multiple_metrics_generated, 200, 0.5))

            if "Properties" in collector:
                # Fabricate property samples (one numeric, two textual).
                multiple_metrics_generated = list()
                for metric in self.collector_config[collector]:
                    multiple_metrics_generated.append({
                        "resourceId": "3628-93a1-56e84634050814",
                        "property-contents": {
                            "property-content": [{
                                "timestamps": [1582797716394],
                                "statKey": metric['key'],
                                "data": [88.0]
                            }]
                        }
                    })
                    multiple_metrics_generated.append({
                        "resourceId": "3628-93a1-56e84634050814",
                        "property-contents": {
                            "property-content": [{
                                "timestamps": [1582797716394],
                                "statKey": metric['key'],
                                "values": ["test"]
                            }]
                        }
                    })
                    multiple_metrics_generated.append({
                        "resourceId": "3628-93a1-56e84634050814",
                        "property-contents": {
                            "property-content": [{
                                "timestamps": [1582797716394],
                                "statKey": metric['key'],
                                "values": ["test"]
                            }]
                        }
                    })
                Vrops.get_latest_properties_multiple = MagicMock(
                    return_value=(multiple_metrics_generated, 200, 0.5))

            thread_list = list()

            # start prometheus server to provide metrics later on

            thread1 = Thread(target=run_prometheus_server,
                             args=(self.random_prometheus_port,
                                   [collector_instance]))
            thread1.daemon = True
            thread1.start()
            thread_list.append(thread1)
            # give grandpa thread some time to get prometheus started and run a couple intervals of InventoryBuilder
            time.sleep(3)

            print("prometheus query port " + str(self.random_prometheus_port))
            c = http.client.HTTPConnection("localhost:" +
                                           str(self.random_prometheus_port))
            c.request("GET", "/")
            r = c.getresponse()
            self.assertEqual(r.status, 200,
                             "HTTP server return code should be 200")
            self.assertEqual(r.reason, "OK", "HTTP status should be OK")

            # Parse the exposition text, keeping only labelled sample lines
            # and skipping comments and default process/python metrics.
            data = r.read().decode()
            data_array = data.split('\n')
            metrics = set()
            for entry in data_array:
                if entry.startswith('#'):
                    continue
                if entry.startswith('python_gc'):
                    continue
                if entry.startswith('process_'):
                    continue
                if entry.startswith('python_info'):
                    continue
                split_entry = entry.split("}")
                if len(split_entry) != 2:
                    continue
                metrics.add(split_entry[0] + "}")

            metrics_yaml_list = self.metrics_yaml[collector]
            self.assertTrue(metrics_yaml_list,
                            msg=collector + " has no metrics defined, FIX IT!")
            self.assertTrue(
                metrics,
                msg=collector +
                " is not producing any metrics at all, how should I continue?")

            # check if there are more metrics being produced and they are not listed in metrics.yaml?!
            issubsetdifference = metrics.difference(metrics_yaml_list)
            self.assertTrue(
                metrics.issubset(metrics_yaml_list),
                msg=collector +
                ": metric not covered by testcase, probably missing in yaml\n"
                + "\n".join(issubsetdifference))
            # check if all metrics from yaml are here
            supersetdifference = set(metrics_yaml_list).difference(metrics)
            self.assertTrue(set(metrics).issuperset(metrics_yaml_list),
                            msg=collector + ": missing metrics from yaml:\n" +
                            "\n".join(supersetdifference))

            for t in thread_list:
                t.join(timeout=5)

            # we don't want to have any port locks if prometheus server thread is not shutting down
            self.random_prometheus_port += 1
            REGISTRY.unregister(collector_instance)
示例#5
0
 def shutdown(self) -> None:
     """Remove this exporter's collector from the global REGISTRY."""
     REGISTRY.unregister(self._collector)
import os
import logging
from flask import Flask, redirect, Response
from collector import MarathonAppCollector
from prometheus_client import PROCESS_COLLECTOR
from prometheus_client.core import REGISTRY
from prometheus_client.exposition import generate_latest

# Marathon endpoint; overridable via the MARATHON_URL environment variable.
MARATHON_URL = os.environ.get('MARATHON_URL', 'http://leader.mesos:8080/')
# Drop the default process collector so only Marathon app metrics are exposed.
REGISTRY.unregister(PROCESS_COLLECTOR)
REGISTRY.register(MarathonAppCollector(MARATHON_URL))
app = Flask(__name__)


@app.route('/')
def home():
    """Send visitors of the root URL on to the metrics endpoint."""
    metrics_path = '/metrics'
    return redirect(metrics_path)


@app.route('/metrics')
def metrics():
    """Expose every registered collector in Prometheus text format."""
    return Response(generate_latest(REGISTRY), content_type='text/plain')


if __name__ == '__main__':
    log_format = u'[%(asctime)s] p%(process)s {%(pathname)s:%(lineno)d}' \
                 u' %(levelname)s - %(message)s'
    logging.basicConfig(
        level=logging.INFO,
        format=log_format,
示例#7
0
    def test_collector_metrics(self):
        """Integration test: exercise every collector listed in tests/metrics.yaml.

        For each collector the Resources API layer is fully mocked, a
        prometheus HTTP server is started in a daemon thread on a fresh
        port, scraped over HTTP, and the list of produced metric names is
        compared in both directions against the expected list from the
        yaml file (nothing extra, nothing missing).
        """
        metrics_yaml = YamlRead('tests/metrics.yaml').run()

        # every collector got to be tested in here
        random_prometheus_port = random.randrange(9000, 9700, 1)
        print("chosen testport: " + str(random_prometheus_port))

        # Dummy token; nothing here talks to a real API.
        Resources.get_token = MagicMock(
            return_value="2ed214d523-235f-h283-4566-6sf356124fd62::f234234-234"
        )
        Resources.get_adapter = MagicMock(
            return_value=[{
                'name': "vcenter1",
                'uuid': '5628-9ba1-55e84701'
            }])
        # InventoryBuilder runs in a daemon thread so it dies with the test.
        thread = Thread(target=InventoryBuilder,
                        args=(
                            './tests/test.json',
                            80,
                        ))
        thread.daemon = True
        thread.start()

        for collector in metrics_yaml.keys():
            print("\nTesting " + collector)

            # test tool get_resources to create resource objects

            Resources.get_datacenter = MagicMock(
                return_value=[{
                    'name': 'datacenter1',
                    'uuid': '5628-9ba1-55e847050814'
                }, {
                    'name': 'datacenter2',
                    'uuid': '5628-9ba1-55e847050814'
                }])
            Resources.get_cluster = MagicMock(
                return_value=[{
                    'name': 'cluster1',
                    'uuid': '3628-93a1-56e84634050814'
                }, {
                    'name': 'cluster2',
                    'uuid': '5628-9ba1-55e847050814'
                }])
            Resources.get_hosts = MagicMock(
                return_value=[{
                    'name': 'hostsystem1',
                    'uuid': '3628-93a1-56e84634050814'
                }, {
                    'name': 'hostsystem2',
                    'uuid': '5628-9ba1-55e847050814'
                }])
            Resources.get_vmfolders = MagicMock(
                return_value=[{
                    'name': 'vmfolder1',
                    'uuid': '3628-93a1-56e84634050814'
                }, {
                    'name': 'vmfolder2',
                    'uuid': '5628-9ba1-55e847050814'
                }])
            Resources.get_virtualmachines = MagicMock(
                return_value=[{
                    'name': 'vm1',
                    'uuid': '3628-93a1-56e84634050814'
                }, {
                    'name': 'vm2',
                    'uuid': '5628-9ba1-55e847050814'
                }])
            Resources.get_datastores = MagicMock(
                return_value=[{
                    'name': 'datastore1',
                    'uuid': '3628-93a1-56e84634050814'
                }, {
                    'name': 'datastore2',
                    'uuid': '5628-9ba1-55e847050814'
                }])
            Resources.get_resources = MagicMock(
                return_value=[{
                    'name': 'resource1',
                    'uuid': '5628-9ba1-55e847050814'
                }, {
                    'name': 'resource2',
                    'uuid': '5628-9ba1-55e847050814'
                }])
            Resources.get_latest_stat = MagicMock(return_value=1)
            Resources.get_property = MagicMock(return_value="test_property")

            if 'Stats' in collector:
                # mocking all values from yaml
                statkey_yaml = YamlRead('collectors/statkey.yaml').run()
                multiple_metrics_generated = list()
                for statkey_pair in statkey_yaml[collector]:
                    multiple_metrics_generated.append({
                        "resourceId": "3628-93a1-56e84634050814",
                        "stat-list": {
                            "stat": [{
                                "timestamps": [1582797716394],
                                "statKey": {
                                    "key": statkey_pair['statkey']
                                },
                                "data": [88.0]
                            }]
                        }
                    })
                    multiple_metrics_generated.append({
                        "resourceId": "5628-9ba1-55e847050814",
                        "stat-list": {
                            "stat": [{
                                "timestamps": [1582797716394],
                                "statKey": {
                                    "key": statkey_pair['statkey']
                                },
                                "data": [44.0]
                            }]
                        }
                    })
                Resources.get_latest_stat_multiple = MagicMock(
                    return_value=multiple_metrics_generated)

            if "Properties" in collector:
                # Fabricate enum/number/info property samples per yaml key.
                propkey_yaml = YamlRead('collectors/property.yaml').run()
                multiple_enum_properties_generated = list()
                if 'enum_metrics' in propkey_yaml[collector]:
                    for propkey_pair in propkey_yaml[collector][
                            'enum_metrics']:
                        multiple_enum_properties_generated.append({
                            'resourceId':
                            '3628-93a1-56e84634050814',
                            'propkey':
                            propkey_pair['property'],
                            'data':
                            0,
                            'latest_state':
                            'test_enum_property'
                        })
                        multiple_enum_properties_generated.append({
                            'resourceId':
                            "5628-9ba1-55e847050814",
                            'propkey':
                            propkey_pair['property'],
                            'data':
                            0,
                            'latest_state':
                            'test_enum_property'
                        })
                Resources.get_latest_enum_properties_multiple = MagicMock(
                    return_value=multiple_enum_properties_generated)

                multiple_number_properties_generated = list()
                if 'number_metrics' in propkey_yaml[collector]:
                    for propkey_pair in propkey_yaml[collector][
                            'number_metrics']:
                        multiple_number_properties_generated.append({
                            'resourceId':
                            '3628-93a1-56e84634050814',
                            'propkey':
                            propkey_pair['property'],
                            'data':
                            19.54
                        })
                        multiple_number_properties_generated.append({
                            'resourceId':
                            "5628-9ba1-55e847050814",
                            'propkey':
                            propkey_pair['property'],
                            'data':
                            '6.5'
                        })
                Resources.get_latest_number_properties_multiple = MagicMock(
                    return_value=multiple_number_properties_generated)

                multiple_info_properties_generated = list()
                if 'info_metrics' in propkey_yaml[collector]:
                    for propkey_pair in propkey_yaml[collector][
                            'info_metrics']:
                        multiple_info_properties_generated.append({
                            'resourceId':
                            '3628-93a1-56e84634050814',
                            'propkey':
                            propkey_pair['property'],
                            'data':
                            'test_info_property'
                        })
                        multiple_info_properties_generated.append({
                            'resourceId':
                            "5628-9ba1-55e847050814",
                            'propkey':
                            propkey_pair['property'],
                            'data':
                            'test_info_property'
                        })
                Resources.get_latest_info_properties_multiple = MagicMock(
                    return_value=multiple_info_properties_generated)

            thread_list = list()

            # start prometheus server to provide metrics later on
            collector_instance = globals()[collector]()
            thread1 = Thread(target=run_prometheus_server,
                             args=(random_prometheus_port,
                                   [collector_instance]))
            thread1.daemon = True
            thread1.start()
            thread_list.append(thread1)
            # give grandpa thread some time to get prometheus started and run a couple intervals of InventoryBuilder
            time.sleep(10)

            print("prometheus query port " + str(random_prometheus_port))
            c = http.client.HTTPConnection("localhost:" +
                                           str(random_prometheus_port))
            c.request("GET", "/")
            r = c.getresponse()

            self.assertEqual(r.status, 200,
                             "HTTP server return code should be 200")
            self.assertEqual(r.reason, "OK", "HTTP status should be OK")

            # Parse the exposition text, keeping only labelled sample lines
            # and skipping comments and default process/python metrics.
            data = r.read().decode()
            data_array = data.split('\n')
            metrics = list()
            for entry in data_array:
                if entry.startswith('#'):
                    continue
                if entry.startswith('python_gc'):
                    continue
                if entry.startswith('process_'):
                    continue
                if entry.startswith('python_info'):
                    continue
                split_entry = entry.split("}")
                if len(split_entry) != 2:
                    continue
                metrics.append(split_entry[0] + "}")

            metrics_yaml_list = metrics_yaml[collector]['metrics']
            self.assertTrue(metrics_yaml_list,
                            msg=collector + " has no metrics defined, FIX IT!")
            self.assertTrue(
                metrics,
                msg=collector +
                " is not producing any metrics at all, how should I continue?")

            # check if there are more metrics being produced and they are not listed in metrics.yaml?!
            issubsetdifference = set(metrics).difference(metrics_yaml_list)
            self.assertTrue(
                set(metrics).issubset(metrics_yaml_list),
                msg=collector +
                ": metric not covered by testcase, probably missing in yaml\n"
                + "\n".join(issubsetdifference))
            # check if all metrics from yaml are here
            supersetdifference = set(metrics_yaml_list).difference(metrics)
            self.assertTrue(set(metrics).issuperset(metrics_yaml_list),
                            msg=collector + ": missing metrics from yaml:\n" +
                            "\n".join(supersetdifference))

            for t in thread_list:
                t.join(timeout=5)

            # we don't want to have any port locks if prometheus server thread is not shutting down
            random_prometheus_port += 1
            REGISTRY.unregister(collector_instance)
示例#8
0
 def unregister(self):
     """Remove this collector from the global REGISTRY.

     A no-op when it is not currently registered, so calling this more
     than once is safe.
     """
     if not self.isRegister:
         return
     REGISTRY.unregister(self)
     self.isRegister = False
import os
import logging
from flask import Flask, redirect, Response
from collector import MarathonAppCollector
from prometheus_client import PROCESS_COLLECTOR
from prometheus_client.core import REGISTRY
from prometheus_client.exposition import generate_latest

# Marathon endpoint; overridable via the MARATHON_URL environment variable.
MARATHON_URL = os.environ.get(
        'MARATHON_URL',
        'http://leader.mesos:8080/')
# Drop the default process collector so only Marathon app metrics are exposed.
REGISTRY.unregister(PROCESS_COLLECTOR)
REGISTRY.register(MarathonAppCollector(MARATHON_URL))
app = Flask(__name__)


@app.route('/')
def home():
    """Send visitors of the root URL on to the metrics endpoint."""
    metrics_path = '/metrics'
    return redirect(metrics_path)


@app.route('/metrics')
def metrics():
    """Expose every registered collector in Prometheus text format."""
    return Response(generate_latest(REGISTRY), content_type='text/plain')


if __name__ == '__main__':
    log_format = u'[%(asctime)s] p%(process)s {%(pathname)s:%(lineno)d}' \
                 u' %(levelname)s - %(message)s'
    logging.basicConfig(
def main():
    """Puts the exporter together.

    Parses the CLI arguments and the two YAML files (HMC credentials and
    metric definitions), opens an HMC session and metrics context,
    registers the ZHMCUsageCollector, and serves metrics over HTTP until
    interrupted.
    """
    # If the session and context keys are not created, their destruction
    # should not be attempted.

    global VERBOSE_LEVEL  # pylint: disable=global-statement

    session = None
    context = None
    resources = None
    try:
        args = parse_args(sys.argv[1:])
        # The two --help-* options print their help text and exit cleanly.
        if args.help_creds:
            help_creds()
            sys.exit(0)
        if args.help_metrics:
            help_metrics()
            sys.exit(0)

        VERBOSE_LEVEL = args.verbose

        setup_logging(args.log, args.log_comp)

        log_exporter("---------------- zhmc_prometheus_exporter command "
                     "started ----------------")

        hmccreds_filename = args.c
        verbose("Parsing HMC credentials file: {}".format(hmccreds_filename))
        yaml_creds_content = parse_yaml_file(hmccreds_filename,
                                             'HMC credentials file',
                                             'hmccreds_schema.yaml')
        # metrics is required in the metrics schema:
        yaml_creds = yaml_creds_content["metrics"]
        # extra_labels is optional in the metrics schema:
        yaml_extra_labels = yaml_creds_content.get("extra_labels", [])

        verbose("Parsing metric definition file: {}".format(args.m))
        yaml_metric_content = parse_yaml_file(args.m, 'metric definition file',
                                              'metrics_schema.yaml')
        # metric_groups and metrics are required in the metrics schema:
        yaml_metric_groups = yaml_metric_content['metric_groups']
        yaml_metrics = yaml_metric_content['metrics']

        # Check that the correct format is used in the metrics section
        for mg, yaml_m in yaml_metrics.items():
            yaml_mg = yaml_metric_groups[mg]
            mg_type = yaml_mg.get('type', 'metric')
            if mg_type == 'metric' and not isinstance(yaml_m, dict):
                new_exc = ImproperExit(
                    "Metrics for metric group '{}' of type 'metric' must use "
                    "the dictionary format in metric definition file {}".
                    format(mg, args.m))
                new_exc.__cause__ = None  # pylint: disable=invalid-name
                raise new_exc

        # Unregister the default collectors (Python, Platform)
        if hasattr(REGISTRY, '_collector_to_names'):
            # pylint: disable=protected-access
            for coll in list(REGISTRY._collector_to_names.keys()):
                REGISTRY.unregister(coll)

        verbose("Timeout/retry configuration: "
                "connect: {r.connect_timeout} sec / {r.connect_retries} "
                "retries, read: {r.read_timeout} sec / {r.read_retries} "
                "retries.".format(r=RETRY_TIMEOUT_CONFIG))

        # hmc is required in the HMC creds schema:
        session = create_session(yaml_creds, hmccreds_filename)

        try:
            with zhmc_exceptions(session, hmccreds_filename):
                hmc_version = get_hmc_version(session)
                verbose("HMC version: {}".format(hmc_version))
                context, resources = create_metrics_context(
                    session, yaml_metric_groups, hmc_version)
        except (ConnectionError, AuthError, OtherError) as exc:
            raise ImproperExit(exc)

        # Flatten the extra-labels list into a dict for the collector.
        extra_labels = {}
        for item in yaml_extra_labels:
            # name, value are required in the HMC creds schema:
            extra_labels[item['name']] = item['value']
        extra_labels_str = ','.join(
            ['{}="{}"'.format(k, v) for k, v in extra_labels.items()])
        verbose("Using extra labels: {}".format(extra_labels_str))

        resource_cache = ResourceCache()
        coll = ZHMCUsageCollector(yaml_creds, session, context, resources,
                                  yaml_metric_groups, yaml_metrics,
                                  extra_labels, args.m, hmccreds_filename,
                                  resource_cache, hmc_version)

        verbose("Registering the collector and performing first collection")
        REGISTRY.register(coll)  # Performs a first collection

        verbose("Starting the HTTP server on port {}".format(args.p))
        start_http_server(int(args.p))

        # The HTTP server runs in a daemon thread; this sleep loop keeps the
        # main thread alive and turns Ctrl-C into a clean ProperExit.
        info("Exporter is up and running on port {}".format(args.p))
        while True:
            try:
                time.sleep(1)
            except KeyboardInterrupt:
                raise ProperExit
    except KeyboardInterrupt:
        info("Exporter interrupted before server start.")
        cleanup(session, context, resources)
        sys.exit(1)
    except EarlyExit as exc:
        info("Error: {}".format(exc), log=False)
        sys.exit(1)
    except ImproperExit as exc:
        info("Error: {}".format(exc))
        cleanup(session, context, resources)
        sys.exit(1)
    except ProperExit:
        info("Exporter interrupted after server start.")
        cleanup(session, context, resources)
        sys.exit(0)
            self.collections_done = 0

    def ensure_refreshed(self):
        """Refresh the page exactly once; later calls are no-ops."""
        if self.refreshed:
            return
        refresh_page(self.driver)
        self.refreshed = True

    def restart_driver(self):
        # Full restart: discard the current browser session, then bring up
        # a fresh logged-in one via start_driver().
        self.driver.quit()
        self.start_driver()

    def start_driver(self):
        # Launch Chrome with the module-level chrome_options, log in and
        # load the "Fachmann" page, and block until it has finished loading.
        # A freshly loaded page counts as refreshed for ensure_refreshed().
        self.driver = webdriver.Chrome(options=chrome_options)
        login_and_load_fachmann_page(self.driver)
        wait_until_page_loaded(self.driver)
        self.refreshed = True

    def __del__(self):
        """Best-effort browser cleanup when the instance is collected."""
        print("Shutting down...")
        # Guard: if start_driver() never ran or failed, there is no
        # `driver` attribute; an AttributeError raised from __del__ is
        # swallowed by CPython but printed as an unraisable warning.
        driver = getattr(self, "driver", None)
        if driver is not None:
            driver.quit()


if __name__ == "__main__":
    # Strip the default process/platform/GC collectors so the endpoint
    # serves only the custom collector's metrics.
    for c in [PROCESS_COLLECTOR, PLATFORM_COLLECTOR, GC_COLLECTOR]:
        REGISTRY.unregister(c)
    REGISTRY.register(CustomCollector())
    start_http_server(8000)
    print("Running...")
    # The HTTP server runs in a daemon thread; sleep forever to keep the
    # process alive.
    while True:
        time.sleep(100)
示例#12
0
    def collector_testrun(self, collector, rubric=None):
        """Run one collector against mocked vrops data and compare the
        metric names it exposes with the expectations in metrics.yaml.

        `collector` is the class name of the collector under test (looked
        up in globals()); `rubric` optionally selects a sub-section of the
        statkey/metrics YAML.
        """
        if 'Stats' in collector:
            # mocking all values from yaml
            statkey_yaml = yaml_read(os.environ['CONFIG'])['statkeys']
            multiple_metrics_generated = list()

            # One fake stat sample per statkey for each of three resources.
            def append_metrics(statkey_pair):
                multiple_metrics_generated.append({
                    "resourceId": "3628-93a1-56e84634050814",
                    "stat-list": {
                        "stat": [{
                            "timestamps": [1582797716394],
                            "statKey": {
                                "key": statkey_pair['statkey']
                            },
                            "data": [88.0]
                        }]
                    }
                })
                multiple_metrics_generated.append({
                    "resourceId": "5628-9ba1-55e847050815",
                    "stat-list": {
                        "stat": [{
                            "timestamps": [1582797716394],
                            "statKey": {
                                "key": statkey_pair['statkey']
                            },
                            "data": [44.0]
                        }]
                    }
                })
                multiple_metrics_generated.append({
                    "resourceId": "7422-91h7-52s842060815",
                    "stat-list": {
                        "stat": [{
                            "timestamps": [1582797716394],
                            "statKey": {
                                "key": statkey_pair['statkey']
                            },
                            "data": [55.0]
                        }]
                    }
                })

            if rubric:
                for statkey_pair in statkey_yaml[collector][rubric]:
                    append_metrics(statkey_pair)
            else:
                for statkey_pair in statkey_yaml[collector]:
                    append_metrics(statkey_pair)
            Vrops.get_latest_stat_multiple = MagicMock(
                return_value=multiple_metrics_generated)

        if "Properties" in collector:
            # Property collectors get three flavours of mocked properties:
            # enum, number and info — each for the same three resources.
            propkey_yaml = yaml_read(os.environ['CONFIG'])['properties']
            multiple_enum_properties_generated = list()
            if 'enum_metrics' in propkey_yaml[collector]:
                for propkey_pair in propkey_yaml[collector]['enum_metrics']:
                    multiple_enum_properties_generated.append({
                        'resourceId':
                        '3628-93a1-56e84634050814',
                        'propkey':
                        propkey_pair['property'],
                        'value':
                        "test_enum_property"
                    })
                    multiple_enum_properties_generated.append({
                        'resourceId':
                        "5628-9ba1-55e847050815",
                        'propkey':
                        propkey_pair['property'],
                        'value':
                        "test_enum_property"
                    })
                    multiple_enum_properties_generated.append({
                        'resourceId':
                        "7422-91h7-52s842060815",
                        'propkey':
                        propkey_pair['property'],
                        'value':
                        "test_enum_property"
                    })
            Vrops.get_latest_enum_properties_multiple = MagicMock(
                return_value=multiple_enum_properties_generated)

            multiple_number_properties_generated = list()
            if 'number_metrics' in propkey_yaml[collector]:
                for propkey_pair in propkey_yaml[collector]['number_metrics']:
                    # Mixed data types (float / str / int) on purpose —
                    # presumably to exercise the collector's coercion; TODO
                    # confirm against the collector implementation.
                    multiple_number_properties_generated.append({
                        'resourceId':
                        '3628-93a1-56e84634050814',
                        'propkey':
                        propkey_pair['property'],
                        'data':
                        19.54
                    })
                    multiple_number_properties_generated.append({
                        'resourceId':
                        "5628-9ba1-55e847050815",
                        'propkey':
                        propkey_pair['property'],
                        'data':
                        '6.5'
                    })
                    multiple_number_properties_generated.append({
                        'resourceId':
                        "7422-91h7-52s842060815",
                        'propkey':
                        propkey_pair['property'],
                        'data':
                        33
                    })
            Vrops.get_latest_number_properties_multiple = MagicMock(
                return_value=multiple_number_properties_generated)

            multiple_info_properties_generated = list()
            if 'info_metrics' in propkey_yaml[collector]:
                for propkey_pair in propkey_yaml[collector]['info_metrics']:
                    multiple_info_properties_generated.append({
                        'resourceId':
                        '3628-93a1-56e84634050814',
                        'propkey':
                        propkey_pair['property'],
                        'data':
                        'test_info_property'
                    })
                    multiple_info_properties_generated.append({
                        'resourceId':
                        "5628-9ba1-55e847050815",
                        'propkey':
                        propkey_pair['property'],
                        'data':
                        'test_info_property'
                    })
                    multiple_info_properties_generated.append({
                        'resourceId':
                        "7422-91h7-52s842060815",
                        'propkey':
                        propkey_pair['property'],
                        'data':
                        'test_info_property'
                    })
            Vrops.get_latest_info_properties_multiple = MagicMock(
                return_value=multiple_info_properties_generated)

        thread_list = list()

        # start prometheus server to provide metrics later on
        collector_instance = globals()[collector]()
        thread1 = Thread(target=run_prometheus_server,
                         args=(self.random_prometheus_port,
                               [collector_instance]))
        thread1.daemon = True
        thread1.start()
        thread_list.append(thread1)
        # give grandpa thread some time to get prometheus started and run a couple intervals of InventoryBuilder
        time.sleep(10)

        print("prometheus query port " + str(self.random_prometheus_port))
        c = http.client.HTTPConnection("localhost:" +
                                       str(self.random_prometheus_port))
        c.request("GET", "/")
        r = c.getresponse()
        self.assertEqual(r.status, 200,
                         "HTTP server return code should be 200")
        self.assertEqual(r.reason, "OK", "HTTP status should be OK")

        # Parse the exposition text: keep only labelled sample lines,
        # dropping comments and the default python/process metrics, and
        # normalize each to 'name{labels}' for set comparison.
        data = r.read().decode()
        data_array = data.split('\n')
        metrics = set()
        for entry in data_array:
            if entry.startswith('#'):
                continue
            if entry.startswith('python_gc'):
                continue
            if entry.startswith('process_'):
                continue
            if entry.startswith('python_info'):
                continue
            split_entry = entry.split("}")
            if len(split_entry) != 2:
                continue
            metrics.add(split_entry[0] + "}")

        if rubric:
            metrics_yaml_list = self.metrics_yaml[collector]['metrics'][rubric]
        else:
            metrics_yaml_list = self.metrics_yaml[collector]['metrics']
        self.assertTrue(metrics_yaml_list,
                        msg=collector + " has no metrics defined, FIX IT!")
        self.assertTrue(
            metrics,
            msg=collector +
            " is not producing any metrics at all, how should I continue?")

        # check if there are more metrics being produced and they are not listed in metrics.yaml?!
        issubsetdifference = metrics.difference(metrics_yaml_list)
        self.assertTrue(
            metrics.issubset(metrics_yaml_list),
            msg=collector +
            ": metric not covered by testcase, probably missing in yaml\n" +
            "\n".join(issubsetdifference))
        # check if all metrics from yaml are here
        supersetdifference = set(metrics_yaml_list).difference(metrics)
        self.assertTrue(set(metrics).issuperset(metrics_yaml_list),
                        msg=collector + ": missing metrics from yaml:\n" +
                        "\n".join(supersetdifference))

        for t in thread_list:
            t.join(timeout=5)

        # we don't want to have any port locks if prometheus server thread is not shutting down
        self.random_prometheus_port += 1
        REGISTRY.unregister(collector_instance)
def main():
    """Puts the exporter together.

    Parses CLI args and the credentials/metric-definition YAML files,
    creates an HMC session and metrics context, registers the
    ZHMCUsageCollector, and serves metrics over HTTP until interrupted.
    """
    # If the session and context keys are not created, their destruction
    # should not be attempted.

    global VERBOSE_LEVEL  # pylint: disable=global-statement

    # NOTE(review): False is used here as the "not created" sentinel that
    # delete_metrics_context() checks for (elsewhere in this file None is
    # used) — confirm both are accepted.
    session = False
    context = False
    try:
        args = parse_args(sys.argv[1:])
        # The two --help-* options print their help text and exit cleanly.
        if args.help_creds:
            help_creds()
            sys.exit(0)
        if args.help_metrics:
            help_metrics()
            sys.exit(0)
        VERBOSE_LEVEL = args.verbose

        setup_logging(args.log, args.log_comp)

        verbose("Parsing HMC credentials file: {}".format(args.c))
        yaml_creds_content = parse_yaml_file(
            args.c, 'HMC credentials file', 'hmccreds_schema.yaml')
        # metrics is required in the metrics schema:
        yaml_creds = yaml_creds_content["metrics"]
        # extra_labels is optional in the metrics schema:
        yaml_extra_labels = yaml_creds_content.get("extra_labels", [])

        verbose("Parsing metric definition file: {}".format(args.m))
        yaml_metric_content = parse_yaml_file(
            args.m, 'metric definition file', 'metrics_schema.yaml')
        # metric_groups and metrics are required in the metrics schema:
        yaml_metric_groups = yaml_metric_content['metric_groups']
        yaml_metrics = yaml_metric_content['metrics']

        # Unregister the default collectors (Python, Platform)
        if hasattr(REGISTRY, '_collector_to_names'):
            # pylint: disable=protected-access
            for coll in list(REGISTRY._collector_to_names.keys()):
                REGISTRY.unregister(coll)

        # hmc is required in the HMC creds schema:
        verbose("Creating a session with HMC {}".format(yaml_creds['hmc']))
        session = create_session(yaml_creds)

        hmc_version = get_hmc_version(session, args.c)
        verbose("HMC version: {}".format(hmc_version))

        try:
            context = create_metrics_context(session, yaml_metric_groups,
                                             args.c, hmc_version)
        except (ConnectionError, AuthError, OtherError) as exc:
            raise ImproperExit(exc)

        # Flatten the extra-labels list into a dict for the collector.
        extra_labels = dict()
        for item in yaml_extra_labels:
            # name, value are required in the HMC creds schema:
            extra_labels[item['name']] = item['value']
        extra_labels_str = ','.join(
            ['{}="{}"'.format(k, v) for k, v in extra_labels.items()])
        verbose("Using extra labels: {}".format(extra_labels_str))

        resource_cache = ResourceCache()
        coll = ZHMCUsageCollector(
            yaml_creds, session, context, yaml_metric_groups, yaml_metrics,
            extra_labels, args.m, args.c, resource_cache, hmc_version)

        verbose("Registering the collector and performing first collection")
        REGISTRY.register(coll)  # Performs a first collection

        verbose("Starting the HTTP server on port {}".format(args.p))
        start_http_server(int(args.p))

        # The HTTP server runs in a daemon thread; this sleep loop keeps
        # the main thread alive and turns Ctrl-C into a clean ProperExit.
        verbose("Exporter is up and running")
        while True:
            try:
                time.sleep(1)
            except KeyboardInterrupt:
                raise ProperExit
    except KeyboardInterrupt:
        print("Exporter interrupted before server start.")
        delete_metrics_context(session, context)
        sys.exit(1)
    except EarlyExit as exc:
        print("Error: {}".format(exc))
        sys.exit(1)
    except ImproperExit as exc:
        print("Error: {}".format(exc))
        delete_metrics_context(session, context)
        sys.exit(1)
    except ProperExit:
        print("Exporter interrupted after server start.")
        delete_metrics_context(session, context)
        sys.exit(0)
示例#14
0
        j_dbname + '?user='******'&password='******'echo "' + REQUEST + '" | ' + 'psql "' + \
    clear_url(clear_unnecessary_from_string(CONNECT_STRING)) + '"  | awk "{print $1}"'

# Regex matching an HH:MM:SS timestamp in the psql command output.
time_pattern = r'\d\d:\d\d:\d\d'

# Optionally strip prometheus_client's default collectors so only the
# exporter's own metrics are exposed.
# NOTE(review): _collector_to_names is a private prometheus_client
# attribute; other snippets in this file guard the same access with
# hasattr() — confirm against the pinned library version.
if DISABLE_DEFAULT_METRICS == "True":
    for coll in list(REGISTRY._collector_to_names.keys()):
        REGISTRY.unregister(coll)


def run_request(command):
    """Run *command* in a shell and return its combined stdout+stderr as text.

    NOTE(review): shell=True with a string command is injection-prone if
    `command` ever contains untrusted input; here it is assembled from the
    module-level CONNECT_STRING/REQUEST configuration.
    """
    # subprocess.run replaces the Popen/communicate/manual-close dance and
    # guarantees the pipe is cleaned up even if decoding fails.
    result = subprocess.run(command,
                            shell=True,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    return result.stdout.decode("utf-8")


# We give metrics
示例#15
0
]

# Listening port for the exporter; must parse as an integer.
try:
    LINKY_EXPORTER_PORT = int(os.environ.get('LINKY_EXPORTER_PORT', '8123'))
except ValueError:
    logging.error("LINKY_EXPORTER_PORT must be int !")
    sys.exit(1)

# TIC mode; validated against the mode names declared in LINKY_MODE above.
LINKY_EXPORTER_MODE = os.environ.get('LINKY_EXPORTER_MODE', 'HISTORIQUE')
VALID_MODE = [i['name'] for i in LINKY_MODE]
if not LINKY_EXPORTER_MODE in VALID_MODE:
    logging.error("LINKY_EXPORTER_MODE must be : %s", ' or '.join(VALID_MODE))
    sys.exit(1)

# REGISTRY Configuration
# Remove the default process/platform/GC collectors so only Linky metrics
# appear on the endpoint (the GC collector is looked up via one of its
# metric names in the private _names_to_collectors map).
REGISTRY.unregister(PROCESS_COLLECTOR)
REGISTRY.unregister(PLATFORM_COLLECTOR)
REGISTRY.unregister(REGISTRY._names_to_collectors['python_gc_objects_collected_total'])

# Linky Collector Class
class LinkyCollector():
    '''Linky Collector Class'''
    def __init__(self):
        # Presumably returns the serial connection once a valid Linky frame
        # has been observed on it — TODO confirm; _check_for_valid_frame is
        # defined outside this view.
        self.ser = self._check_for_valid_frame()

    def teleinfo(self):
        '''Read Teleinfo And Return Linky Frame Dict'''
        logging.debug("Reading Linky Teleinfo on %s.", LINKY_EXPORTER_INTERFACE)

        with self.ser:
            # Wait For New Linky Frame (Start with 0x02)
    )


class Expositor(object):
    """ Responsible for exposing metrics to prometheus """

    def collect(self):
        # Emit the module-level metrics in a stable (sorted-key) order.
        logging.info("Serving prometheus data")
        yield from (metrics[name] for name in sorted(metrics))


if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    # Replace every default collector with the single Expositor above.
    for collector in list(REGISTRY._collector_to_names):
        REGISTRY.unregister(collector)
    REGISTRY.register(Expositor())

    start_time = time.time()

    # Populate data before exposing over http
    scrape()
    start_http_server(8000)

    ready_time = time.time()
    print("Ready time: ", ready_time-start_time)

    # Re-scrape forever; poll interval (seconds) comes from the
    # KOJI_POLL_INTERVAL env var.
    while True:
        time.sleep(int(os.environ.get('KOJI_POLL_INTERVAL', '3')))
        scrape()
示例#17
0
def prometheus_registry():
    """Yield the global prometheus REGISTRY, then wipe it.

    Intended as a test fixture body: the test runs at the ``yield`` point,
    and afterwards every collector is unregistered so registrations do not
    leak between tests.
    """
    from prometheus_client.core import REGISTRY
    yield REGISTRY
    # Snapshot the collectors first: unregister() mutates
    # _collector_to_names, and deleting entries while iterating the live
    # .keys() view raises "RuntimeError: dictionary changed size during
    # iteration".
    for c in list(REGISTRY._collector_to_names):
        REGISTRY.unregister(c)
示例#18
0
            "The datetime the record was taken",
            value=int(datetime.now().timestamp()),
        )
        yield g

        ss = {
            states[sta]: True if data["status"] == states[sta] else False
            for sta in states
        }
        s = StateSetMetricFamily("solar_state", "The state of the inverter",
                                 ss)
        yield s


if __name__ == "__main__":
    REGISTRY.unregister(PROCESS_COLLECTOR)
    REGISTRY.unregister(PLATFORM_COLLECTOR)
    REGISTRY.unregister(GC_COLLECTOR)

    REGISTRY.register(CustomCollector())
    start_http_server(int(get_config("local_port")))
    while True:
        time.sleep(1)

#
# url_to_scrape = "https://solar.d.sawrc.com/realtime.csv"
# auth = httpx.BasicAuth("root", "shimopa55l!nk")
#
# "Time;Udc1[V];Idc1[A];Pdc1[W];Udc2[V];Idc2[A];Pdc2[W];Uac1[V];Iac1[A];Uac2[V];Iac2[A];Uac3[V];Iac3[A];Pdc[W];Pac[W];Tsys[C]"
#
# output_data = httpx.get(url_to_scrape, auth=auth)
示例#19
0
    def test_collector_metrics(self):
        """For every collector listed in tests/metrics.yaml: mock all vrops
        API calls, run the collector behind a throwaway prometheus server,
        and compare the exposed metric names against the yaml expectations.
        """
        self.metrics_yaml = yaml_read('tests/metrics.yaml')
        self.collector_config = yaml_read(os.environ['CONFIG'])
        # every collector got to be tested in here
        self.random_prometheus_port = random.randrange(9000, 9700, 1)
        print("chosen testport: " + str(self.random_prometheus_port))

        # Mock the token/adapter plumbing so no real vrops endpoint is hit.
        BaseCollector.get_target_tokens = MagicMock(
            return_value={
                'testhost.test':
                '2ed214d523-235f-h283-4566-6sf356124fd62::f234234-234'
            })
        Vrops.get_token = MagicMock(return_value=(
            "2ed214d523-235f-h283-4566-6sf356124fd62::f234234-234", 200))
        Vrops.get_adapter = MagicMock(
            return_value=("vcenter1", "3628-93a1-56e84634050814"))
        # test tool get_resources to create resource objects

        Vrops.get_datacenter = MagicMock(
            return_value=[{
                'name': 'datacenter1',
                'uuid': '3628-93a1-56e84634050814',
                'resourcekind': 'Datacenter',
                'parent': '3628-93a1-56e84634050814'
            }, {
                'name': 'datacenter2',
                'uuid': '5628-9ba1-55e847050815',
                'resourcekind': 'Datacenter',
                'parent': '3628-93a1-56e84634050814'
            }, {
                'name': 'datacenter3',
                'uuid': '7422-91h7-52s842060815',
                'resourcekind': 'Datacenter',
                'parent': '3628-93a1-56e84634050814'
            }])
        Vrops.get_cluster_and_datastores = MagicMock(
            return_value=[{
                'name': 'cluster1',
                'uuid': '3628-93a1-56e84634050814',
                'resourcekind': 'ClusterComputeResource',
                'parent': '7422-91h7-52s842060815'
            }, {
                'name': 'cluster2',
                'uuid': '5628-9ba1-55e847050815',
                'resourcekind': 'ClusterComputeResource',
                'parent': '7422-91h7-52s842060815'
            }, {
                'name': 'cluster3',
                'uuid': '7422-91h7-52s842060815',
                'resourcekind': 'ClusterComputeResource',
                'parent': '7422-91h7-52s842060815'
            }, {
                'name': 'vmfs_vc-w-0_p_ssd_bb091_001',
                'uuid': '3628-93a1-56e84634050814',
                'resourcekind': 'Datastore',
                'parent': '7422-91h7-52s842060815'
            }, {
                'name': 'eph-bb112-1',
                'uuid': '5628-9ba1-55e847050815',
                'resourcekind': 'Datastore',
                'parent': '7422-91h7-52s842060815'
            }, {
                'name': 'B121_Management_DS03',
                'uuid': '7422-91h7-52s842060815',
                'resourcekind': 'Datastore',
                'parent': '7422-91h7-52s842060815'
            }])
        Vrops.get_hosts = MagicMock(
            return_value=[{
                'name': 'hostsystem1',
                'uuid': '3628-93a1-56e84634050814',
                'resourcekind': 'HostSystem',
                'parent': '7422-91h7-52s842060815'
            }, {
                'name': 'hostsystem2',
                'uuid': '5628-9ba1-55e847050815',
                'resourcekind': 'HostSystem',
                'parent': '7422-91h7-52s842060815'
            }, {
                'name': 'hostsystem3',
                'uuid': '7422-91h7-52s842060815',
                'resourcekind': 'HostSystem',
                'parent': '7422-91h7-52s842060815'
            }])
        Vrops.get_vms = MagicMock(
            return_value=[{
                'name': 'vm1',
                'uuid': '3628-93a1-56e84634050814',
                'resourcekind': 'VirtualMachine',
                'parent': '7422-91h7-52s842060815'
            }, {
                'name': 'vm2',
                'uuid': '5628-9ba1-55e847050815',
                'resourcekind': 'VirtualMachine',
                'parent': '7422-91h7-52s842060815'
            }, {
                'name': 'vm3',
                'uuid': '7422-91h7-52s842060815',
                'resourcekind': 'VirtualMachine',
                'parent': '7422-91h7-52s842060815'
            }])
        Vrops.get_latest_stat = MagicMock(return_value=1)
        Vrops.get_property = MagicMock(return_value="test_property")
        Vrops.get_project_ids = MagicMock(
            return_value=[{
                "3628-93a1-56e84634050814": "0815"
            }, {
                "7422-91h7-52s842060815": "0815"
            }, {
                "5628-9ba1-55e847050815": "internal"
            }])
        # Background InventoryBuilder feeding the collectors from test.json.
        thread = Thread(target=InventoryBuilder,
                        args=('./tests/test.json', 8000, 180, 300))
        thread.daemon = True
        thread.start()

        for collector in self.metrics_yaml.keys():
            print("\nTesting " + collector)

            # Stats collectors: three fake samples per configured statkey.
            if "Stats" in collector:
                multiple_metrics_generated = list()
                for metric in self.collector_config[collector]:
                    multiple_metrics_generated.append({
                        "resourceId": "3628-93a1-56e84634050814",
                        "stat-list": {
                            "stat": [{
                                "timestamps": [1582797716394],
                                "statKey": {
                                    "key": metric['key']
                                },
                                "data": [88.0]
                            }]
                        }
                    })
                    multiple_metrics_generated.append({
                        "resourceId": "3628-93a1-56e84634050814",
                        "stat-list": {
                            "stat": [{
                                "timestamps": [1582797716394],
                                "statKey": {
                                    "key": metric['key']
                                },
                                "data": [44.0]
                            }]
                        }
                    })
                    multiple_metrics_generated.append({
                        "resourceId": "3628-93a1-56e84634050814",
                        "stat-list": {
                            "stat": [{
                                "timestamps": [1582797716394],
                                "statKey": {
                                    "key": metric['key']
                                },
                                "data": [55.0]
                            }]
                        }
                    })
                Vrops.get_latest_stats_multiple = MagicMock(
                    return_value=(multiple_metrics_generated, 200, 0.5))

            # Properties collectors: one numeric and two string-valued
            # fake property contents per configured key.
            if "Properties" in collector:
                multiple_metrics_generated = list()
                for metric in self.collector_config[collector]:
                    multiple_metrics_generated.append({
                        "resourceId": "3628-93a1-56e84634050814",
                        "property-contents": {
                            "property-content": [{
                                "timestamps": [1582797716394],
                                "statKey": metric['key'],
                                "data": [88.0]
                            }]
                        }
                    })
                    multiple_metrics_generated.append({
                        "resourceId": "3628-93a1-56e84634050814",
                        "property-contents": {
                            "property-content": [{
                                "timestamps": [1582797716394],
                                "statKey": metric['key'],
                                "values": ["test"]
                            }]
                        }
                    })
                    multiple_metrics_generated.append({
                        "resourceId": "3628-93a1-56e84634050814",
                        "property-contents": {
                            "property-content": [{
                                "timestamps": [1582797716394],
                                "statKey": metric['key'],
                                "values": ["test"]
                            }]
                        }
                    })
                Vrops.get_latest_properties_multiple = MagicMock(
                    return_value=(multiple_metrics_generated, 200, 0.5))

            thread_list = list()

            # start prometheus server to provide metrics later on
            collector_instance = globals()[collector]()
            thread1 = Thread(target=run_prometheus_server,
                             args=(self.random_prometheus_port,
                                   [collector_instance]))
            thread1.daemon = True
            thread1.start()
            thread_list.append(thread1)
            # give grandpa thread some time to get prometheus started and run a couple intervals of InventoryBuilder
            time.sleep(3)

            print("prometheus query port " + str(self.random_prometheus_port))
            c = http.client.HTTPConnection("localhost:" +
                                           str(self.random_prometheus_port))
            c.request("GET", "/")
            r = c.getresponse()
            self.assertEqual(r.status, 200,
                             "HTTP server return code should be 200")
            self.assertEqual(r.reason, "OK", "HTTP status should be OK")

            # Parse the exposition text: keep only labelled sample lines,
            # dropping comments and default python/process metrics, and
            # normalize each to 'name{labels}' for set comparison.
            data = r.read().decode()
            data_array = data.split('\n')
            metrics = set()
            for entry in data_array:
                if entry.startswith('#'):
                    continue
                if entry.startswith('python_gc'):
                    continue
                if entry.startswith('process_'):
                    continue
                if entry.startswith('python_info'):
                    continue
                split_entry = entry.split("}")
                if len(split_entry) != 2:
                    continue
                metrics.add(split_entry[0] + "}")

            metrics_yaml_list = self.metrics_yaml[collector]
            self.assertTrue(metrics_yaml_list,
                            msg=collector + " has no metrics defined, FIX IT!")
            self.assertTrue(
                metrics,
                msg=collector +
                " is not producing any metrics at all, how should I continue?")

            # check if there are more metrics being produced and they are not listed in metrics.yaml?!
            issubsetdifference = metrics.difference(metrics_yaml_list)
            self.assertTrue(
                metrics.issubset(metrics_yaml_list),
                msg=collector +
                ": metric not covered by testcase, probably missing in yaml\n"
                + "\n".join(issubsetdifference))
            # check if all metrics from yaml are here
            supersetdifference = set(metrics_yaml_list).difference(metrics)
            self.assertTrue(set(metrics).issuperset(metrics_yaml_list),
                            msg=collector + ": missing metrics from yaml:\n" +
                            "\n".join(supersetdifference))

            for t in thread_list:
                t.join(timeout=5)

            # we don't want to have any port locks if prometheus server thread is not shutting down
            self.random_prometheus_port += 1
            REGISTRY.unregister(collector_instance)