Example #1
def createGaugeAndPushToGateway(reporter_name, reporter_value, report_type):
    registry = CollectorRegistry()

    if report_type is ReportType.metric:
        label_names = ['metric_name', 'push_gateway_type']
        label_values = [reporter_name, 'metric']
        gauge_name = REPORTER_PUSH_GATEWAY_METRIC_PREFIX + "_" + reporter_name
        gauge_value = reporter_value

    else:
        label_names = ['param_name', 'param_value', 'push_gateway_type']
        label_values = [reporter_name, reporter_value, 'parameter']
        gauge_name = REPORTER_PUSH_GATEWAY_METRIC_PARAMETER + "_" + reporter_name
        gauge_value = 1

    gauge = Gauge(name=gauge_name,
                  documentation="",
                  labelnames=label_names,
                  registry=registry)

    gauge.labels(*label_values).set(gauge_value)

    pushadd_to_gateway(gateway=environ[GATEWAY_URL_KEY],
                       job=PUSH_GATEWAY_JOB_NAME,
                       registry=registry,
                       grouping_key={GROUPING_KEY: environ[GROUPING_KEY]})
Example #2
    def get_network_metrics(self):
        while self.start_monitoring:

            self.monitor_lock.acquire()

            # group metrics by dpid to optimize the rest api calls
            dpid_list = [metric_dict['switch_dpid'] for metric_dict in self.network_metrics]
            dpid_set = set(dpid_list)

            for dpid in dpid_set:

                # query Ryu
                ret = self.net.ryu_REST('stats/port', dpid=dpid)
                if isinstance(ret, dict):
                    port_stat_dict = ret
                elif isinstance(ret, str):
                    port_stat_dict = ast.literal_eval(ret.rstrip())
                else:
                    port_stat_dict = None

                metric_list = [metric_dict for metric_dict in self.network_metrics
                               if int(metric_dict['switch_dpid'])==int(dpid)]

                for metric_dict in metric_list:
                    self.set_network_metric(metric_dict, port_stat_dict)

            try:
                if len(self.network_metrics) > 0:
                    pushadd_to_gateway(self.pushgateway, job='sonemu-SDNcontroller', registry=self.registry)
            except Exception as e:
                logging.warning("Pushgateway not reachable: {0} {1}".format(Exception, e))

            self.monitor_lock.release()
            time.sleep(1)
Example #3
    def push2gateway(self):

        self.g = Gauge('gene_push',
                       'status', [
                           'store_name', 'client_IP', 'snapshot_time',
                           'read_bytes', 'write_bytes'
                       ],
                       registry=self.registry)

        dict_list = self.execute_some_command(command)

        for dict_tmp in dict_list:
            store = dict_tmp['store']
            client_IP = dict_tmp['client_IP']
            snapshot_time = dict_tmp['snapshot_time']
            read_bytes = dict_tmp['read_bytes']
            write_bytes = dict_tmp['write_bytes']

            self.g.labels(store, client_IP, snapshot_time, read_bytes,
                          write_bytes)

        pushadd_to_gateway(self.target,
                           job='gene_pushgateway',
                           registry=self.registry,
                           timeout=200)
Example #4
 def export_metrics(self):
     while True:
         # push metrics to gateway
         pushadd_to_gateway(pushgateway,
                            job='squid_client',
                            registry=self.registry)
         sleep(1)
Example #5
    def push2gateway(self):

        self.g = Gauge('gene_push',
                       'status', [
                           'server', 'store_name', 'clentIP', 'read_bytes',
                           'write_bytes', 'snapshot_time'
                       ],
                       registry=self.registry)

        snapshot_time = time.time()
        result = self.execute_some_command(command)

        for dict_key in result.keys():
            server = dict_key
            store_lists = result.get(server)

        for store_dict in store_lists:
            for store in store_dict.keys():
                for client_dict in store_dict.get(store):
                    for k, v in client_dict.items():
                        if k == 'read':
                            read = v
                        elif k == 'write':
                            write = v
                        else:
                            client = v
                    self.g.labels(server, store, client, read, write,
                                  snapshot_time)

        pushadd_to_gateway(self.target,
                           job='gene_pushgateway',
                           registry=self.registry,
                           timeout=200)
Example #6
    def get_network_metrics(self):
        while self.start_monitoring:

            self.monitor_lock.acquire()

            # group metrics by dpid to optimize the rest api calls
            dpid_list = [metric_dict['switch_dpid'] for metric_dict in self.network_metrics]
            dpid_set = set(dpid_list)

            for dpid in dpid_set:

                # query Ryu
                ret = self.net.ryu_REST('stats/port', dpid=dpid)
                if isinstance(ret, dict):
                    port_stat_dict = ret
                elif isinstance(ret, str):
                    port_stat_dict = ast.literal_eval(ret.rstrip())
                else:
                    port_stat_dict = None

                metric_list = [metric_dict for metric_dict in self.network_metrics
                               if int(metric_dict['switch_dpid'])==int(dpid)]

                for metric_dict in metric_list:
                    self.set_network_metric(metric_dict, port_stat_dict)

            try:
                if len(self.network_metrics) > 0:
                    pushadd_to_gateway(self.pushgateway, job='sonemu-SDNcontroller', registry=self.registry)
            except Exception as e:
                logging.warning("Pushgateway not reachable: {0} {1}".format(Exception, e))

            self.monitor_lock.release()
            time.sleep(1)
Example #7
    def push(self,
             job,
             metric,
             value,
             labels,
             metric_type='Gauge',
             metric_desc='metric'):
        # Get the metric constructor for the requested metric type
        metricFunc = validate_mertric_type(metric_type)

        # labels must be a dict, e.g. {'hostname': 'c1.heboan.com', 'ip': '192.168.88.1'}
        if not isinstance(labels, dict):
            msg = "tag parameter must be of dict type"
            raise Exception(msg)

        f = metricFunc(metric,
                       metric_desc,
                       labels.keys(),
                       registry=self.registry)
        f.labels(*labels.values()).set(value)

        # f = metricFunc(metric, metric_desc, ['hostname', 'ip'], registry=self.registry)
        #f.labels('c1.heboan.com', '192.168.1.100').set(value)

        # If the job and metric names are the same, earlier values are overwritten; usually one job groups several metrics
        pushadd_to_gateway(self.server, job=job, registry=f, timeout=200)
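A minimal usage sketch for the push() helper above; the PrometheusPusher class name and the gateway address are illustrative assumptions, not part of the original snippet:

# Hypothetical usage of the push() helper above. PrometheusPusher and the
# 'pushgateway:9091' address are assumptions for illustration only.
pusher = PrometheusPusher(server='pushgateway:9091')
pusher.push(job='host_metrics',
            metric='cpu_usage',
            value=0.42,
            labels={'hostname': 'c1.heboan.com', 'ip': '192.168.88.1'},
            metric_type='Gauge',
            metric_desc='CPU usage ratio')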
Example #8
    def set_flow_metric(self, metric_dict, flow_stat_dict):
        # vnf tx is the datacenter switch rx and vice-versa
        metric_key = metric_dict['metric_key']
        switch_dpid = metric_dict['switch_dpid']
        vnf_name = metric_dict['vnf_name']
        vnf_interface = metric_dict['vnf_interface']
        previous_measurement = metric_dict['previous_measurement']
        previous_monitor_time = metric_dict['previous_monitor_time']
        cookie = metric_dict['cookie']

        # TODO aggregate all found flow stats
        #flow_stat = flow_stat_dict[str(switch_dpid)][0]
        #if 'bytes' in metric_key:
        #    counter = flow_stat['byte_count']
        #elif 'packet' in metric_key:
        #    counter = flow_stat['packet_count']

        counter = 0
        for flow_stat in flow_stat_dict[str(switch_dpid)]:
            if 'bytes' in metric_key:
                counter += flow_stat['byte_count']
            elif 'packet' in metric_key:
                counter += flow_stat['packet_count']

        flow_stat = flow_stat_dict[str(switch_dpid)][0]
        flow_uptime = flow_stat['duration_sec'] + flow_stat['duration_nsec'] * 10 ** (-9)

        self.prom_metrics[metric_dict['metric_key']]. \
            labels({'vnf_name': vnf_name, 'vnf_interface': vnf_interface, 'flow_id': cookie}). \
            set(counter)
        pushadd_to_gateway(self.pushgateway, job='sonemu-SDNcontroller', registry=self.registry)
Example #9
def extract_image(image_name: str, timeout: int = None, *, registry_credentials: str = None,
                  tls_verify: bool=True) -> dict:
    """Extract dependencies from an image."""
    # Start Prometheus
    prometheus_registry = CollectorRegistry()
    metric_analyzer_job = Gauge('package_extract_time', 'Runtime of package extract job', registry=prometheus_registry)
    # Begin the timer for when the job starts
    with metric_analyzer_job.time():
        image_name = quote(image_name)
        with tempfile.TemporaryDirectory() as dir_path:
            download_image(
                image_name,
                dir_path,
                timeout=timeout or None,
                registry_credentials=registry_credentials or None,
                tls_verify=tls_verify
            )
            # stay inside the temporary directory so it is not removed before
            # the root filesystem is constructed and analyzed
            rootfs_path = os.path.join(dir_path, 'rootfs')
            layers = construct_rootfs(dir_path, rootfs_path)

            result = run_analyzers(rootfs_path)
            result['layers'] = layers
    push_gateway = os.getenv('PROMETHEUS_PUSH_GATEWAY', 'pushgateway:9091')
    if push_gateway:
        try:
            pushadd_to_gateway(push_gateway, job='package-extract-runtime', registry=prometheus_registry)
        except Exception as e:
            _LOGGER.exception('An error occurred pushing the metrics: {}'.format(str(e)))
    return result
Example #10
 def pushadd_to_gateway(self,
                        job,
                        grouping_key=None,
                        handler=default_handler):
     """PushAdd metrics to the given pushgateway.
     `job` is the job label to be attached to all pushed metrics
     `registry` is an instance of CollectorRegistry
     `grouping_key` please see the pushgateway documentation for details.
                 Defaults to None
     `handler` is an optional function which can be provided to perform
             requests to the 'gateway'.
             Defaults to None, in which case an http or https request
             will be carried out by a default handler.
             See the 'prometheus_client.push_to_gateway' documentation
             for implementation requirements.
     This replaces metrics with the same name, job and grouping_key.
     This uses the POST HTTP method."""
     prometheus_client.pushadd_to_gateway(
         gateway=self.gateway,
         job=job,
         registry=self.registry,
         grouping_key=grouping_key,
         timeout=self.timeout,
         handler=handler,
     )
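A minimal calling sketch for the wrapper above, assuming a client object constructed with a gateway address, registry, and timeout (the names below are illustrative only):

# Hypothetical call of the pushadd_to_gateway wrapper above; 'client' and its
# arguments are assumptions for illustration. Metrics with the same name, job
# and grouping_key are replaced on each push (POST).
client.pushadd_to_gateway(job='nightly_backup',
                          grouping_key={'instance': 'worker-1'})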
Example #11
def export_metrics():
    while True:
        # push metrics to gateway
        pushadd_to_gateway(PUSHGATEWAY,
                           job='vcdn_client',
                           registry=VCDN_REGISTRY)
        sleep(1)
Example #12
    def pytest_runtest_logreport(self, report):
        if report.when == 'call':
            registry = CollectorRegistry()
            description = self.pattern.sub('_', report.nodeid)
            print(description)
            name = '{prefix}{funcname}'.format(prefix=self.prefix,
                                               funcname=description)
            name2 = '{name}_duration'.format(name=name)

            metric = Gauge(name,
                           report.nodeid,
                           self.extra_labels.keys(),
                           registry=registry)
            metric.labels(**self.extra_labels).set(1 if report.outcome ==
                                                   'passed' else 0)

            duration = Gauge(name2,
                             report.nodeid,
                             self.extra_labels.keys(),
                             registry=registry)
            duration.labels(**self.extra_labels).set(report.duration)

            pushadd_to_gateway(self.pushgateway_url,
                               registry=registry,
                               job=self.job_name)
Example #13
    def set_flow_metric(self, metric_dict, flow_stat_dict):
        # vnf tx is the datacenter switch rx and vice-versa
        metric_key = metric_dict['metric_key']
        switch_dpid = metric_dict['switch_dpid']
        vnf_name = metric_dict['vnf_name']
        vnf_interface = metric_dict['vnf_interface']
        previous_measurement = metric_dict['previous_measurement']
        previous_monitor_time = metric_dict['previous_monitor_time']
        cookie = metric_dict['cookie']

        counter = 0
        for flow_stat in flow_stat_dict[str(switch_dpid)]:
            if 'bytes' in metric_key:
                counter += flow_stat['byte_count']
            elif 'packet' in metric_key:
                counter += flow_stat['packet_count']

        # flow_uptime disabled for now (can give error)
        #flow_stat = flow_stat_dict[str(switch_dpid)][0]
        #flow_uptime = flow_stat['duration_sec'] + flow_stat['duration_nsec'] * 10 ** (-9)

        self.prom_metrics[metric_dict['metric_key']]. \
            labels({'vnf_name': vnf_name, 'vnf_interface': vnf_interface, 'flow_id': cookie}). \
            set(counter)
        try:
            pushadd_to_gateway(self.pushgateway, job='sonemu-SDNcontroller', registry=self.registry)
        except Exception as e:
            logging.warning("Pushgateway not reachable: {0} {1}".format(Exception, e))
Example #14
    def push2gateway(self):

        self.g = Gauge('gene_push', 'status',
                       ['server', 'store_name', 'clentIP', 'read_bytes', 'write_bytes', 'snapshot_time'],
                       registry=self.registry)

        snapshot_time = time.time()
        result = self.execute_some_command(command)
        for dict_key in result.keys():
            server = dict_key
            store_lists = result.get(server)

        for store_dict in store_lists:
            for store in store_dict.keys():
                for client_list in store_dict.get(store):
                    for client in client_list.keys():

                        read = client_list.get(client).split(',')[0]
                        write = client_list.get(client).split(',')[1]

                        self.g.labels(server, store, client, read, write, snapshot_time)

        pushadd_to_gateway(self.target, job='gene_pushgateway', registry=self.registry, timeout=200)
Example #15
        def wrapper_log_to_prometheus(*args, **kwargs):
            result = None
            if not settings.PUSHGATEWAY_HOST:
                result = job_func(*args, **kwargs)
            else:
                registry = CollectorRegistry()
                duration = Gauge(
                    f"os2bos_{job_name}_duration_seconds",
                    f"Duration of {job_name}",
                    registry=registry,
                )

                try:
                    with duration.time():
                        result = job_func(*args, **kwargs)
                except Exception:
                    pass
                else:
                    # only runs when there are no exceptions
                    last_success = Gauge(
                        f"os2bos_{job_name}_last_success",
                        f"Unixtime {job_name} last succeeded",
                        registry=registry,
                    )
                    last_success.set_to_current_time()
                finally:
                    pushadd_to_gateway(
                        settings.PUSHGATEWAY_HOST,
                        job=f"{job_name}",
                        registry=registry,
                    )
            return result
Example #16
def push_sql_metric(metricName, dremioCluster, metricValue):
	# Push SQL Metric
	registry = CollectorRegistry()
	metric = Gauge(metricName, "SQL Metric, pushed via Gateway", registry=registry)
	metric.set_to_current_time()
	metric.set(metricValue)
	pushadd_to_gateway(pgwendpoint, job=dremioCluster, registry=registry, timeout=api_timeout)
Example #17
def report(job='', metric='', desc='', val='', **labels):
    endpoint = "video:" + socket.gethostname()
    ts = int(time.time())

    #######  prometheus  #####
    registry = CollectorRegistry()
    if len(labels) > 0:
        labelname = labels.keys()
        g = Gauge(metric.replace('.', '_'),
                  desc,
                  labelnames=labelname,
                  registry=registry)
        lastpush = Gauge('lastpush_' + metric.replace('.', '_'),
                         desc,
                         labelnames=labelname,
                         registry=registry)
        g.labels(**labels).set(val)
        lastpush.labels(**labels).set_to_current_time()
        grouping_key = labels
        pushadd_to_gateway('localhost:9091',
                           job=job,
                           grouping_key=grouping_key,
                           registry=registry)
    else:
        g = Gauge(metric.replace('.', '_'), desc, registry=registry)
        g.set_to_current_time()
        g.set(int(val))
        pushadd_to_gateway('localhost:9091', job=job, registry=registry)
Example #18
def push_source_status_metric(dremioCluster, sourceName, status):
	# Push Coordinator Status
	registry = CollectorRegistry()
	metric = Gauge(api_source_status_metric, "Source status, pushed via Gateway", labelnames=['source'], registry=registry)
	metric.labels(sourceName).set_to_current_time()
	metric.labels(sourceName).set(status)
	groupingKey = dict({"job": dremioCluster, "source": sourceName})
	pushadd_to_gateway(pgwendpoint, job=dremioCluster, registry=registry, timeout=api_timeout, grouping_key=groupingKey)
Example #19
def export_metrics(key=None):
    try:
        pushadd_to_gateway(PUSHGATEWAY_ADDR,
                           job='sonemu-skewmon',
                           registry=registry,
                           grouping_key=key)
    except Exception as e:
        LOG.warning("Pushgateway not reachable: {0}".format(str(e)))
Example #20
 def test_pushadd_with_groupingkey(self):
     pushadd_to_gateway(self.address, "my_job", self.registry, {'a': 9})
     self.assertEqual(self.requests[0][0].command, 'POST')
     self.assertEqual(self.requests[0][0].path, '/metrics/job/my_job/a/9')
     self.assertEqual(self.requests[0][0].headers.get('content-type'),
                      CONTENT_TYPE_LATEST)
     self.assertEqual(self.requests[0][1],
                      b'# HELP g help\n# TYPE g gauge\ng 0.0\n')
Example #21
def push(registry: CollectorRegistry):
    if prom_push_gateway is not None and benji_instance is not None:
        logger.info(f'Pushing Prometheus metrics to gateway {prom_push_gateway}.')
        logger.debug(generate_latest(registry).decode('utf-8'))
        try:
            pushadd_to_gateway(prom_push_gateway, job=benji_instance, registry=registry)
        except urllib.error.URLError as exception:
            logger.error(f'Pushing Prometheus metrics failed with a {type(exception).__name__} exception: {str(exception)}')
            logger.error('Ignoring.')
Example #22
 def pushgateway(self, job_name):
     _PUSH_GATEWAY_HOST = os.getenv('PROMETHEUS_PUSHGATEWAY_HOST')
     _PUSH_GATEWAY_PORT = os.getenv('PROMETHEUS_PUSHGATEWAY_PORT')
     if _PUSH_GATEWAY_HOST and _PUSH_GATEWAY_PORT:
         try:
             push_gateway = f"{_PUSH_GATEWAY_HOST}:{_PUSH_GATEWAY_PORT}"
             _LOGGER.debug(f"Submitting metrics to Prometheus push gateway {push_gateway}")
             pushadd_to_gateway(push_gateway, job=job_name, registry=prometheus_registry)
         except Exception as e:
             _LOGGER.exception('An error occurred pushing the metrics: {}'.format(str(e)))
Example #23
    def push_metrics(self):
        if self._push_gateway_host:
            with self._lock:
                registry = self._registry
                self._metrics.clear()
                self._registry = CollectorRegistry()

            try:
                pushadd_to_gateway(self._push_gateway_host, job='celery', registry=registry)
            except Exception:
                prometheus_soft_assert(False, 'Prometheus metric error while pushing to gateway')
Example #24
 def push_metrics(self):
     if self._push_gateway_host:
         try:
             pushadd_to_gateway(self._push_gateway_host,
                                job='celery',
                                registry=self._registry)
         except Exception:
             prometheus_soft_assert(
                 False, 'Prometheus metric error while pushing to gateway')
         finally:
             # force re-creating metrics to prevent accumulating values
             self._metrics.clear()
Example #25
 def update(self):
     """
     update metrics registered by registry
     """
     gateway = self.crawler.settings.get("SCRAPROM_PUSHGATEWAY_URL",
                                         defaults.SCRAPROM_PUSHGATEWAY_URL)
     job = self.crawler.settings.get("SCRAPROM_JOB_NAME",
                                     defaults.SCRAPROM_JOB_NAME)
     timeout = self.crawler.settings.get("SCRAPROM_PUSH_TIMEOUT",
                                         defaults.SCRAPROM_PUSH_TIMEOUT)
     grouping_key = self._get_grouping_key()
     pushadd_to_gateway(gateway, job, self.registry, grouping_key, timeout)
Example #26
def extract_image(
    image_name: str,
    timeout: int = None,
    *,
    registry_credentials: str = None,
    tls_verify: bool = True,
) -> dict:
    """Extract dependencies from an image."""
    # Setting up the prometheus registry and the Gauge metric
    prometheus_registry = CollectorRegistry()
    metric_analyzer_job = Gauge(
        "package_extract_time",
        "Runtime of package extract job",
        registry=prometheus_registry,
    )

    # Begins a timer to record the running time of the job
    with metric_analyzer_job.time(), tempfile.TemporaryDirectory() as dir_path:
        image_name = quote(image_name)
        download_image(
            image_name,
            dir_path,
            timeout=timeout or None,
            registry_credentials=registry_credentials or None,
            tls_verify=tls_verify,
        )
        image_size = get_image_size(dir_path)
        rootfs_path = os.path.join(dir_path, "rootfs")
        layers = construct_rootfs(dir_path, rootfs_path)

        result = run_analyzers(rootfs_path)
        result["layers"] = layers
        result["image_size"] = image_size

    _push_gateway_host = os.getenv("PROMETHEUS_PUSHGATEWAY_HOST")
    _push_gateway_port = os.getenv("PROMETHEUS_PUSHGATEWAY_PORT")
    if _push_gateway_host and _push_gateway_port:
        try:
            push_gateway = f"{_push_gateway_host:_push_gateway_port}"
            _LOGGER.debug(
                f"Submitting metrics to Prometheus push gateway {push_gateway}"
            )
            pushadd_to_gateway(
                push_gateway,
                job="package-extract-runtime",
                registry=prometheus_registry,
            )
        except Exception as e:
            _LOGGER.exception(
                "An error occurred pushing the metrics: {}".format(str(e))
            )

    return result
Example #27
    def processing(self):
        self.registry = CollectorRegistry()
        self.g = Gauge(self.type, 'status and time', ['ip', 'status', 'timestamp', 'response_time'], registry=self.registry)

        for ip in self.ips:
            ip, status, timestamp, response_time = self.gather(ip)
            self.g.labels(ip, status, timestamp, response_time)
        try:
            pushadd_to_gateway(self.targets, job='pingIP_status', registry=self.registry, timeout=200)
        except Exception as e:
            logging.error("Failed to push: " + str(e))
Example #28
    def handle(self, node: str, unit: str, value: float):
        self.lock.acquire()
        if unit not in self.gauges:
            print("  [error] --> unrecognized unit: {}".format(unit))
            self.lock.release()
            return
        gauge = self.gauges[unit]
        gauge.labels(node).set(value)

        print("[push] node={}, unit={}, value={}".format(node, unit, value))
        pushadd_to_gateway(self.gateway_url,
                           job="home_energy_consumption",
                           registry=self.registry)
        self.lock.release()
Example #29
def push_api_cluster_status_metric(dremioCluster, clusterName, status):
    # Push child cluster status metric
    registry = CollectorRegistry()
    metric = Gauge(api_cluster_status_metric,
                   "Child cluster status, pushed via Gateway",
                   labelnames=['cluster'],
                   registry=registry)
    metric.labels(clusterName).set_to_current_time()
    metric.labels(clusterName).set(status)
    groupingKey = dict({"job": dremioCluster, "cluster": clusterName})
    pushadd_to_gateway(pgwendpoint,
                       job=dremioCluster,
                       registry=registry,
                       timeout=api_timeout,
                       grouping_key=groupingKey)
Example #30
def push_sql_metric(metricName, dremioCluster, executor, metricValue):
    # Push SQL Metric
    registry = CollectorRegistry()
    metric = Gauge(metricName,
                   "SQL Metric, pushed via Gateway",
                   labelnames=['executor'],
                   registry=registry)
    metric.labels(executor).set_to_current_time()
    metric.labels(executor).set(metricValue)
    groupingKey = dict({"job": dremioCluster, "executor": executor})
    pushadd_to_gateway(pgwendpoint,
                       job=dremioCluster,
                       registry=registry,
                       timeout=api_timeout,
                       grouping_key=groupingKey)
Example #31
def flush_to_gateway(v: List[Any], target: str):
    reg = CollectorRegistry()
    st_metric = Gauge('status', 'Node status', ["host"], registry=reg)
    cpu_metric = Gauge('cpu', 'CPU using percent', ["host"], registry=reg)
    mem_metric = Gauge('mem', 'Mem using GB', ["host"], registry=reg)

    for i in v:
        st_metric.labels(host=i[0]).set(i[1])
        if i[1] == 0:
            cpu_metric.labels(host=i[0]).set(i[2])
            mem_metric.labels(host=i[0]).set(i[3])
        try:
            pushadd_to_gateway(target, job='hpcMonitor', registry=reg)
        except Exception as e:
            logging.error("Failed to upload: " + str(e))
Example #32
    def push(self, **kwargs):
        if 'handler' in kwargs:
            handler = kwargs.pop('handler')
        else:
            handler = None

        if not callable(handler):
            handler = self._pushgateway_handler

        strict = kwargs.pop('strict') if 'strict' in kwargs.keys() else False

        if strict:
            pushadd_to_gateway(self.base_url, handler=handler, **kwargs)
        else:
            push_to_gateway(self.base_url, handler=handler, **kwargs)
Example #33
def push_api_current_executor_metric(dremioCluster, clusterName, runningCount):
    # Push Current Executors provisioned metric
    registry = CollectorRegistry()
    metric = Gauge(api_current_executor_metric,
                   "Current number of expected executors, pushed via Gateway",
                   labelnames=['cluster'],
                   registry=registry)
    metric.labels(clusterName).set_to_current_time()
    metric.labels(clusterName).set(runningCount)
    groupingKey = dict({"job": dremioCluster, "cluster": clusterName})
    pushadd_to_gateway(pgwendpoint,
                       job=dremioCluster,
                       registry=registry,
                       timeout=api_timeout,
                       grouping_key=groupingKey)
Example #34
    def set_network_metric(self, metric_dict, port_stat_dict):
        # vnf tx is the datacenter switch rx and vice-versa
        metric_key = self.switch_tx_rx(metric_dict['metric_key'])
        switch_dpid = metric_dict['switch_dpid']
        vnf_name = metric_dict['vnf_name']
        vnf_interface = metric_dict['vnf_interface']
        previous_measurement = metric_dict['previous_measurement']
        previous_monitor_time = metric_dict['previous_monitor_time']
        mon_port = metric_dict['mon_port']

        for port_stat in port_stat_dict[str(switch_dpid)]:
            if int(port_stat['port_no']) == int(mon_port):
                port_uptime = port_stat['duration_sec'] + port_stat['duration_nsec'] * 10 ** (-9)
                this_measurement = int(port_stat[metric_key])
                #logging.info('set prom packets:{0} {1}:{2}'.format(this_measurement, vnf_name, vnf_interface))

                # set prometheus metric
                self.prom_metrics[metric_dict['metric_key']].\
                    labels({'vnf_name': vnf_name, 'vnf_interface': vnf_interface, 'flow_id': None}).\
                    set(this_measurement)
                #push_to_gateway(self.pushgateway, job='SDNcontroller',
                #                grouping_key={'metric':metric_dict['metric_key']}, registry=self.registry)

                # 1 single monitor job for all metrics of the SDN controller
                pushadd_to_gateway(self.pushgateway, job='sonemu-SDNcontroller', registry=self.registry)

                if previous_monitor_time <= 0 or previous_monitor_time >= port_uptime:
                    metric_dict['previous_measurement'] = int(port_stat[metric_key])
                    metric_dict['previous_monitor_time'] = port_uptime
                    # do first measurement
                    #logging.info('first measurement')
                    time.sleep(1)
                    self.monitor_lock.release()

                    metric_rate = self.get_network_metrics()
                    return metric_rate

                else:
                    time_delta = (port_uptime - metric_dict['previous_monitor_time'])
                    metric_rate = (this_measurement - metric_dict['previous_measurement']) / float(time_delta)
                    #logging.info('metric: {0} rate:{1}'.format(metric_dict['metric_key'], metric_rate))

                metric_dict['previous_measurement'] = this_measurement
                metric_dict['previous_monitor_time'] = port_uptime
                return metric_rate

        logging.exception('metric {0} not found on {1}:{2}'.format(metric_key, vnf_name, vnf_interface))
        return 'metric {0} not found on {1}:{2}'.format(metric_key, vnf_name, vnf_interface)
Example #35
    def set_network_metric(self, metric_dict, port_stat_dict):
        # vnf tx is the datacenter switch rx and vice-versa
        metric_key = self.switch_tx_rx(metric_dict['metric_key'])
        switch_dpid = metric_dict['switch_dpid']
        vnf_name = metric_dict['vnf_name']
        vnf_interface = metric_dict['vnf_interface']
        previous_measurement = metric_dict['previous_measurement']
        previous_monitor_time = metric_dict['previous_monitor_time']
        mon_port = metric_dict['mon_port']

        for port_stat in port_stat_dict[str(switch_dpid)]:
            if int(port_stat['port_no']) == int(mon_port):
                port_uptime = port_stat['duration_sec'] + port_stat['duration_nsec'] * 10 ** (-9)
                this_measurement = int(port_stat[metric_key])

                # set prometheus metric
                self.prom_metrics[metric_dict['metric_key']].\
                    labels({'vnf_name': vnf_name, 'vnf_interface': vnf_interface, 'flow_id': None}).\
                    set(this_measurement)

                # 1 single monitor job for all metrics of the SDN controller
                pushadd_to_gateway(self.pushgateway, job='sonemu-SDNcontroller', registry=self.registry)

                # also the rate is calculated here, but not used for now
                # (rate can be easily queried from prometheus also)
                if previous_monitor_time <= 0 or previous_monitor_time >= port_uptime:
                    metric_dict['previous_measurement'] = int(port_stat[metric_key])
                    metric_dict['previous_monitor_time'] = port_uptime
                    # do first measurement
                    #time.sleep(1)
                    #self.monitor_lock.release()
                    # rate cannot be calculated yet (need a first measurement)
                    metric_rate = None

                else:
                    time_delta = (port_uptime - metric_dict['previous_monitor_time'])
                    metric_rate = (this_measurement - metric_dict['previous_measurement']) / float(time_delta)

                metric_dict['previous_measurement'] = this_measurement
                metric_dict['previous_monitor_time'] = port_uptime
                return

        logging.exception('metric {0} not found on {1}:{2}'.format(metric_key, vnf_name, vnf_interface))
        logging.exception('monport:{0}, dpid:{1}'.format(mon_port, switch_dpid))
        logging.exception('port dict:{0}'.format(port_stat_dict))
        return 'metric {0} not found on {1}:{2}'.format(metric_key, vnf_name, vnf_interface)
Example #36
    def get_flow_metrics(self):
        """
        Get all metrics defined in the list and export it to Prometheus.
        """
        while self.start_monitoring:

            self.monitor_flow_lock.acquire()

            for flow_dict in self.flow_metrics:
                data = {}

                data['cookie'] = flow_dict['cookie']
                data['cookie_mask'] = COOKIE_MASK

                if 'tx' in flow_dict['metric_key']:
                    data['match'] = {'in_port': flow_dict['mon_port']}
                elif 'rx' in flow_dict['metric_key']:
                    data['out_port'] = flow_dict['mon_port']

                # query Ryu
                ret = self.net.ryu_REST(
                    'stats/flow', dpid=flow_dict['switch_dpid'], data=data)
                if isinstance(ret, dict):
                    flow_stat_dict = ret
                elif isinstance(ret, str):
                    flow_stat_dict = ast.literal_eval(ret.rstrip())
                else:
                    flow_stat_dict = None

                logging.debug('received flow stat:{0} '.format(flow_stat_dict))

                self.set_flow_metric(flow_dict, flow_stat_dict)

            try:
                if len(self.flow_metrics) > 0:
                    pushadd_to_gateway(
                        self.pushgateway, job='sonemu-SDNcontroller', registry=self.registry)
            except Exception as e:
                logging.warning(
                    "Pushgateway not reachable: {0} {1}".format(Exception, e))

            self.monitor_flow_lock.release()
            time.sleep(1)
Example #37
 def test_pushadd_with_groupingkey(self):
     pushadd_to_gateway(self.address, "my_job", self.registry, {"a": 9})
     self.assertEqual(self.requests[0][0].command, "POST")
     self.assertEqual(self.requests[0][0].path, "/metrics/job/my_job/a/9")
     self.assertEqual(self.requests[0][0].headers.get("content-type"), CONTENT_TYPE_LATEST)
     self.assertEqual(self.requests[0][1], b"# HELP g help\n# TYPE g gauge\ng 0.0\n")
Example #38
 def test_pushadd_with_groupingkey(self):
     pushadd_to_gateway(self.address, "my_job", self.registry, {'a': 9})
     self.assertEqual(self.requests[0][0].command, 'POST')
     self.assertEqual(self.requests[0][0].path, '/job/my_job/a/9')
     self.assertEqual(self.requests[0][0].headers.get('content-type'), CONTENT_TYPE_LATEST)
     self.assertEqual(self.requests[0][1], b'# HELP g help\n# TYPE g gauge\ng 0.0\n')