def spawn_collectors(c):
    # Base URL of the stats API, e.g. http://<hostname>:<port>/<base_url>
    root_url = "http://%s:%s/%s" % (c['hostname'], c['port'], c['base_url'])
    pprint(c)
    for data_type_name in c['data_type']:
        data_type = c['data_type'][data_type_name]
        if not data_type['enable']:
            continue
        filter_dict = data_type.get('filter', None)
        for endpoint_name in data_type['endpoints']:
            endpoint = data_type['endpoints'][endpoint_name]
            url = root_url + endpoint['url']
            interval = endpoint['interval']
            prom_key = "%s:%s" % (data_type_name, endpoint_name)
            labels = endpoint['labels']
            values = endpoint['values']
            print(data_type_name, endpoint_name)
            pc = PrometheusClient(c['prometheus'], prom_key, labels, values)
            csv = CSVWriter(c['csvwriter'], prom_key, labels + values)
            # Queue config endpoints need a dedicated collector; all other
            # endpoints use the generic one.
            if data_type_name == 'queue' and endpoint_name == 'config':
                worker = QueueConfigCollector(c['dpid'], url, interval,
                                              pc, csv, filter_dict)
            else:
                worker = Collector(c['dpid'], url, interval, pc, csv, filter_dict)
            worker.start()
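# Illustrative only: a minimal sketch of the nested config dict that
# spawn_collectors(c) expects, inferred from the keys read above. The concrete
# label/value names and the 'prometheus'/'csvwriter' sub-configs passed through
# to PrometheusClient/CSVWriter are assumptions, not the project's real schema.
EXAMPLE_COLLECTOR_CONFIG = {
    'dpid': 1,
    'hostname': 'localhost',
    'port': 8080,
    'base_url': 'stats',
    'prometheus': {},            # forwarded verbatim to PrometheusClient
    'csvwriter': {},             # forwarded verbatim to CSVWriter
    'data_type': {
        'queue': {
            'enable': True,
            'filter': None,      # optional; defaults to None when absent
            'endpoints': {
                'config': {
                    'url': '/queue/config',
                    'interval': 10,
                    'labels': ['port_no', 'queue_id'],
                    'values': ['max_rate', 'min_rate'],
                },
            },
        },
    },
}

# spawn_collectors(EXAMPLE_COLLECTOR_CONFIG)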
def main():
    """ Script entry point. """
    ctx = Context()
    ctx.args = parse_arguments()
    init_wlset(ctx)
    init_sysmax(ctx)

    if ctx.args.enable_prometheus:
        ctx.prometheus = PrometheusClient()
        ctx.prometheus.start()

    if ctx.args.detect:
        init_threshmap(ctx)
        init_tdp_map(ctx)

    if ctx.args.control:
        ctx.cpuq = CpuQuota(ctx.sysmax_util, ctx.args.margin_ratio,
                            ctx.args.verbose)
        quota_controller = NaiveController(ctx.cpuq, ctx.args.quota_cycles)
        ctx.llc = LlcOccup()
        llc_controller = NaiveController(ctx.llc, ctx.args.llc_cycles)
        if ctx.args.disable_cat:
            ctx.llc = LlcOccup(init_level=Resource.BUGET_LEV_FULL)
            ctx.controllers = {Contention.CPU_CYC: quota_controller}
        else:
            ctx.controllers = {
                Contention.CPU_CYC: quota_controller,
                Contention.LLC: llc_controller
            }

    # The utilization monitor always runs; the CSV header is only written
    # when recording is enabled.
    if ctx.args.record:
        with open('./util.csv', 'w') as utilf:
            utilf.write('TIME,CID,CNAME,UTIL\n')
    util_thread = threading.Thread(target=monitor,
                                   args=(mon_util_cycle, ctx,
                                         ctx.args.util_interval))
    util_thread.start()

    # The platform metrics monitor is optional.
    if ctx.args.collect_metrics:
        if ctx.args.record:
            with open('./metrics.csv', 'w') as metricf:
                metricf.write('TIME,CID,CNAME,INST,CYC,CPI,L3MPKI,' +
                              'L3MISS,NF,UTIL,L3OCC,MBL,MBR\n')
        metric_thread = threading.Thread(target=monitor,
                                         args=(mon_metric_cycle, ctx,
                                               ctx.args.metric_interval))
        metric_thread.start()

    print('eris agent version', __version__, 'is started!')

    # Block on the monitor threads; Ctrl-C triggers a graceful shutdown.
    try:
        util_thread.join()
        if ctx.args.collect_metrics:
            metric_thread.join()
    except KeyboardInterrupt:
        print('Shutdown eris agent ...exiting')
        ctx.interrupt = True
    except Exception:
        traceback.print_exc(file=sys.stdout)

    sys.exit(0)
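# Illustrative only: a minimal argparse sketch covering the ctx.args attributes
# that main() reads above. The option names mirror those attributes; the types,
# defaults, and help strings are assumptions, not the agent's real definitions.
import argparse

def parse_arguments_sketch():
    parser = argparse.ArgumentParser(description='eris agent (sketch)')
    parser.add_argument('--enable-prometheus', action='store_true',
                        help='expose metrics via Prometheus')
    parser.add_argument('--detect', action='store_true',
                        help='enable contention detection')
    parser.add_argument('--control', action='store_true',
                        help='enable resource control')
    parser.add_argument('--disable-cat', action='store_true',
                        help='skip LLC (CAT) control')
    parser.add_argument('--record', action='store_true',
                        help='record utilization/metrics to CSV files')
    parser.add_argument('--collect-metrics', action='store_true',
                        help='start the platform metrics monitor thread')
    parser.add_argument('--verbose', action='store_true')
    parser.add_argument('--util-interval', type=int, default=2,
                        help='utilization sampling interval in seconds')
    parser.add_argument('--metric-interval', type=int, default=20,
                        help='metrics sampling interval in seconds')
    parser.add_argument('--margin-ratio', type=float, default=0.5,
                        help='CPU quota margin ratio')
    parser.add_argument('--quota-cycles', type=int, default=7,
                        help='detection cycles before the quota controller acts')
    parser.add_argument('--llc-cycles', type=int, default=6,
                        help='detection cycles before the LLC controller acts')
    return parser.parse_args()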
from flask import Blueprint, request, redirect, url_for, jsonify
from flask_login import login_required

from service import ForgeService
from prometheus import PrometheusClient

metrics = Blueprint("metrics", __name__)
prom = PrometheusClient("localhost", 9090)


@metrics.route("/metrics/tps")
@login_required
def tps():
    return jsonify(prom.get_tps())


@metrics.route("/metrics/ticktime")
@login_required
def ticktime():
    return jsonify(prom.get_ticktime())


@metrics.route("/metrics/dim_tps")
@login_required
def dim_tps():
    return jsonify(prom.get_dim_tps())


@metrics.route("/metrics/dim_ticktime")
@login_required
def dim_ticktime():
    return jsonify(prom.get_dim_ticktime())
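# Illustrative only: one way the metrics blueprint above might be wired into an
# application. The app factory, host/port, and LoginManager wiring are
# assumptions for the sketch; the project's real setup may differ.
from flask import Flask
from flask_login import LoginManager

def create_app_sketch():
    app = Flask(__name__)
    # @login_required needs a LoginManager bound to the app; a real user_loader
    # (omitted here) must also be registered before requests can authenticate.
    login_manager = LoginManager()
    login_manager.init_app(app)
    app.register_blueprint(metrics)
    return app

# create_app_sketch().run(host="0.0.0.0", port=5000)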
def prometheus(self):
    # Lazily create the PrometheusClient on first access and cache it.
    if self._prometheus is None:
        self._prometheus = PrometheusClient()
    return self._prometheus
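# Illustrative only: assuming the accessor above is exposed as a @property on an
# object whose __init__ sets self._prometheus = None, the client is constructed
# lazily on first access and the same instance is returned afterwards:
#
#   obj.prometheus    # first access -> builds the PrometheusClient
#   obj.prometheus    # later accesses -> cached instance, no new client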