Example #1
    file_log.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s'))

    # Attach the console and file handlers to the root logger and log everything from DEBUG up.
    logging.getLogger().addHandler(stream_log)
    logging.getLogger().addHandler(file_log)
    logging.getLogger().setLevel(logging.DEBUG)

    # Print an example collectd configuration and exit if requested.
    if arguments.print_config:
        print(collectd.COLLECTD_EXAMPLE_CONFIGURATION.format(socket=arguments.socket))
        quit()

    if arguments.fake:
        fake.fake()
    # Choose the metric source: the collectd unixsock interface or a Prometheus server.
    if arguments.collectd:
        metric_source = collectd.Collectd(arguments.socket)
    else:
        metric_source = prometheus.Prometheus(arguments.prometheus_address)
    if arguments.shell:
        shell()
        quit()
    # List the discoverable metrics together with their help strings, then exit.
    if arguments.list:
        pprint.pprint([m.symbol + m.help for m in metric.Metric.discover_with_help(metric_source).values()])
        quit()

    logging.debug('arguments={} isatty={}'.format(arguments, sys.stdout.isatty()))
    try:
        # Dump plain text to stdout when output is piped or batch mode is requested;
        # otherwise start the interactive interface.
        if not sys.stdout.isatty() or arguments.batch:
            dumptostdout.dumpToStdout(arguments.metricPattern, arguments.interval, metric_source, arguments.iterations, arguments.ttl)
        else:
            fancyUserInterface(arguments.metricPattern, arguments.interval, metric_source, arguments.ttl)
    except KeyboardInterrupt:
        pass
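
The snippet above picks up after the logging handlers and the command-line arguments have already been created. A minimal sketch of what that omitted setup could look like follows; the names stream_log, file_log and arguments come from the snippet itself, while the argparse options, the log file name and everything else are assumptions rather than the original code.

import argparse
import logging

# Hypothetical reconstruction of the omitted prelude -- not the original code.
stream_log = logging.StreamHandler()               # log to the console
file_log = logging.FileHandler('metrics.log')      # assumed log file name

parser = argparse.ArgumentParser()
parser.add_argument('--print-config', action='store_true')
parser.add_argument('--socket')                    # collectd unixsock path
parser.add_argument('--collectd', action='store_true')
parser.add_argument('--prometheus-address', dest='prometheus_address')
arguments = parser.parse_args()                    # further options omitted here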
Example #2
import prometheus

from datetime import datetime, timedelta, timezone
import json
import matplotlib.pyplot as plt
import os
from pathlib import Path
import sys

url, namespace = sys.argv[1:]

prom = prometheus.Prometheus(url)

memory_usage_query = """
sum by(container) (
    container_memory_usage_bytes{{
        job="kubelet",
        namespace="{namespace}",
        container!="POD",
        container!=""
    }}
)
"""

path = "memory_usage.json"
grab_fresh_data = True
if grab_fresh_data:
    start, end = prometheus.interval(hours=4)
    step = 60
    resp = prom.issue_request(memory_usage_query.format(namespace=namespace),
                              start.timestamp(), end.timestamp(), step)
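
The fragment stops right after the range query is issued; json, matplotlib, Path and path are set up above but not yet used in what is shown. As a hedged guess at how such a script might continue, the response could be cached to path and plotted per container. The sketch below assumes issue_request returns the standard Prometheus range-query JSON (a dict whose data.result entries carry [timestamp, value] pairs); that return type and the plotting choices are assumptions, not taken from the original.

# Sketch of a possible continuation -- assumes resp is the raw Prometheus JSON payload.
Path(path).write_text(json.dumps(resp))            # cache the response for later runs
data = json.loads(Path(path).read_text())

# One line per container; each result's "values" is a list of [timestamp, value] pairs.
for series in data["data"]["result"]:
    times = [datetime.fromtimestamp(float(t), tz=timezone.utc) for t, _ in series["values"]]
    mib = [float(v) / 2**20 for _, v in series["values"]]    # bytes -> MiB
    plt.plot(times, mib, label=series["metric"].get("container", ""))

plt.xlabel("time (UTC)")
plt.ylabel("memory usage (MiB)")
plt.legend()
plt.show()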