Example #1
    def get(self, url):
        data_json = {}

        try:
            data_from_monitor = requests.get(
                url,
                auth=HTTPBasicAuth(self.user, self.passwd),
                verify=False,
                headers={'Content-Type': 'application/json'},
                timeout=self.timeout)
            # Raises requests.exceptions.HTTPError for 4xx/5xx responses,
            # which is caught by the except clause below
            data_from_monitor.raise_for_status()

            log.debug('API call: ' + data_from_monitor.url)
            if data_from_monitor.status_code != 200:
                # Parse the body first so the error fields can be read
                # safely with .get() instead of indexing an empty dict
                data_json = json.loads(data_from_monitor.content)
                log.info(
                    "Response", {
                        'status': data_from_monitor.status_code,
                        'error': data_json.get('error'),
                        'full_error': data_json.get('full_error')
                    })
            else:
                data_json = json.loads(data_from_monitor.content)
                log.info(
                    "call api {}".format(url), {
                        'status': data_from_monitor.status_code,
                        'response_time':
                        data_from_monitor.elapsed.total_seconds()
                    })
        except requests.exceptions.RequestException as err:
            log.error(str(err))

        return data_json
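
Since the call is made with verify=False, every request will emit an InsecureRequestWarning from urllib3. A minimal sketch for silencing it, assuming the urllib3 package bundled with requests:

import urllib3

# Suppress the InsecureRequestWarning triggered by verify=False
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)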
Example #2
def after_request_func(response):
    total_requests.inc()

    call_status = {
        'content_length': response.content_length,
        'status': response.status_code,
        # Counter exposes no public read API; _value.get() peeks at the
        # prometheus_client internal value
        'count': total_requests._value.get()
    }
    log.info('Response', call_status)

    return response
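
The total_requests object is presumably a module-level prometheus_client Counter shared by the request hooks; a minimal sketch, with the metric name and help text as assumptions:

from prometheus_client import Counter

# Hypothetical definition of the counter incremented in after_request_func
total_requests = Counter('total_requests', 'Total number of HTTP requests')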
Example #3
def start_scheduler(configuration):

    if 'cache' in configuration:
        scheduler = AsyncIOScheduler()
        cache_config = configuration['cache']
        seconds = cache_config.get('interval')
        if seconds is None:
            seconds = default_interval
        ttl = cache_config.get('ttl')
        if ttl is None:
            ttl = default_ttl
        log.info(f"Monitor collector will run every {seconds} sec")
        # Run once at start up
        monitorconnection.MonitorConfig().collect_cache(ttl)
        scheduler.add_job(monitorconnection.MonitorConfig().collect_cache,
                          trigger='interval',
                          args=[ttl],
                          seconds=seconds)
        scheduler.start()
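
start_scheduler only reads the cache section of the parsed configuration. A sketch of the structure it expects, written as the Python dict that config.read_config might return (key names come from the code above, the values are assumptions):

configuration = {
    'port': 9631,
    'cache': {
        'interval': 60,  # seconds between collector runs (assumed value)
        'ttl': 300       # cache expiry passed to collect_cache (assumed value)
    }
}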
Example #4
def after_request_func(response):
    total_requests.inc()

    call_status = {
        'remote_addr': request.remote_addr,
        'url': request.url,
        # request.user_agent is a UserAgent object; stringify it for logging
        'user_agent': str(request.user_agent),
        'content_length': response.content_length,
        'status': response.status_code
    }
    log.info('Access', call_status)

    return response
Example #5
def start():
    """
    Used from __main__ to start as a simple Quart app
    :return:
    """
    parser = argparse.ArgumentParser(description='monitor_exporter')

    parser.add_argument('-f',
                        '--configfile',
                        dest="configfile",
                        help="configuration file")

    parser.add_argument('-p', '--port', dest="port", help="Server port")

    args = parser.parse_args()

    port = 9631

    config_file = 'config.yml'
    if args.configfile:
        config_file = args.configfile

    configuration = config.read_config(config_file)
    if 'port' in configuration:
        port = configuration['port']

    if args.port:
        # argparse values are strings; the web server expects an int port
        port = int(args.port)

    log.configure_logger(configuration)

    monitorconnection.MonitorConfig(configuration)
    # Need to create an event loop for apscheduler
    loop = asyncio.new_event_loop()
    # Set the event loop
    asyncio.set_event_loop(loop)

    start_scheduler(configuration)

    log.info(f"Starting web app on port {port}")

    app = Quart(__name__)

    app.register_blueprint(proxy.app, url_prefix='')

    # Use the existing event loop
    app.run(host='0.0.0.0', port=port, loop=loop)
Example #6
async def get_metrics():
    before_request_func(request)
    target = request.args.get('target')

    log.info('Collect metrics', {'target': target})

    monitor_data = Perfdata(monitorconnection.MonitorConfig(), target)

    # Fetch performance data from Monitor; awaiting the coroutine directly
    # is equivalent here to wrapping it in a task first
    await monitor_data.get_perfdata()

    target_metrics = monitor_data.prometheus_format()

    resp = Response(target_metrics)
    resp.headers['Content-Type'] = CONTENT_TYPE_LATEST

    return resp
Example #7
def get_metrics():
    log.info(request.url)
    target = request.args.get('target')

    log.info('Collect metrics', {'target': target})

    monitor_data = Perfdata(monitorconnection.MonitorConfig(), target)

    # Fetch performance data from Monitor
    monitor_data.get_perfdata()

    target_metrics = monitor_data.prometheus_format()

    resp = Response(target_metrics)
    resp.headers['Content-Type'] = CONTENT_TYPE_LATEST

    return resp
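
Both variants of get_metrics rely on CONTENT_TYPE_LATEST from prometheus_client and a framework Response object. A sketch of the assumed imports for the Flask variant; the Quart variant in Example #6 would take Response and request from quart instead:

from flask import Response, request
from prometheus_client import CONTENT_TYPE_LATEST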
Example #8
def start():
    """
    Used from __main__ to start as a simple Flask app
    :return:
    """
    parser = argparse.ArgumentParser(description='monitor_exporter')

    parser.add_argument('-f',
                        '--configfile',
                        dest="configfile",
                        help="configuration file")

    parser.add_argument('-p', '--port', dest="port", help="Server port")

    args = parser.parse_args()

    port = 9631

    config_file = 'config.yml'
    if args.configfile:
        config_file = args.configfile

    configuration = config.read_config(config_file)
    if 'port' in configuration:
        port = configuration['port']

    if args.port:
        # argparse values are strings; the web server expects an int port
        port = int(args.port)

    log.configure_logger(configuration)

    monitorconnection.MonitorConfig(configuration)
    log.info('Starting web app on port: ' + str(port))

    app = Flask(__name__)

    app.register_blueprint(proxy.app, url_prefix='/')
    app.run(host='0.0.0.0', port=port)
Example #9
def create_app(config_path=None):
    """
    Typically used from gunicorn when a config file other than the default must be passed, e.g.
    gunicorn -b localhost:5000 --access-logfile /dev/null -w 4 "wsgi:create_app('/tmp/config.yml')"
    :param config_path:
    :return:
    """
    config_file = 'config.yml'
    if config_path:
        config_file = config_path

    configuration = config.read_config(config_file)

    log.configure_logger(configuration)

    monitorconnection.MonitorConfig(configuration)
    log.info('Starting web app')

    app = Flask(__name__)

    app.register_blueprint(proxy.app, url_prefix='/')

    return app
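
For the gunicorn invocation shown in the docstring to work, a wsgi module must expose create_app; a minimal sketch, with the import path as an assumption:

# wsgi.py -- gunicorn calls the factory itself when started with
# "wsgi:create_app('/tmp/config.yml')", so re-exporting it is enough
from main import create_app  # hypothetical module holding create_app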
Example #10
    def collect_cache(self, ttl: int):

        try:
            # get downtime
            now = int(time.time())
            ongoing_downtime = set()
            count_downtimes = self.get_sync(self.url_downtime.format('count'))
            if 'count' in count_downtimes and int(
                    count_downtimes['count']) > 0:
                count = count_downtimes['count']
                downtimes = self.get_sync(
                    self.url_downtime.format('query') + '&limit=' + str(count))
                for downtime in downtimes:
                    if downtime['start_time'] <= now <= downtime['end_time']:
                        # downtime['id'] is an int; the Redis-backed index is
                        # mapped back to int further down before comparing sets
                        ongoing_downtime.add(downtime['id'])

            # get the service data from Monitor
            start_time = time.time()
            count_services = self.get_sync(
                self.url_query_all_service_data.format('count'))

            hosts_to_services = {}

            services_flat = []
            if 'count' in count_services:
                count = count_services['count']
                services_flat = self.get_sync(
                    self.url_query_all_service_data.format('query') +
                    '&limit=' + str(count))
                for service_item in services_flat:
                    host_name = service_item['host']['name']
                    if host_name not in hosts_to_services:
                        hosts_to_services[host_name] = []
                    downtime = set(service_item.pop('downtimes'))
                    service_item['downtime'] = bool(downtime & ongoing_downtime)

                    hosts_to_services[host_name].append(service_item)

            # get the host data from Monitor
            count_hosts = self.get_sync(
                self.url_query_all_host.format('count'))
            hosts = []
            if 'count' in count_hosts:
                count = count_hosts['count']
                hosts = self.get_sync(
                    self.url_query_all_host.format('query') + '&limit=' +
                    str(count))

            start_redis_time = time.time()
            # Save host objects
            hosts_set = set()
            r = self.get_cache_connection()
            p = r.pipeline()
            for host in hosts:
                host_name = host['name']
                downtime = set(host.pop('downtimes'))
                host['downtime'] = bool(downtime & ongoing_downtime)

                hosts_set.add(host_name)
                if host_name in hosts_to_services:
                    host['services'] = hosts_to_services[host_name]
                p.set(self.key_hosts(host_name), json.dumps(host))
                p.expire(self.key_hosts(host_name), ttl)
            p.execute()

            # Build host index (reuse the same cache connection)
            existing_hosts = r.smembers(self.key_host_index())
            existing_hosts = r.smembers(self.key_host_index())
            log.info(f"Existing hosts {len(existing_hosts)}")
            log.info(f"Monitor hosts {len(hosts_set)}")

            del_hosts = existing_hosts - hosts_set
            add_hosts = hosts_set - existing_hosts
            log.info(f"Delete hosts {len(del_hosts)}")
            log.info(f"Add hosts {len(add_hosts)}")

            existing_downtimes = set(
                map(int, r.smembers(self.key_downtime_index())))
            log.info(f"Existing downtimes {len(existing_downtimes)}")
            log.info(f"Monitor downtimes {len(ongoing_downtime)}")
            del_downtimes = existing_downtimes - ongoing_downtime
            add_downtimes = ongoing_downtime - existing_downtimes
            log.info(f"Delete downtimes {len(del_downtimes)}")
            log.info(f"Add downtimes {len(add_downtimes)}")

            p = r.pipeline()
            for host in add_hosts:
                p.sadd(self.key_host_index(), host)
            for host in del_hosts:
                p.srem(self.key_host_index(), host)

            for downtime in add_downtimes:
                p.sadd(self.key_downtime_index(), downtime)
            for downtime in del_downtimes:
                p.srem(self.key_downtime_index(), downtime)

            p.expire(self.key_host_index(), ttl)
            p.expire(self.key_downtime_index(), ttl)
            p.execute()

            end_time = time.time()

            log.info(
                f"Monitor collector exec time total {end_time - start_time} sec, "
                f"redis write {len(services_flat) + len(hosts)} objects "
                f"in {end_time - start_redis_time} sec")
        except Exception as err:
            log.error(f"Monitor collector failed with {str(err)}")
Example #11
def before_request_func(request):
    call_status = {'remote_addr': request.remote_addr, 'url': request.url}
    log.info('Access', call_status)
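
before_request_func takes the request object explicitly and is invoked by hand inside the handlers (see Example #6), while after_request_func already matches Flask's hook signature; registering it as a regular hook would look like this sketch, assuming app is the Flask or Quart instance:

# Flask/Quart passes the response object to after_request hooks, which
# matches the signature of after_request_func above
app.after_request(after_request_func)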