Code Example #1
# Imports assumed from context (not shown in the original snippet);
# the logger_manager path follows the other examples below.
from flask import abort, jsonify, request

from logger_manager import LoggerManager


def update_logger(logger_id):
    req_body = request.get_json()

    if logger_id in LoggerManager.all_loggers:
        logger_to_update = LoggerManager.all_loggers[logger_id]
    else:
        abort(404) # Not found

    if 'name' in req_body:
        logger_to_update.name = req_body['name']

    if 'display_name' in req_body:
        logger_to_update.display_name = req_body['display_name']

    if 'is_displayed' in req_body:
        logger_to_update.is_displayed = req_body['is_displayed']
        LoggerManager.refresh_displayed_loggers()

    logger_to_update.save()

    return jsonify(logger_to_update.serialize_to_dict())
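
A quick way to exercise an endpoint like this is a small client script. The sketch below assumes the function is registered under a hypothetical route such as /loggers/<logger_id> accepting PUT; the host, port, and route are illustrative, not taken from the original code.

import requests

# Hypothetical route and host; adjust to however update_logger is registered.
resp = requests.put(
    'http://localhost:5000/loggers/42',
    json={'display_name': 'Edge node 42', 'is_displayed': True},
)
resp.raise_for_status()
print(resp.json())  # the serialized logger returned by jsonify(...)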
Code Example #2
# Imports assumed from context (not shown in the original snippet).
import json
import os
from datetime import datetime, timedelta
from time import sleep

import elasticsearch
from elasticsearch_dsl import connections
from flask import Flask
from flask_cors import cross_origin

# Logger (an elasticsearch_dsl Document used below) and LoggerManager are
# project-local modules whose imports are not shown in the original snippet.
from logger_manager import LoggerManager

CONNECTION_RETRY_TIMEOUT_MINUTES = 5
CONNECTION_RETRY_INTERVAL_SECONDS = 10

with open(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'config.json')) as config_file:
    config = json.load(config_file)

app = Flask(__name__)
app.config['DEBUG'] = config['debug']

started_connecting_time = datetime.now()

while (datetime.now() - started_connecting_time) < timedelta(minutes=CONNECTION_RETRY_TIMEOUT_MINUTES):
    try:
        print('Connecting to database...')
        client = connections.create_connection(hosts=[config['db_host']])
        LoggerManager.initialize()
        print('Connected successfully')
        break
    except (ConnectionRefusedError, elasticsearch.exceptions.ConnectionError):
        print(f'Failed to connect to database, retrying in {CONNECTION_RETRY_INTERVAL_SECONDS} seconds')
        sleep(CONNECTION_RETRY_INTERVAL_SECONDS)
else:
    # while-else: reached only if the retry window expired without the loop
    # ever hitting `break`; without this, `client` would be left undefined.
    raise RuntimeError('Could not connect to the database before the retry timeout')


@app.route('/latest', methods=['GET'], strict_slashes=False)
@cross_origin()
def get_latest_logs():
    """
    Returns the latest log data, as well as the logger's name, for each displayed logger
    """

    search = Logger.search() \
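
The snippet above breaks off in the middle of the query. For orientation only, here is a rough sketch of how an elasticsearch_dsl search like this is typically completed and executed; the filter field, sort key, and response handling are assumptions, not the original code.

# Sketch only: a hypothetical completion of a query like the one above.
search = Logger.search() \
    .filter('term', is_displayed=True) \
    .sort('-@timestamp')

response = search.execute()
for hit in response:
    print(hit.name, hit.to_dict())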
Code Example #3
import threading
import time
import traceback

from psycopg2 import OperationalError

from interface.settings import (BASE_DIR, FIELD_DESCRIPTIONS, HEARTBEAT,
                                LOGS_TIME_BUFFER, POSTGRES_CONFIG, PREVIEW_LIMIT)
from logger_manager import LoggerManager

from .input_validator import (load_and_validate_columns,
                              load_and_validate_constraints,
                              load_and_validate_date,
                              load_and_validate_order_clauses)
from .postgresql_manager import PostgreSQL_Manager

PGM = PostgreSQL_Manager(POSTGRES_CONFIG, FIELD_DESCRIPTIONS.keys(),
                         LOGS_TIME_BUFFER)

LOGGER = LoggerManager(logger_name='opendata-interface',
                       module_name='opendata',
                       heartbeat_dir=HEARTBEAT['dir'])


def heartbeat():
    while True:
        try:
            PGM.get_min_and_max_dates()
            LOGGER.log_heartbeat('Scheduled heartbeat', HEARTBEAT['api_file'],
                                 'SUCCEEDED')
        except OperationalError as operational_error:
            LOGGER.log_heartbeat(
                'PostgreSQL error: {0}'.format(
                    str(operational_error).replace('\n', ' ')),
                HEARTBEAT['api_file'], 'FAILED')
        except Exception as exception:
            # The original snippet is cut off here; a minimal completion
            # mirroring the OperationalError branch above would be:
            LOGGER.log_heartbeat(
                'Error: {0}'.format(str(exception).replace('\n', ' ')),
                HEARTBEAT['api_file'], 'FAILED')
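
Since threading is imported but never used in the visible portion, the heartbeat() loop is presumably meant to run in the background. A minimal way to start it, assuming a daemon thread is acceptable for this process:

# Run the heartbeat loop on a background thread so it does not block the API.
heartbeat_thread = threading.Thread(target=heartbeat, daemon=True)
heartbeat_thread.start()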
Code Example #4
import datetime
import os
import time

import numpy as np
import pandas as pd
from dateutil.relativedelta import relativedelta

import analyzer_conf
import settings
# AnalyzerDatabaseManager is used below but its import is missing from the
# original snippet; this module path is an assumption.
from AnalyzerDatabaseManager import AnalyzerDatabaseManager
from logger_manager import LoggerManager
from models.AveragesByTimeperiodModel import AveragesByTimeperiodModel
from models.TimeSyncModel import TimeSyncModel

db_manager = AnalyzerDatabaseManager(settings, analyzer_conf)
logger_m = LoggerManager(settings.LOGGER_NAME, 'analyzer')

logger_m.log_info('_tmp_find_anomalies_start', "Process started ...")

current_time = datetime.datetime.now()

# add first request timestamps for service calls that have appeared
logger_m.log_info(
    '_tmp_find_anomalies_1',
    "Add first request timestamps for service calls that have appeared ...")
logger_m.log_heartbeat(
    "Checking if completely new service calls have appeared",
    settings.HEARTBEAT_PATH, settings.HEARTBEAT_FILE, 'SUCCEEDED')
db_manager.add_first_request_timestamps_from_clean_data()
logger_m.log_info(
    '_tmp_find_anomalies_1',
    # Closing message completed to match the start-log above (the original
    # snippet is truncated here):
    "Add first request timestamps for service calls that have appeared ... Done!")
Code Example #5
from aiohttp import web

import config
from template_manager import TemplateManager
from logger_manager import LoggerManager

logger = LoggerManager.get_logger(__name__)


async def index(request):
    log_msg = '[handler - index] handling {} from {}'
    logger.info(log_msg.format(str(request), request.remote))

    data = {'app_name': config.APP_NAME}
    rendered_index = await TemplateManager.render_template('index', data=data)

    return web.Response(text=rendered_index)
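
The handler above is not wired to an application in the snippet. A minimal aiohttp setup that would serve it might look like this; the route path and port are assumptions, not taken from the original code.

# Minimal wiring for the index handler; route and port are illustrative.
app = web.Application()
app.router.add_get('/', index)

if __name__ == '__main__':
    web.run_app(app, port=8080)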
Code Example #6
import datetime
import os
import time

import numpy as np
import pandas as pd
from dateutil.relativedelta import relativedelta

import analyzer_conf
import settings
# AnalyzerDatabaseManager is used below but not imported in the original
# snippet; this module path is an assumption.
from AnalyzerDatabaseManager import AnalyzerDatabaseManager
from logger_manager import LoggerManager
from models.AveragesByTimeperiodModel import AveragesByTimeperiodModel

db_manager = AnalyzerDatabaseManager(settings, analyzer_conf)
logger_m = LoggerManager(settings.LOGGER_NAME, 'analyzer')

logger_m.log_info('_tmp_train_or_update_historic_averages_models_start',
                  "Process started ...")

# add first request timestamps for service calls that have appeared
logger_m.log_info(
    '_tmp_train_or_update_historic_averages_models_1',
    "Checking if completely new service calls have appeared ...")
logger_m.log_heartbeat(
    "Checking if completely new service calls have appeared",
    settings.HEARTBEAT_PATH, settings.HEARTBEAT_FILE, 'SUCCEEDED')
db_manager.add_first_request_timestamps_from_clean_data()
logger_m.log_info(
    '_tmp_train_or_update_historic_averages_models_1',
    "Checking if completely new service calls have appeared ... Done!")