def get_algorithms(current_skyline_app):
    """
    Return a dict of algorithms and their ids, keyed by algorithm name.

    :param current_skyline_app: the app calling the function
    :type current_skyline_app: str
    :return: algorithms dict, e.g. {'algorithm_name': id, ...}; empty on failure
    :rtype: dict
    """
    current_skyline_app_logger = current_skyline_app + 'Log'
    current_logger = logging.getLogger(current_skyline_app_logger)
    algorithms = {}

    # Initialise engine so the references below cannot raise NameError if
    # get_engine itself raises before assigning it
    engine = None
    try:
        engine, fail_msg, trace = get_engine(current_skyline_app)
        if fail_msg != 'got MySQL engine':
            current_logger.error(
                'error :: get_algorithms :: could not get a MySQL engine fail_msg - %s' % str(fail_msg))
        if trace != 'none':
            current_logger.error(
                'error :: get_algorithms :: could not get a MySQL engine trace - %s' % str(trace))
    except Exception as err:
        current_logger.error(traceback.format_exc())
        current_logger.error(
            'error :: get_algorithms :: could not get a MySQL engine - %s' % str(err))

    # First collect the distinct algorithm names
    algorithms_list = []
    if engine:
        try:
            connection = engine.connect()
            stmt = 'SELECT DISTINCT(algorithm) FROM algorithms'
            result = connection.execute(stmt)
            for row in result:
                algorithms_list.append(row['algorithm'])
            connection.close()
        except Exception as err:
            current_logger.error(traceback.format_exc())
            current_logger.error(
                'error :: get_algorithms :: failed to build algorithms_list - %s' % str(err))

    # Then resolve each algorithm name to its id
    if algorithms_list:
        try:
            connection = engine.connect()
            for algorithm in algorithms_list:
                stmt = 'SELECT id FROM algorithms WHERE algorithm=\'%s\'' % algorithm
                result = connection.execute(stmt)
                for row in result:
                    algorithms[algorithm] = row['id']
                    break
            connection.close()
        except Exception as err:
            current_logger.error(traceback.format_exc())
            current_logger.error(
                'error :: get_algorithms :: failed to build algorithms - %s' % str(err))

    if engine:
        engine_disposal(current_skyline_app, engine)
    return algorithms
def get_db_fp_timeseries(current_skyline_app, metric_id, fp_id):
    """
    Return a features profile timeseries from the database as a list of
    [timestamp, value] lists.

    :param current_skyline_app: the app calling the function
    :param metric_id: the metric id (determines the z_ts_<metric_id> table)
    :param fp_id: the features profile id
    :type current_skyline_app: str
    :type metric_id: int
    :type fp_id: int
    :return: timeseries, e.g. [[ts, value], ...]; empty list on failure
    :rtype: list
    """
    function_str = 'functions.database.queries.fp_timeseries'
    current_skyline_app_logger = current_skyline_app + 'Log'
    current_logger = logging.getLogger(current_skyline_app_logger)
    timeseries = []

    # Define these before the try blocks so the except handlers cannot raise
    # NameError on an unbound name
    engine = None
    metric_fp_ts_table = 'z_ts_%s' % str(metric_id)

    try:
        engine, fail_msg, trace = get_engine(current_skyline_app)
    except Exception as e:
        trace = traceback.format_exc()
        current_logger.error(trace)
        fail_msg = 'error :: %s :: could not get a MySQL engine - %s' % (
            function_str, e)
        current_logger.error('%s' % fail_msg)
        if engine:
            engine_disposal(current_skyline_app, engine)
        if current_skyline_app == 'webapp':
            # Raise to webapp
            raise
        return timeseries

    try:
        start_db_query = timer()
        stmt = 'SELECT timestamp, value FROM %s WHERE fp_id=%s' % (
            metric_fp_ts_table, str(fp_id))
        connection = engine.connect()
        # Execute on the connection that was opened (the original executed on
        # the engine, leaving the opened connection unused)
        for row in connection.execute(stmt):
            fp_id_ts_timestamp = int(row['timestamp'])
            fp_id_ts_value = float(row['value'])
            # Explicit None check on the value so legitimate 0.0 datapoints
            # are not dropped from the timeseries
            if fp_id_ts_timestamp and fp_id_ts_value is not None:
                timeseries.append([fp_id_ts_timestamp, fp_id_ts_value])
        connection.close()
        end_db_query = timer()
        current_logger.info(
            '%s :: determined %s values for the fp_id %s time series in %6f seconds' % (
                function_str, str(len(timeseries)), str(fp_id),
                (end_db_query - start_db_query)))
    except Exception as e:
        current_logger.error(traceback.format_exc())
        current_logger.error(
            'error :: %s :: could not determine timestamps and values from %s - %s' % (
                function_str, metric_fp_ts_table, e))

    if engine:
        engine_disposal(current_skyline_app, engine)
    return timeseries
def latest_anomalies(current_skyline_app):
    """
    Return the latest anomalies as a list of tuples, each tuple a DB row.

    :param current_skyline_app: the app calling the function
    :type current_skyline_app: str
    :return: up to the 10 most recent anomalies rows; empty list on failure
    :rtype: list
    """
    function_str = 'database_queries.latest_anomalies'
    current_skyline_app_logger = current_skyline_app + 'Log'
    current_logger = logging.getLogger(current_skyline_app_logger)
    anomalies = []

    # Initialise engine so the except handler cannot raise NameError if
    # get_engine itself raises before assigning it
    engine = None
    try:
        engine, fail_msg, trace = get_engine(current_skyline_app)
    except Exception as e:
        trace = traceback.format_exc()
        current_logger.error(trace)
        fail_msg = 'error :: %s :: could not get a MySQL engine - %s' % (function_str, e)
        current_logger.error('%s' % fail_msg)
        if engine:
            engine_disposal(current_skyline_app, engine)
        if current_skyline_app == 'webapp':
            # Raise to webapp
            raise
        return anomalies

    try:
        anomalies_table, fail_msg, trace = anomalies_table_meta(current_skyline_app, engine)
        current_logger.info(fail_msg)
    except Exception as e:
        trace = traceback.format_exc()
        current_logger.error('%s' % trace)
        fail_msg = 'error :: %s :: failed to get metrics_table meta - %s' % (function_str, e)
        current_logger.error('%s' % fail_msg)
        if engine:
            engine_disposal(current_skyline_app, engine)
        if current_skyline_app == 'webapp':
            # Raise to webapp
            raise
        return anomalies

    try:
        connection = engine.connect()
        # Replacing panorama query
        # query = 'select id, metric_id, anomalous_datapoint, anomaly_timestamp, full_duration, created_timestamp, anomaly_end_timestamp from anomalies ORDER BY id DESC LIMIT 10'
        stmt = select([anomalies_table.c.id, anomalies_table.c.metric_id,
                       anomalies_table.c.anomalous_datapoint,
                       anomalies_table.c.anomaly_timestamp,
                       anomalies_table.c.full_duration,
                       anomalies_table.c.created_timestamp,
                       anomalies_table.c.anomaly_end_timestamp]).\
            where(anomalies_table.c.id > 0).order_by(anomalies_table.c.id.desc()).\
            limit(10)
        results = connection.execute(stmt)
        if results is not None:
            for row in results:
                if row is not None:
                    anomalies.append(row)
        connection.close()
        current_logger.info('%s :: determined %s latest anomalies' % (
            function_str, str(len(anomalies))))
    except Exception as e:
        trace = traceback.format_exc()
        current_logger.error(trace)
        fail_msg = 'error :: %s :: could not determine latest anomalies - %s' % (
            function_str, e)
        current_logger.error('%s' % fail_msg)
        if engine:
            engine_disposal(current_skyline_app, engine)
        if current_skyline_app == 'webapp':
            # Raise to webapp
            raise
        return anomalies

    if engine:
        engine_disposal(current_skyline_app, engine)
    return anomalies
def metric_id_from_base_name(current_skyline_app, base_name):
    """
    Given a base name, return the metric_id.

    :param current_skyline_app: the app calling the function
    :param base_name: the metric base_name to look up
    :type current_skyline_app: str
    :type base_name: str
    :return: metric_id (0 if not found) on success; on failure returns the
        tuple (False, fail_msg, trace) - NOTE the inconsistent return types
        are part of the existing caller contract
    :rtype: int or tuple
    """
    metric_id = 0
    function_str = 'database_queries.metric_id_from_base_name'
    current_skyline_app_logger = current_skyline_app + 'Log'
    current_logger = logging.getLogger(current_skyline_app_logger)

    # Initialise engine so later references cannot raise NameError if
    # get_engine itself raises before assigning it
    engine = None
    try:
        engine, fail_msg, trace = get_engine(current_skyline_app)
    except Exception as e:
        trace = traceback.format_exc()
        current_logger.error(trace)
        fail_msg = 'error :: %s :: could not get a MySQL engine - %s' % (
            function_str, e)
        current_logger.error('%s' % fail_msg)
        return False, fail_msg, trace

    try:
        metrics_table, fail_msg, trace = metrics_table_meta(
            current_skyline_app, engine)
        current_logger.info(fail_msg)
    except Exception as e:
        trace = traceback.format_exc()
        current_logger.error('%s' % trace)
        fail_msg = 'error :: %s :: failed to get metrics_table meta for %s- %s' % (
            function_str, base_name, e)
        current_logger.error('%s' % fail_msg)
        if engine:
            engine_disposal(current_skyline_app, engine)
        if current_skyline_app == 'webapp':
            # Raise to webapp
            raise
        return False, fail_msg, trace

    try:
        connection = engine.connect()
        stmt = select([metrics_table]).where(metrics_table.c.metric == base_name)
        result = connection.execute(stmt)
        for row in result:
            metric_id = int(row['id'])
            break
        connection.close()
        current_logger.info('%s :: determined db metric id: %s' % (function_str, str(metric_id)))
    except Exception as e:
        trace = traceback.format_exc()
        current_logger.error(trace)
        fail_msg = 'error :: %s :: could not determine id of metric from DB for %s - %s' % (
            function_str, base_name, e)
        current_logger.error('%s' % fail_msg)
        if engine:
            engine_disposal(current_skyline_app, engine)
        if current_skyline_app == 'webapp':
            # Raise to webapp
            raise
        return False, fail_msg, trace

    if engine:
        engine_disposal(current_skyline_app, engine)
    if not metric_id:
        current_logger.error('error :: %s :: no id for metric in the DB - %s' % (
            function_str, base_name))
    return metric_id
def get_anomalies_from_timestamp(current_skyline_app, metric_id, from_timestamp):
    """
    Given a metric_id and timestamp return the anomalies for a metric or all
    metrics (metric_id of 0/None) from the given timestamp.

    :param current_skyline_app: the app calling the function
    :param metric_id: the metric id, falsy for all metrics
    :param from_timestamp: only anomalies at or after this unix timestamp
    :type current_skyline_app: str
    :type metric_id: int
    :type from_timestamp: int
    :return: anomalies dict keyed by anomaly id; empty on failure
    :rtype: dict
    """
    current_skyline_app_logger = current_skyline_app + 'Log'
    current_logger = logging.getLogger(current_skyline_app_logger)
    anomalies = {}

    # Initialise engine so the references below cannot raise NameError if
    # get_engine itself raises before assigning it
    engine = None
    try:
        engine, fail_msg, trace = get_engine(current_skyline_app)
        if fail_msg != 'got MySQL engine':
            current_logger.error(
                'error :: get_anomalies_from_timestamp :: could not get a MySQL engine fail_msg - %s' % str(fail_msg))
        if trace != 'none':
            current_logger.error(
                'error :: get_anomalies_from_timestamp :: could not get a MySQL engine trace - %s' % str(trace))
    except Exception as err:
        current_logger.error(traceback.format_exc())
        current_logger.error(
            'error :: get_anomalies_from_timestamp :: could not get a MySQL engine - %s' % str(err))

    if engine:
        try:
            anomalies_table, fail_msg, trace = anomalies_table_meta(
                current_skyline_app, engine)
            if fail_msg != 'anomalies_table meta reflected OK':
                current_logger.error(
                    'error :: get_anomalies_from_timestamp :: could not get a MySQL engine fail_msg - %s' % str(fail_msg))
            if trace != 'none':
                current_logger.error(
                    'error :: get_anomalies_from_timestamp :: could not get a MySQL engine trace - %s' % str(trace))
        except Exception as err:
            current_logger.error(traceback.format_exc())
            current_logger.error(
                'error :: get_anomalies_from_timestamp :: anomalies_table_meta - %s' % str(err))
        try:
            connection = engine.connect()
            if metric_id:
                stmt = select([anomalies_table]).\
                    where(anomalies_table.c.metric_id == metric_id).\
                    where(anomalies_table.c.anomaly_timestamp >= from_timestamp).\
                    order_by(anomalies_table.c.id.desc())
            else:
                stmt = select([anomalies_table]).\
                    where(anomalies_table.c.anomaly_timestamp >= from_timestamp).\
                    order_by(anomalies_table.c.id.desc())
            results = connection.execute(stmt)
            for row in results:
                anomaly_id = row['id']
                anomalies[anomaly_id] = dict(row)
            connection.close()
        except Exception as err:
            current_logger.error(traceback.format_exc())
            current_logger.error(
                'error :: get_anomalies_from_timestamp :: failed to build anomalies dict - %s' % str(err))

    if engine:
        engine_disposal(current_skyline_app, engine)
    return anomalies
def get_anomalies(current_skyline_app, metric_id, params=None):
    """
    Given a metric_id, return the anomalies for a metric, or just the latest
    one if params['latest'] is passed as True.

    :param current_skyline_app: the app calling the function
    :param metric_id: the metric id
    :param params: optional dict, e.g. {'latest': False} (the default)
    :type current_skyline_app: str
    :type metric_id: int
    :type params: dict
    :return: anomalies dict keyed by anomaly id; empty on failure
    :rtype: dict
    """
    current_skyline_app_logger = current_skyline_app + 'Log'
    current_logger = logging.getLogger(current_skyline_app_logger)
    anomalies = {}

    # Initialise engine so the references below cannot raise NameError if
    # get_engine itself raises before assigning it
    engine = None
    try:
        engine, fail_msg, trace = get_engine(current_skyline_app)
        if fail_msg != 'got MySQL engine':
            current_logger.error(
                'error :: get_anomalies :: could not get a MySQL engine fail_msg - %s' % str(fail_msg))
        if trace != 'none':
            current_logger.error(
                'error :: get_anomalies :: could not get a MySQL engine trace - %s' % str(trace))
    except Exception as err:
        current_logger.error(traceback.format_exc())
        current_logger.error(
            'error :: get_anomalies :: could not get a MySQL engine - %s' % str(err))

    # Avoid a mutable default argument - None stands in for {'latest': False}
    latest = False
    if params:
        try:
            latest = params['latest']
        except KeyError:
            latest = False

    if engine:
        try:
            anomalies_table, fail_msg, trace = anomalies_table_meta(
                current_skyline_app, engine)
            if fail_msg != 'anomalies_table meta reflected OK':
                current_logger.error(
                    'error :: get_anomalies :: could not get a MySQL engine fail_msg - %s' % str(fail_msg))
            if trace != 'none':
                current_logger.error(
                    'error :: get_anomalies :: could not get a MySQL engine trace - %s' % str(trace))
        except Exception as err:
            current_logger.error(traceback.format_exc())
            current_logger.error(
                'error :: get_anomalies :: anomalies_table_meta - %s' % str(err))
        try:
            connection = engine.connect()
            if latest:
                stmt = select([
                    anomalies_table
                ]).where(anomalies_table.c.metric_id == metric_id).order_by(
                    anomalies_table.c.id.desc()).limit(1)
            else:
                stmt = select([
                    anomalies_table
                ]).where(anomalies_table.c.metric_id == metric_id).order_by(
                    anomalies_table.c.id.desc())
            results = connection.execute(stmt)
            for row in results:
                anomaly_id = row['id']
                anomalies[anomaly_id] = dict(row)
            connection.close()
        except Exception as err:
            current_logger.error(traceback.format_exc())
            current_logger.error(
                'error :: get_anomalies :: failed to build anomalies dict - %s' % str(err))

    if engine:
        engine_disposal(current_skyline_app, engine)
    return anomalies
def get_metric_group(current_skyline_app, metric_id=0):
    """
    Returns the metric_group table row as a dict, or all the metric_group
    table rows as a dict if metric_id is not passed or is passed as 0.

    :param current_skyline_app: the app calling the function
    :param metric_id: the metric id, 0 for all metric groups
    :type current_skyline_app: str
    :type metric_id: int
    :return: metric_group dict keyed by metric_id then related_metric_id
    :rtype: dict
    """
    current_skyline_app_logger = current_skyline_app + 'Log'
    current_logger = logging.getLogger(current_skyline_app_logger)
    metric_group = {}
    if metric_id:
        metric_group[metric_id] = {}

    # Initialise engine so the references below cannot raise NameError if
    # get_engine itself raises before assigning it
    engine = None
    try:
        engine, fail_msg, trace = get_engine(current_skyline_app)
        if fail_msg != 'got MySQL engine':
            current_logger.error(
                'error :: get_metric_group :: could not get a MySQL engine fail_msg - %s' % str(fail_msg))
        if trace != 'none':
            current_logger.error(
                'error :: get_metric_group :: could not get a MySQL engine trace - %s' % str(trace))
    except Exception as err:
        current_logger.error(traceback.format_exc())
        current_logger.error(
            'error :: get_metric_group :: could not get a MySQL engine - %s' % str(err))

    if engine:
        try:
            metric_group_table, fail_msg, trace = metric_group_table_meta(
                current_skyline_app, engine)
            if fail_msg != 'metric_group meta reflected OK':
                current_logger.error(
                    'error :: get_metric_group :: could not get metric_group_table_meta fail_msg - %s' % str(fail_msg))
            if trace != 'none':
                current_logger.error(
                    'error :: get_metric_group :: could not get metric_group_table_meta trace - %s' % str(trace))
        except Exception as err:
            current_logger.error(traceback.format_exc())
            current_logger.error(
                'error :: get_metric_group :: metric_group_table_meta - %s' % str(err))
        try:
            connection = engine.connect()
            if metric_id:
                stmt = select([
                    metric_group_table
                ]).where(metric_group_table.c.metric_id == metric_id).order_by(
                    metric_group_table.c.avg_coefficient.desc())
            else:
                stmt = select([metric_group_table])
            results = connection.execute(stmt)
            for row in results:
                related_metric_id = row['related_metric_id']
                if metric_id:
                    metric_group[metric_id][related_metric_id] = dict(row)
                else:
                    p_metric_id = row['metric_id']
                    if p_metric_id not in list(metric_group.keys()):
                        metric_group[p_metric_id] = {}
                    metric_group[p_metric_id][related_metric_id] = dict(row)
            connection.close()
        except Exception as err:
            current_logger.error(traceback.format_exc())
            current_logger.error(
                'error :: get_metric_group :: failed to build metric_group dict - %s' % str(err))

    # Normalise DB types for the consumers: Decimal -> float,
    # datetime -> str, and shifted_counts bytes -> evaluated dict
    for mi_key in list(metric_group.keys()):
        for rmi_key in list(metric_group[mi_key].keys()):
            for key in list(metric_group[mi_key][rmi_key].keys()):
                if 'decimal.Decimal' in str(
                        type(metric_group[mi_key][rmi_key][key])):
                    metric_group[mi_key][rmi_key][key] = float(
                        metric_group[mi_key][rmi_key][key])
                if 'datetime.datetime' in str(
                        type(metric_group[mi_key][rmi_key][key])):
                    metric_group[mi_key][rmi_key][key] = str(
                        metric_group[mi_key][rmi_key][key])
                if key == 'shifted_counts':
                    try:
                        shifted_counts_str = metric_group[mi_key][rmi_key][
                            key].decode('utf-8')
                        shifted_counts = literal_eval(shifted_counts_str)
                    except AttributeError:
                        # Already decoded (not bytes), use as is
                        shifted_counts = metric_group[mi_key][rmi_key][key]
                    metric_group[mi_key][rmi_key][key] = shifted_counts
            # Remap the metric_id and related_metric_id for clarity
            metric_group[mi_key][rmi_key]['metric_id'] = rmi_key
            metric_group[mi_key][rmi_key]['related_to_metric_id'] = mi_key
            del metric_group[mi_key][rmi_key]['related_metric_id']

    if engine:
        engine_disposal(current_skyline_app, engine)
    return metric_group
def get_all_db_metric_names(current_skyline_app, with_ids=False):
    """
    Return all metric names from the database as a list, optionally with a
    dict of name to id mappings as well.

    :param current_skyline_app: the app calling the function
    :param with_ids: also return a {base_name: id} dict
    :type current_skyline_app: str
    :type with_ids: bool
    :return: metric_names, or (metric_names, metric_names_with_ids) when
        with_ids is True; on failure returns (False, fail_msg, trace) -
        NOTE the inconsistent return types are part of the existing caller
        contract
    :rtype: list or tuple
    """
    metric_names = []
    metric_names_with_ids = {}
    function_str = 'database_queries.get_all_db_metric_names'
    current_skyline_app_logger = current_skyline_app + 'Log'
    current_logger = logging.getLogger(current_skyline_app_logger)

    # Initialise engine so later references cannot raise NameError if
    # get_engine itself raises before assigning it
    engine = None
    try:
        engine, fail_msg, trace = get_engine(current_skyline_app)
    except Exception as e:
        trace = traceback.format_exc()
        current_logger.error(trace)
        fail_msg = 'error :: %s :: could not get a MySQL engine - %s' % (function_str, e)
        current_logger.error('%s' % fail_msg)
        return False, fail_msg, trace

    try:
        metrics_table, fail_msg, trace = metrics_table_meta(current_skyline_app, engine)
        current_logger.info(fail_msg)
    except Exception as e:
        trace = traceback.format_exc()
        current_logger.error('%s' % trace)
        fail_msg = 'error :: %s :: failed to get metrics_table meta - %s' % (
            function_str, e)
        current_logger.error('%s' % fail_msg)
        if engine:
            engine_disposal(current_skyline_app, engine)
        if current_skyline_app == 'webapp':
            # Raise to webapp
            raise
        return False, fail_msg, trace

    try:
        connection = engine.connect()
        if with_ids:
            stmt = select([metrics_table.c.id, metrics_table.c.metric])
        else:
            stmt = select([metrics_table.c.metric])
        result = connection.execute(stmt)
        for row in result:
            base_name = row['metric']
            metric_names.append(base_name)
            if with_ids:
                metric_names_with_ids[base_name] = row['id']
        connection.close()
        current_logger.info('%s :: determined metric names from the db: %s' % (
            function_str, str(len(metric_names))))
    except Exception as e:
        trace = traceback.format_exc()
        current_logger.error(trace)
        fail_msg = 'error :: %s :: could not determine metric names from DB for - %s' % (
            function_str, e)
        current_logger.error('%s' % fail_msg)
        if engine:
            engine_disposal(current_skyline_app, engine)
        if current_skyline_app == 'webapp':
            # Raise to webapp
            raise
        return False, fail_msg, trace

    if engine:
        engine_disposal(current_skyline_app, engine)

    if not metric_names:
        current_logger.error('error :: %s :: no metric names returned from the DB' % (
            function_str))

    if with_ids:
        return metric_names, metric_names_with_ids
    return metric_names
def get_ionosphere_fp_ids_for_full_duration(
        current_skyline_app, metric_id, full_duration=0, enabled=True):
    """
    Return the ionosphere table database rows as a dict keyed by fp id.

    :param current_skyline_app: the app calling the function
    :param metric_id: the metric id
    :param full_duration: restrict to this full_duration, 0 for all
    :param enabled: only return enabled (or only disabled) features profiles
    :type current_skyline_app: str
    :type metric_id: int
    :type full_duration: int
    :type enabled: bool
    :return: fp_ids_full_duration dict; empty on failure
    :rtype: dict
    """
    function_str = 'functions.database.queries.get_ionosphere_fp_ids_for_full_duration'
    log_msg = None
    trace = None

    # The enabled column stores 1/0
    if enabled:
        enabled = 1
    else:
        enabled = 0

    current_skyline_app_logger = current_skyline_app + 'Log'
    current_logger = logging.getLogger(current_skyline_app_logger)
    fp_ids_full_duration = {}

    # Initialise engine so the except handlers cannot raise NameError if
    # get_engine itself raises before assigning it
    engine = None
    try:
        engine, fail_msg, trace = get_engine(current_skyline_app)
    except Exception as e:
        trace = traceback.format_exc()
        current_logger.error(trace)
        fail_msg = 'error :: %s :: could not get a MySQL engine - %s' % (function_str, e)
        current_logger.error('%s' % fail_msg)
        if engine:
            engine_disposal(current_skyline_app, engine)
        if current_skyline_app == 'webapp':
            # Raise to webapp
            raise
        return fp_ids_full_duration

    try:
        ionosphere_table, log_msg, trace = ionosphere_table_meta(current_skyline_app, engine)
    except Exception as e:
        current_logger.error(traceback.format_exc())
        current_logger.error('error :: %s :: failed to get ionosphere_table meta for metric id %s - %s' % (
            function_str, str(metric_id), e))
        if engine:
            # engine_disposal takes (current_skyline_app, engine) - the
            # original single-argument call raised a TypeError
            engine_disposal(current_skyline_app, engine)
        if current_skyline_app == 'webapp':
            # Raise to webapp
            raise
        return fp_ids_full_duration

    try:
        connection = engine.connect()
        if full_duration:
            stmt = select([ionosphere_table]).\
                where(ionosphere_table.c.metric_id == int(metric_id)).\
                where(ionosphere_table.c.full_duration == int(full_duration)).\
                where(ionosphere_table.c.enabled == enabled)
        else:
            stmt = select([ionosphere_table]).\
                where(ionosphere_table.c.metric_id == int(metric_id)).\
                where(ionosphere_table.c.enabled == enabled)
        results = connection.execute(stmt)
        if results:
            for row in results:
                fp_id = row['id']
                fp_ids_full_duration[fp_id] = row
        connection.close()
    except Exception as e:
        current_logger.error(traceback.format_exc())
        current_logger.error('error :: %s :: could not get ionosphere rows for metric id %s - %s' % (
            function_str, str(metric_id), e))
        if engine:
            # engine_disposal takes (current_skyline_app, engine)
            engine_disposal(current_skyline_app, engine)
        if current_skyline_app == 'webapp':
            # Raise to webapp
            raise
        return fp_ids_full_duration

    if engine:
        engine_disposal(current_skyline_app, engine)
    if log_msg:
        del log_msg
    if trace:
        del trace
    return fp_ids_full_duration
def get_ionosphere_fp_db_row(current_skyline_app, fp_id):
    """
    Return the ionosphere table database row as a dict.

    :param current_skyline_app: the app calling the function
    :param fp_id: the features profile id
    :type current_skyline_app: str
    :type fp_id: int
    :return: fp_id_row dict; empty on failure
    :rtype: dict
    """
    function_str = 'functions.database.queries.get_ionosphere_fp_db_row'
    log_msg = None
    trace = None
    current_skyline_app_logger = current_skyline_app + 'Log'
    current_logger = logging.getLogger(current_skyline_app_logger)
    fp_id_row = {}

    # Initialise engine so the except handlers cannot raise NameError if
    # get_engine itself raises before assigning it
    engine = None
    try:
        engine, fail_msg, trace = get_engine(current_skyline_app)
    except Exception as e:
        trace = traceback.format_exc()
        current_logger.error(trace)
        fail_msg = 'error :: %s :: could not get a MySQL engine - %s' % (function_str, e)
        current_logger.error('%s' % fail_msg)
        if engine:
            engine_disposal(current_skyline_app, engine)
        if current_skyline_app == 'webapp':
            # Raise to webapp
            raise
        return fp_id_row

    try:
        ionosphere_table, log_msg, trace = ionosphere_table_meta(current_skyline_app, engine)
    except Exception as e:
        current_logger.error(traceback.format_exc())
        current_logger.error('error :: %s :: failed to get ionosphere_table meta for fp id %s - %s' % (
            function_str, str(fp_id), e))
        if engine:
            # engine_disposal takes (current_skyline_app, engine) - the
            # original single-argument call raised a TypeError
            engine_disposal(current_skyline_app, engine)
        if current_skyline_app == 'webapp':
            # Raise to webapp
            raise
        return fp_id_row

    try:
        connection = engine.connect()
        stmt = select([ionosphere_table]).where(ionosphere_table.c.id == int(fp_id))
        result = connection.execute(stmt)
        row = result.fetchone()
        try:
            fp_id_row = dict(row)
        except Exception as e:
            trace = traceback.format_exc()
            connection.close()
            current_logger.error(trace)
            fail_msg = 'error :: %s :: could not convert db ionosphere row to dict for fp id %s - %s' % (
                function_str, str(fp_id), e)
            current_logger.error('%s' % fail_msg)
            if engine:
                # engine_disposal takes (current_skyline_app, engine)
                engine_disposal(current_skyline_app, engine)
            if current_skyline_app == 'webapp':
                # Raise to webapp
                raise
            return fp_id_row
        connection.close()
    except Exception as e:
        current_logger.error(traceback.format_exc())
        current_logger.error('error :: %s :: could not get ionosphere row for fp id %s - %s' % (
            function_str, str(fp_id), e))
        if engine:
            # engine_disposal takes (current_skyline_app, engine)
            engine_disposal(current_skyline_app, engine)
        if current_skyline_app == 'webapp':
            # Raise to webapp
            raise
        return fp_id_row

    if engine:
        engine_disposal(current_skyline_app, engine)
    if log_msg:
        del log_msg
    if trace:
        del trace
    return fp_id_row
def metric_ids_from_metric_like(current_skyline_app, metrics_like_str):
    """
    Given a SQL metric name wildcard, return a list of metric_ids.

    :param current_skyline_app: the app calling the function
    :param metrics_like_str: the SQL LIKE pattern, e.g. 'carbon%'
    :type current_skyline_app: str
    :type metrics_like_str: str
    :return: metric_ids list; on failure returns False or the tuple
        (False, fail_msg, trace) - NOTE the inconsistent return types are
        part of the existing caller contract
    :rtype: list or bool or tuple
    """
    metric_ids = []
    function_str = 'database_queries.metric_ids_from_metric_like'
    current_skyline_app_logger = current_skyline_app + 'Log'
    current_logger = logging.getLogger(current_skyline_app_logger)

    # Initialise engine so later references cannot raise NameError if
    # get_engine itself raises before assigning it
    engine = None
    try:
        engine, fail_msg, trace = get_engine(current_skyline_app)
    except Exception as e:
        trace = traceback.format_exc()
        current_logger.error(trace)
        fail_msg = 'error :: %s :: could not get a MySQL engine - %s' % (
            function_str, e)
        current_logger.error('%s' % fail_msg)
        if current_skyline_app == 'webapp':
            # Raise to webapp
            raise
        return False

    try:
        metrics_table, fail_msg, trace = metrics_table_meta(
            current_skyline_app, engine)
        current_logger.info(fail_msg)
    except Exception as e:
        trace = traceback.format_exc()
        current_logger.error('%s' % trace)
        fail_msg = 'error :: %s :: failed to get metrics_table meta - %s' % (
            function_str, e)
        current_logger.error('%s' % fail_msg)
        if engine:
            engine_disposal(current_skyline_app, engine)
        if current_skyline_app == 'webapp':
            # Raise to webapp
            raise
        return False, fail_msg, trace

    try:
        connection = engine.connect()
        stmt = select([metrics_table.c.id]).where(
            metrics_table.c.metric.ilike(metrics_like_str))
        result = connection.execute(stmt)
        for row in result:
            metric_ids.append(int(row['id']))
        connection.close()
        current_logger.info(
            '%s :: determined %s metric_ids for metric_like_str: %s' %
            (function_str, str(len(metric_ids)), metrics_like_str))
    except Exception as e:
        trace = traceback.format_exc()
        current_logger.error(trace)
        fail_msg = 'error :: %s :: could not determine ids of metrics_like_str: %s -%s' % (
            function_str, str(metrics_like_str), e)
        current_logger.error('%s' % fail_msg)
        if engine:
            engine_disposal(current_skyline_app, engine)
        if current_skyline_app == 'webapp':
            # Raise to webapp
            raise
        return False, fail_msg, trace

    if engine:
        engine_disposal(current_skyline_app, engine)

    if not metric_ids:
        # The original passed a single argument to a two-placeholder format
        # string, which raised a TypeError here - supply both arguments
        current_logger.error('error :: %s :: no ids for metrics_like_str: %s' % (
            function_str, str(metrics_like_str)))
    return metric_ids
def get_metric_group_info(current_skyline_app, metric_id=0, params=None):
    """
    Returns the metrics_groups table as a dict keyed by metric_id, optionally
    filtered on params['namespaces'] and, for the webapp, sorted by metric
    base_name.

    :param current_skyline_app: the app calling the function
    :param metric_id: the metric id, 0 for all metric groups
    :param params: optional dict, e.g. {'namespaces': []} (the default)
    :type current_skyline_app: str
    :type metric_id: int
    :type params: dict
    :return: metric_groups_info dict; empty on failure
    :rtype: dict
    """
    current_skyline_app_logger = current_skyline_app + 'Log'
    current_logger = logging.getLogger(current_skyline_app_logger)
    metric_groups_info = {}

    # Avoid a mutable default argument - None stands in for {'namespaces': []}
    if params is None:
        params = {'namespaces': []}

    # Initialise engine so the references below cannot raise NameError if
    # get_engine itself raises before assigning it
    engine = None
    try:
        engine, fail_msg, trace = get_engine(current_skyline_app)
        if fail_msg != 'got MySQL engine':
            current_logger.error(
                'error :: get_metric_group_info :: could not get a MySQL engine fail_msg - %s' % str(fail_msg))
        if trace != 'none':
            current_logger.error(
                'error :: get_metric_group_info :: could not get a MySQL engine trace - %s' % str(trace))
    except Exception as err:
        current_logger.error(traceback.format_exc())
        current_logger.error(
            'error :: get_metric_group_info :: could not get a MySQL engine - %s' % str(err))

    if engine:
        try:
            metric_groups_info_table, fail_msg, trace = metric_group_info_table_meta(
                current_skyline_app, engine)
            if fail_msg != 'metric_group_info meta reflected OK':
                current_logger.error(
                    'error :: get_metric_group_info :: could not get metric_groups_info_table_meta fail_msg - %s' % str(fail_msg))
            if trace != 'none':
                current_logger.error(
                    'error :: get_metric_group_info :: could not get metric_groups_info_table_meta trace - %s' % str(trace))
        except Exception as err:
            current_logger.error(traceback.format_exc())
            current_logger.error(
                'error :: get_metric_group_info :: metric_groups_info_table_meta - %s' % str(err))
        try:
            connection = engine.connect()
            if metric_id:
                stmt = select([
                    metric_groups_info_table
                ]).where(metric_groups_info_table.c.metric_id == metric_id)
            else:
                stmt = select([metric_groups_info_table])
            results = connection.execute(stmt)
            for row in results:
                group_metric_id = row['metric_id']
                # Only include groups that actually have related metrics
                if row['related_metrics'] > 0:
                    metric_groups_info[group_metric_id] = dict(row)
            connection.close()
        except Exception as err:
            current_logger.error(traceback.format_exc())
            current_logger.error(
                'error :: get_metric_group_info :: failed to build metric_groups_info dict - %s' % str(err))

    # Resolve metric ids to base_names, preferring a single bulk lookup
    ids_with_base_names = {}
    if not metric_id:
        try:
            ids_with_base_names = get_metric_ids_and_base_names(
                current_skyline_app)
        except Exception as err:
            current_logger.error(traceback.format_exc())
            current_logger.error(
                'error :: %s :: get_metric_group_info :: get_metric_ids_and_basenames failed - %s' % (
                    current_skyline_app, str(err)))
    for mi_key in list(metric_groups_info.keys()):
        base_name = None
        if ids_with_base_names:
            try:
                base_name = ids_with_base_names[int(mi_key)]
            except KeyError:
                base_name = None
        if not base_name:
            try:
                base_name = get_base_name_from_metric_id(
                    current_skyline_app, int(mi_key))
            except Exception as err:
                # Log the id that actually failed (mi_key), not the metric_id
                # function argument which is often 0 here
                current_logger.error(
                    'error :: %s :: get_metric_group_info :: get_base_name_from_metric_id failed to determine base_name for metric_id: %s - %s' % (
                        current_skyline_app, str(mi_key), str(err)))
                base_name = 'unknown'
        metric_groups_info[mi_key]['metric'] = base_name
        for key in list(metric_groups_info[mi_key].keys()):
            if 'datetime.datetime' in str(type(
                    metric_groups_info[mi_key][key])):
                metric_groups_info[mi_key][key] = str(
                    metric_groups_info[mi_key][key])

    # Sort by base_name
    if metric_groups_info and current_skyline_app == 'webapp':
        metric_groups_info_list = []
        metric_groups_info_dict = {}
        metric_groups_info_keys = []
        for group_metric_id in list(metric_groups_info.keys()):
            metric_group_info_data = []
            if not metric_groups_info_keys:
                for index, key in enumerate(
                        list(metric_groups_info[group_metric_id].keys())):
                    metric_groups_info_keys.append(key)
                    if key == 'metric':
                        metric_index = index
                    if key == 'metric_id':
                        metric_id_index = index
            for key in metric_groups_info_keys:
                metric_group_info_data.append(
                    metric_groups_info[group_metric_id][key])
            metric_groups_info_list.append(metric_group_info_data)
        if metric_groups_info_list:
            sorted_metric_groups_info_list = sorted(
                metric_groups_info_list, key=lambda x: x[metric_index])
            for item in sorted_metric_groups_info_list:
                group_metric_id = item[metric_id_index]
                metric_group_dict = {}
                metric_group_dict['metric_id'] = item[metric_id_index]
                metric_group_dict['metric'] = item[metric_index]
                for index, key in enumerate(metric_groups_info_keys):
                    if key in ['metric', 'metric_id']:
                        continue
                    metric_group_dict[key] = item[index]
                metric_groups_info_dict[group_metric_id] = metric_group_dict
            metric_groups_info = metric_groups_info_dict

    # Optionally filter results on the passed namespaces
    namespaces = []
    try:
        namespaces = params['namespaces']
    except KeyError:
        namespaces = []
    filtered_metric_groups_info = {}
    if namespaces:
        current_logger.info(
            '%s :: get_metric_group_info :: filtering results on namespaces: %s' % (
                current_skyline_app, str(namespaces)))
        for mi_key in list(metric_groups_info.keys()):
            pattern_match = False
            try:
                pattern_match, matched_by = matched_or_regexed_in_list(
                    current_skyline_app, metric_groups_info[mi_key]['metric'],
                    namespaces, False)
                if pattern_match:
                    filtered_metric_groups_info[mi_key] = metric_groups_info[
                        mi_key]
            except Exception as err:
                current_logger.error(
                    'error :: %s :: get_metric_group_info :: matched_or_regexed_in_list failed to determine if matched in namespaces: %s - %s' % (
                        current_skyline_app, str(namespaces), str(err)))
        if filtered_metric_groups_info:
            metric_groups_info = filtered_metric_groups_info.copy()

    if engine:
        engine_disposal(current_skyline_app, engine)
    return metric_groups_info
def get_cloudburst_row(current_skyline_app, cloudburst_id):
    """
    Return the cloudburst table database row as a dict.

    :param current_skyline_app: the app calling the function
    :param cloudburst_id: the cloudburst id
    :type current_skyline_app: str
    :type cloudburst_id: int
    :return: cloudburst_dict; empty on failure
    :rtype: dict
    """
    function_str = 'functions.database.queries.get_cloudburst_row'
    current_logger = logging.getLogger(current_skyline_app + 'Log')
    cloudburst_dict = {}

    # Initialise engine so the except handlers cannot raise NameError if
    # get_engine itself raises before assigning it
    engine = None
    try:
        engine, fail_msg, trace = get_engine(current_skyline_app)
    except Exception as err:
        trace = traceback.format_exc()
        current_logger.error(trace)
        fail_msg = 'error :: %s :: could not get a MySQL engine - %s' % (
            function_str, err)
        current_logger.error('%s' % fail_msg)
        if engine:
            engine_disposal(current_skyline_app, engine)
        if current_skyline_app == 'webapp':
            # Raise to webapp
            raise
        return cloudburst_dict

    try:
        cloudburst_table, log_msg, trace = cloudburst_table_meta(
            current_skyline_app, engine)
    except Exception as err:
        current_logger.error(traceback.format_exc())
        current_logger.error(
            'error :: %s :: failed to get cloudburst_table meta for cloudburst id %s - %s' % (
                function_str, str(cloudburst_id), err))
        if engine:
            # engine_disposal takes (current_skyline_app, engine) - the
            # original single-argument call raised a TypeError
            engine_disposal(current_skyline_app, engine)
        if current_skyline_app == 'webapp':
            # Raise to webapp
            raise
        return cloudburst_dict

    try:
        connection = engine.connect()
        stmt = select([cloudburst_table]).where(
            cloudburst_table.c.id == cloudburst_id)
        result = connection.execute(stmt)
        row = result.fetchone()
        cloudburst_dict = dict(row)
        connection.close()
    except Exception as err:
        current_logger.error(traceback.format_exc())
        current_logger.error(
            'error :: %s :: could not get cloudburst row for cloudburst id %s - %s' % (
                function_str, str(cloudburst_id), err))
        if engine:
            # engine_disposal takes (current_skyline_app, engine)
            engine_disposal(current_skyline_app, engine)
        if current_skyline_app == 'webapp':
            # Raise to webapp
            raise
        return cloudburst_dict

    if engine:
        engine_disposal(current_skyline_app, engine)
    return cloudburst_dict
def get_cross_correlations(current_skyline_app, anomaly_ids):
    """
    Given a list of anomaly ids, return the cross correlations.

    :param current_skyline_app: the app calling the function
    :param anomaly_ids: a list of anomaly ids
    :type current_skyline_app: str
    :type anomaly_ids: list
    :return: cross correlations keyed on anomaly id, then metric id
    :rtype: dict
    """
    current_skyline_app_logger = current_skyline_app + 'Log'
    current_logger = logging.getLogger(current_skyline_app_logger)

    cross_correlations = {}

    # BUGFIX: initialise engine so that the `if engine:` checks below do not
    # raise NameError when get_engine itself raised before assignment
    engine = None
    try:
        engine, fail_msg, trace = get_engine(current_skyline_app)
        if fail_msg != 'got MySQL engine':
            current_logger.error(
                'error :: get_cross_correlations :: could not get a MySQL engine fail_msg - %s'
                % str(fail_msg))
        if trace != 'none':
            current_logger.error(
                'error :: get_cross_correlations :: could not get a MySQL engine trace - %s'
                % str(trace))
    except Exception as err:
        current_logger.error(traceback.format_exc())
        current_logger.error(
            'error :: get_cross_correlations :: could not get a MySQL engine - %s'
            % str(err))

    if engine:
        try:
            luminosity_table, fail_msg, trace = luminosity_table_meta(
                current_skyline_app, engine)
            if fail_msg != 'luminosity_table meta reflected OK':
                current_logger.error(
                    'error :: get_cross_correlations :: could not get luminosity_table_meta fail_msg - %s'
                    % str(fail_msg))
            if trace != 'none':
                current_logger.error(
                    'error :: get_cross_correlations :: could not get luminosity_table_meta trace - %s'
                    % str(trace))
        except Exception as err:
            current_logger.error(traceback.format_exc())
            current_logger.error(
                'error :: get_cross_correlations :: luminosity_table_meta - %s'
                % str(err))
        try:
            connection = engine.connect()
            stmt = select([luminosity_table],
                          luminosity_table.c.id.in_(anomaly_ids))
            results = connection.execute(stmt)
            for row in results:
                anomaly_id = row['id']
                # Membership test on the dict itself rather than building a
                # list of the keys on every iteration
                if anomaly_id not in cross_correlations:
                    cross_correlations[anomaly_id] = {}
                metric_id = row['metric_id']
                cross_correlations[anomaly_id][metric_id] = dict(row)
            connection.close()
        except Exception as err:
            current_logger.error(traceback.format_exc())
            current_logger.error(
                'error :: get_cross_correlations :: failed to build cross_correlations dict - %s'
                % str(err))

    if engine:
        engine_disposal(current_skyline_app, engine)

    return cross_correlations
def get_cloudburst_plot(cloudburst_id, base_name, shift, all_in_period=False):
    """
    Create a plot of the cloudburst and return the path and filename

    :param cloudburst_id: the cloudburt id
    :param base_name: the name of the metric
    :param shift: the number of indice to shift the plot
    :param all_in_period: include all other cloudbursts on the metric that
        occurred in the plotted period
    :type cloudburst_id: int
    :type base_name: str
    :type shift: int
    :type all_in_period: boolean
    :return: (cloudburst_dict, path and file)
    :rtype: tuple
    """
    function_str = 'get_cloudburst_plot'
    logger.info(
        'get_cloudburst_plot - cloudburst_id: %s, base_name: %s' % (
            str(cloudburst_id), str(base_name)))

    save_to_file = '%s/cloudburst_id.%s.%s.shift.%s.png' % (
        settings.SKYLINE_TMP_DIR, str(cloudburst_id), base_name, str(shift))
    if all_in_period:
        save_to_file = '%s/cloudburst_id.%s.all.%s.shift.%s.png' % (
            settings.SKYLINE_TMP_DIR, str(cloudburst_id), base_name,
            str(shift))

    cloudburst_dict = {}
    try:
        cloudburst_dict = get_cloudburst_row(skyline_app, cloudburst_id)
    except Exception as err:
        logger.error(traceback.format_exc())
        logger.error('error :: %s :: get_cloudburst_row failed - %s' % (
            function_str, err))
        raise

    if not cloudburst_dict:
        # BUGFIX: the format string had two %s placeholders but only
        # function_str was supplied, raising TypeError instead of logging
        logger.error('error :: %s :: no cloudburst_dict' % function_str)
        return None, None

    # Reuse a previously rendered plot if it exists
    if os.path.isfile(save_to_file):
        return cloudburst_dict, save_to_file

    try:
        from_timestamp = cloudburst_dict['from_timestamp']
        until_timestamp = from_timestamp + cloudburst_dict['full_duration']
        resolution = cloudburst_dict['resolution']
    except Exception as err:
        logger.error(traceback.format_exc())
        logger.error('error :: %s :: failed parse values from cloudburst_dict - %s' % (
            function_str, err))
        raise

    metrics_functions = {}
    metrics_functions[base_name] = {}
    metrics_functions[base_name]['functions'] = None
    if resolution > 60:
        # Downsample to the metric resolution using a median summarize
        resolution_minutes = int(resolution / 60)
        summarize_intervalString = '%smin' % str(resolution_minutes)
        summarize_func = 'median'
        metrics_functions[base_name]['functions'] = {
            'summarize': {
                'intervalString': summarize_intervalString,
                'func': summarize_func}}

    try:
        metrics_timeseries = get_metrics_timeseries(
            skyline_app, metrics_functions, from_timestamp, until_timestamp,
            log=False)
    except Exception as err:
        logger.error(traceback.format_exc())
        logger.error('error :: %s :: get_metrics_timeseries failed - %s' % (
            function_str, err))
        raise

    try:
        timeseries = metrics_timeseries[base_name]['timeseries']
        # Trim the first and the last two data points
        timeseries_length = len(timeseries)
        timeseries = timeseries[1:(timeseries_length - 2)]
    except Exception as err:
        logger.error(traceback.format_exc())
        logger.error('error :: %s :: failed to determine timeseries - %s' % (
            function_str, err))
        raise

    anomalies_in_period = []
    if all_in_period:
        # BUGFIX: initialise engine so the except/cleanup paths do not raise
        # NameError when get_engine itself raised before assignment
        engine = None
        try:
            engine, fail_msg, trace = get_engine(skyline_app)
        except Exception as err:
            trace = traceback.format_exc()
            logger.error(trace)
            fail_msg = 'error :: %s :: could not get a MySQL engine - %s' % (
                function_str, err)
            logger.error('%s' % fail_msg)
            if engine:
                engine_disposal(skyline_app, engine)
            raise
        try:
            cloudburst_table, log_msg, trace = cloudburst_table_meta(
                skyline_app, engine)
        except Exception as err:
            logger.error(traceback.format_exc())
            logger.error('error :: %s :: failed to get cloudburst_table meta for cloudburst id %s - %s' % (
                function_str, str(cloudburst_id), err))
            if engine:
                # BUGFIX: engine_disposal takes (skyline_app, engine), it was
                # previously called with engine only
                engine_disposal(skyline_app, engine)
            raise
        try:
            connection = engine.connect()
            # All other cloudbursts on this metric in the plotted window
            stmt = select([cloudburst_table]).\
                where(cloudburst_table.c.metric_id == cloudburst_dict['metric_id']).\
                where(cloudburst_table.c.timestamp >= from_timestamp).\
                where(cloudburst_table.c.timestamp <= until_timestamp).\
                where(cloudburst_table.c.id != cloudburst_id)
            result = connection.execute(stmt)
            for row in result:
                anomalies_in_period.append([row['timestamp'], row['end']])
            connection.close()
        except Exception as err:
            logger.error(traceback.format_exc())
            logger.error('error :: %s :: could not get cloudburst row for cloudburst id %s - %s' % (
                function_str, str(cloudburst_id), err))
            if engine:
                # BUGFIX: pass both arguments to engine_disposal
                engine_disposal(skyline_app, engine)
            raise
        if engine:
            engine_disposal(skyline_app, engine)

    anomalies = []
    if anomalies_in_period:
        logger.info(
            'get_cloudburst_plot - adding %s all_in_period anomalies to cloudburst plot' % (
                str(len(anomalies_in_period))))
        for period_anomalies in anomalies_in_period:
            new_anomalies = [
                item for item in timeseries
                if int(item[0]) >= period_anomalies[0] and int(item[0]) <= period_anomalies[1]]
            if new_anomalies:
                anomalies = anomalies + new_anomalies

    try:
        cloudburst_anomalies = [
            item for item in timeseries
            if int(item[0]) >= cloudburst_dict['timestamp'] and int(item[0]) <= cloudburst_dict['end']]
        anomalies = anomalies + cloudburst_anomalies
        df = pd.DataFrame(timeseries, columns=['date', 'value'])
        df['date'] = pd.to_datetime(df['date'], unit='s')
        datetime_index = pd.DatetimeIndex(df['date'].values)
        df = df.set_index(datetime_index)
        df.drop('date', axis=1, inplace=True)
        anomalies_data = []
        # @modified 20210831
        # Align periods
        # anomaly_timestamps = [int(item[0]) for item in anomalies]
        # anomaly_timestamps = [(int(item[0]) + (resolution * 2)) for item in anomalies]
        # anomaly_timestamps = [(int(item[0]) + (resolution * 6)) for item in anomalies]
        # anomaly_timestamps = [(int(item[0]) + (resolution * 4)) for item in anomalies]
        # anomaly_timestamps = [(int(item[0]) + (resolution * 3)) for item in anomalies]
        anomaly_timestamps = [(int(item[0]) + (resolution * shift)) for item in anomalies]
        for item in timeseries:
            if int(item[0]) in anomaly_timestamps:
                anomalies_data.append(1)
            else:
                anomalies_data.append(0)
        df['anomalies'] = anomalies_data
        title = '%s\ncloudburst id: %s' % (base_name, str(cloudburst_id))
        if all_in_period:
            title = '%s (all in period)' % title
        plot(df['value'], anomaly=df['anomalies'], anomaly_color='red',
             title=title, save_to_file=save_to_file)
    except Exception as err:
        logger.error(traceback.format_exc())
        logger.error('error :: %s :: failed to plot cloudburst - %s' % (
            function_str, err))
        raise

    if not os.path.isfile(save_to_file):
        return cloudburst_dict, None

    return cloudburst_dict, save_to_file
def base_name_from_metric_id(current_skyline_app, metric_id, log=False):
    """
    Given a metric id, return the base_name.

    :param current_skyline_app: the app calling the function
    :param metric_id: the metric id
    :param log: whether to log info messages (errors are always logged)
    :type current_skyline_app: str
    :type metric_id: int
    :type log: boolean
    :return: base_name (or a (False, fail_msg, trace) tuple on failure)
    :rtype: str
    """
    base_name = None
    function_str = 'database_queries.base_name_from_metric_id'

    def get_log(current_skyline_app):
        # Resolve the app specific logger on demand
        current_skyline_app_logger = current_skyline_app + 'Log'
        current_logger = logging.getLogger(current_skyline_app_logger)
        return current_logger

    if log:
        current_logger = get_log(current_skyline_app)

    try:
        engine, fail_msg, trace = get_engine(current_skyline_app)
    except Exception as e:
        if not log:
            current_logger = get_log(current_skyline_app)
        trace = traceback.format_exc()
        current_logger.error(trace)
        fail_msg = 'error :: %s :: could not get a MySQL engine - %s' % (
            function_str, e)
        current_logger.error('%s' % fail_msg)
        return False, fail_msg, trace

    try:
        metrics_table, fail_msg, trace = metrics_table_meta(
            current_skyline_app, engine)
        if log:
            current_logger.info(fail_msg)
    except Exception as e:
        if not log:
            current_logger = get_log(current_skyline_app)
        trace = traceback.format_exc()
        current_logger.error('%s' % trace)
        fail_msg = 'error :: %s :: failed to get metrics_table meta for %s- %s' % (
            function_str, base_name, e)
        current_logger.error('%s' % fail_msg)
        if engine:
            engine_disposal(current_skyline_app, engine)
        if current_skyline_app == 'webapp':
            # Raise to webapp
            raise
        return False, fail_msg, trace

    try:
        connection = engine.connect()
        stmt = select([metrics_table.c.metric]).where(
            metrics_table.c.id == metric_id)
        result = connection.execute(stmt)
        for row in result:
            base_name = row['metric']
            break
        connection.close()
        if log:
            current_logger.info(
                '%s :: determined metric with id %s base_name: %s' % (
                    function_str, str(metric_id), base_name))
    except Exception as e:
        if not log:
            current_logger = get_log(current_skyline_app)
        trace = traceback.format_exc()
        current_logger.error(trace)
        fail_msg = 'error :: %s :: could not determine id of metric from DB for %s - %s' % (
            function_str, base_name, e)
        current_logger.error('%s' % fail_msg)
        if engine:
            engine_disposal(current_skyline_app, engine)
        if current_skyline_app == 'webapp':
            # Raise to webapp
            raise
        return False, fail_msg, trace

    if engine:
        engine_disposal(current_skyline_app, engine)

    if not base_name:
        # BUGFIX: current_logger was only bound when log=True or after an
        # earlier error, so this branch raised NameError when log=False
        if not log:
            current_logger = get_log(current_skyline_app)
        current_logger.error(
            'error :: %s :: no base_name for metric in the DB with id %s' % (
                function_str, str(metric_id)))

    return base_name
def related_to_metric_groups(current_skyline_app, base_name, metric_id):
    """
    Returns a dict of all the metric_groups that a metric is part of.

    :param current_skyline_app: the app calling the function
    :param base_name: the name of the metric
    :param metric_id: the metric id
    :type current_skyline_app: str
    :type base_name: str
    :type metric_id: int
    :return: related_to_metric_groups_dict
    :rtype: dict
    """
    current_skyline_app_logger = current_skyline_app + 'Log'
    current_logger = logging.getLogger(current_skyline_app_logger)

    related_to_metric_groups_dict = {}
    related_to_metric_groups_dict['metric'] = base_name
    related_to_metric_groups_dict['metric_id'] = metric_id
    related_to_metric_groups_dict['related_to_metrics'] = {}

    # BUGFIX: initialise engine so the `if engine:` checks below do not raise
    # NameError when get_engine itself raised before assignment
    engine = None
    try:
        engine, fail_msg, trace = get_engine(current_skyline_app)
        if fail_msg != 'got MySQL engine':
            current_logger.error('error :: related_to_metric_groups :: could not get a MySQL engine fail_msg - %s' % str(fail_msg))
        if trace != 'none':
            current_logger.error('error :: related_to_metric_groups :: could not get a MySQL engine trace - %s' % str(trace))
    except Exception as err:
        current_logger.error(traceback.format_exc())
        current_logger.error('error :: related_to_metric_groups :: could not get a MySQL engine - %s' % str(err))

    if engine:
        try:
            metric_group_table, fail_msg, trace = metric_group_table_meta(current_skyline_app, engine)
            if fail_msg != 'metric_group meta reflected OK':
                current_logger.error('error :: related_to_metric_groups :: could not get metric_group_table_meta fail_msg - %s' % str(fail_msg))
            if trace != 'none':
                current_logger.error('error :: related_to_metric_groups :: could not get metric_group_table_meta trace - %s' % str(trace))
        except Exception as err:
            current_logger.error(traceback.format_exc())
            current_logger.error('error :: related_to_metric_groups :: metric_group_table_meta - %s' % str(err))
        try:
            connection = engine.connect()
            if metric_id:
                # Groups this metric is related to, best correlations first
                stmt = select([metric_group_table]).where(
                    metric_group_table.c.related_metric_id == metric_id).order_by(
                        metric_group_table.c.avg_coefficient.desc())
            else:
                stmt = select([metric_group_table])
            results = connection.execute(stmt)
            for row in results:
                group_metric_id = row['metric_id']
                group_base_name = None
                try:
                    group_base_name = get_base_name_from_metric_id(current_skyline_app, group_metric_id)
                except Exception as err:
                    current_logger.error('error :: related_to_metric_groups :: base_name_from_metric_id failed to determine base_name from metric_id: %s - %s' % (
                        str(group_metric_id), str(err)))
                if group_base_name:
                    related_to_metric_groups_dict['related_to_metrics'][group_base_name] = dict(row)
            connection.close()
        except Exception as err:
            current_logger.error(traceback.format_exc())
            current_logger.error('error :: related_to_metric_groups :: failed to build metric_groups dict - %s' % str(err))

    if engine:
        engine_disposal(current_skyline_app, engine)

    # Normalise DB types to JSON friendly ones and remap ids for clarity
    for related_metric in list(related_to_metric_groups_dict['related_to_metrics'].keys()):
        for key in list(related_to_metric_groups_dict['related_to_metrics'][related_metric].keys()):
            if 'decimal.Decimal' in str(type(related_to_metric_groups_dict['related_to_metrics'][related_metric][key])):
                related_to_metric_groups_dict['related_to_metrics'][related_metric][key] = float(related_to_metric_groups_dict['related_to_metrics'][related_metric][key])
            if 'datetime.datetime' in str(type(related_to_metric_groups_dict['related_to_metrics'][related_metric][key])):
                related_to_metric_groups_dict['related_to_metrics'][related_metric][key] = str(related_to_metric_groups_dict['related_to_metrics'][related_metric][key])
            if key == 'shifted_counts':
                try:
                    # Stored as bytes in the DB, decode then literal_eval
                    shifted_counts_str = related_to_metric_groups_dict['related_to_metrics'][related_metric][key].decode('utf-8')
                    shifted_counts = literal_eval(shifted_counts_str)
                except AttributeError:
                    shifted_counts = related_to_metric_groups_dict['related_to_metrics'][related_metric][key]
                related_to_metric_groups_dict['related_to_metrics'][related_metric][key] = shifted_counts
        # Remap the metric_id and related_metric_id for clarity
        related_to_metric_groups_dict['related_to_metrics'][related_metric]['related_to_metric_id'] = related_to_metric_groups_dict['related_to_metrics'][related_metric]['metric_id']
        related_to_metric_groups_dict['related_to_metrics'][related_metric]['metric_id'] = metric_id
        del related_to_metric_groups_dict['related_to_metrics'][related_metric]['related_metric_id']

    return related_to_metric_groups_dict
def update_metric_group(base_name, metric_id, cross_correlation_relationships, ids_with_metric_names):
    """
    Given a cross_correlation_relationships dictionary update the metric_group
    rows in the database (and the related luminosity Redis hashes).

    :param base_name: the name of the metric the group belongs to
    :param metric_id: the metric id of the group metric
    :param cross_correlation_relationships: dict keyed on related metric name,
        each value carrying at least metric_id, avg_coefficient,
        shifted_counts and avg_shifted_coefficient
    :param ids_with_metric_names: dict mapping metric id -> metric name
    :return: updated_metric_group
    :rtype: boolean
    """
    updated_metric_group = False
    # base_name = list(cross_correlation_relationships.keys())[0]
    # metric_id = cross_correlation_relationships[base_name]['metric_id']
    logger.info(
        'related_metrics :: update_metric_group :: updating metric_group for %s id: %s' % (
            base_name, str(metric_id)))
    timestamp = int(time())
    metric_group_info = {}
    try:
        metric_group_info = get_metric_group_info(skyline_app, metric_id)
    except Exception as err:
        logger.error(traceback.format_exc())
        logger.error(
            'error :: related_metrics :: update_metric_group :: get_metric_group_info failed - %s' % str(err))
    metric_group = {}
    try:
        metric_group = get_metric_group(skyline_app, metric_id)
    except Exception as err:
        logger.error(traceback.format_exc())
        logger.error(
            'error :: related_metrics :: update_metric_group :: get_metric_group failed for %s - %s' % (
                str(metric_id), str(err)))
    metric_group_to_be_updated = False
    # NOTE(review): if get_metric_group failed above, metric_group is {} and
    # metric_group[metric_id] raises an uncaught KeyError here - confirm
    # get_metric_group always returns {metric_id: {...}} even on no data
    if len(metric_group[metric_id].keys()) == 0:
        metric_group = {}
        metric_group_to_be_updated = True
    else:
        metric_group = metric_group[metric_id]
    # The related metric ids currently recorded in the DB metric_group
    current_related_metric_ids = []
    if metric_group:
        try:
            for related_metric_id in list(metric_group.keys()):
                current_related_metric_ids.append(related_metric_id)
        except Exception as err:
            logger.error(
                'error :: related_metrics :: update_metric_group :: error occurred building current_related_metric_ids - %s' % (
                    str(err)))
    # The related metric ids per the newly calculated cross correlations
    new_related_metric_ids = []
    try:
        for related_metric in list(cross_correlation_relationships.keys()):
            new_related_metric_ids.append(
                cross_correlation_relationships[related_metric]['metric_id'])
    except Exception as err:
        logger.error(traceback.format_exc())
        logger.error(
            'error :: related_metrics :: update_metric_group :: error occurred building new_related_metric_ids - %s' % (
                str(err)))
    # Determine the ids to add (new), remove (no longer correlated) and
    # update (avg_coefficient changed)
    add_related_metric_ids = []
    update_related_metric_ids = []
    for related_metric_id in new_related_metric_ids:
        if related_metric_id == metric_id:
            # A metric is never related to itself
            continue
        if related_metric_id not in current_related_metric_ids:
            add_related_metric_ids.append(related_metric_id)
            metric_group_to_be_updated = True
    remove_related_metric_ids = []
    for related_metric_id in current_related_metric_ids:
        if related_metric_id not in new_related_metric_ids:
            remove_related_metric_ids.append(related_metric_id)
            metric_group_to_be_updated = True
        else:
            try:
                related_metric = ids_with_metric_names[related_metric_id]
                current_avg_coefficient = metric_group[related_metric_id][
                    'avg_coefficient']
                # New value rounded to 5 places to match DB precision
                new_avg_coefficient = round(
                    cross_correlation_relationships[related_metric]
                    ['avg_coefficient'], 5)
                if current_avg_coefficient != new_avg_coefficient:
                    logger.info(
                        'related_metrics :: update_metric_group :: updating %s in metric_group for %s as avg_coefficient has changed - current: %s, new: %s' % (
                            related_metric, base_name,
                            str(current_avg_coefficient),
                            str(new_avg_coefficient)))
                    update_related_metric_ids.append(related_metric_id)
                    metric_group_to_be_updated = True
            except Exception as err:
                logger.error(traceback.format_exc())
                logger.error(
                    'error :: related_metrics :: update_metric_group :: could not get determine while to update related_metric_id %s in metric_group - %s' % (
                        str(related_metric_id), str(err)))
    # Nothing changed - log it (processing still continues below)
    if not add_related_metric_ids:
        if not remove_related_metric_ids:
            if not update_related_metric_ids:
                logger.info(
                    'related_metrics :: update_metric_group :: no change to metric_group for %s, all related metric ids and avg_coefficient values are the same' % (
                        str(base_name)))
    # NOTE(review): if get_engine raises, engine is never assigned and the
    # `if engine:` checks below raise NameError - presumably engine should be
    # initialised to None first, verify against the other query functions
    try:
        engine, fail_msg, trace = get_engine(skyline_app)
        if fail_msg != 'got MySQL engine':
            logger.error(
                'error :: related_metrics :: update_metric_group :: could not get a MySQL engine fail_msg - %s' % str(fail_msg))
        if trace != 'none':
            logger.error(
                'error :: related_metrics :: update_metric_group :: could not get a MySQL engine trace - %s' % str(trace))
    except Exception as err:
        logger.error(traceback.format_exc())
        logger.error(
            'error :: related_metrics :: update_metric_group :: could not get a MySQL engine - %s' % str(err))
    if engine:
        try:
            metric_group_table, fail_msg, trace = metric_group_table_meta(
                skyline_app, engine)
            if fail_msg != 'metric_group meta reflected OK':
                logger.error(
                    'error :: related_metrics :: update_metric_group :: could not get metric_group_table_meta fail_msg - %s' % str(fail_msg))
            if trace != 'none':
                logger.error(
                    'error :: related_metrics :: update_metric_group :: could not get metric_group_table_meta trace - %s' % str(trace))
        except Exception as err:
            logger.error(traceback.format_exc())
            logger.error(
                'error :: related_metrics :: update_metric_group :: metric_group_table_meta - %s' % str(err))
    if metric_group_to_be_updated:
        # A single connection is shared by the delete/insert/update blocks
        # below and only closed near the end of the function
        # NOTE(review): if engine.connect() fails, connection stays unbound
        # and the blocks below raise NameError inside their own try/excepts
        try:
            connection = engine.connect()
        except Exception as err:
            logger.error(traceback.format_exc())
            logger.error(
                'error :: related_metrics :: update_metric_group :: engine.connect failed to add related_metric_ids - %s' % str(err))
        # Delete any metrics from the metric_group that are no longer valid
        if len(remove_related_metric_ids) > 0:
            logger.info(
                'debug :: related_metrics :: update_metric_group :: %s metric_group: %s, current_related_metric_ids: %s' % (
                    base_name, str(metric_id),
                    str(current_related_metric_ids)))
            logger.info(
                'debug :: related_metrics :: update_metric_group :: %s metric_group: %s, new_related_metric_ids: %s' % (
                    base_name, str(metric_id), str(new_related_metric_ids)))
            logger.info(
                'debug :: related_metrics :: update_metric_group :: %s metric_group: %s, metric_group: %s' % (
                    base_name, str(metric_id), str(metric_group)))
            logger.info(
                'debug :: related_metrics :: update_metric_group :: %s metric_group: %s, cross_correlation_relationships%s' % (
                    base_name, str(metric_id),
                    str(cross_correlation_relationships)))
            logger.info(
                'debug :: related_metrics :: update_metric_group :: deleting related_metric_ids from %s metric_group: %s, remove_related_metric_ids: %s' % (
                    base_name, str(metric_id),
                    str(remove_related_metric_ids)))
            for related_metric_id in remove_related_metric_ids:
                related_metric = ids_with_metric_names[related_metric_id]
                logger.info(
                    'debug :: related_metrics :: update_metric_group :: removing entry from %s metric_group for %s: %s' % (
                        base_name, related_metric,
                        str(metric_group[related_metric_id])))
            # One DELETE covers all removed ids via IN (...)
            try:
                stmt = metric_group_table.delete().\
                    where(metric_group_table.c.metric_id == metric_id).\
                    where(metric_group_table.c.related_metric_id.in_(remove_related_metric_ids))
                result = connection.execute(stmt)
                if result:
                    updated_metric_group = True
            except Exception as err:
                logger.error(traceback.format_exc())
                logger.error(
                    'error :: related_metrics :: update_metric_group :: failed to delete related_metric_ids from metric_group_table - %s' % str(err))
        # Insert rows for the newly related metrics
        if len(add_related_metric_ids) > 0:
            logger.info(
                'related_metrics :: update_metric_group :: adding %s related_metric_ids to %s metric_group: %s' % (
                    str(len(add_related_metric_ids)), base_name,
                    str(metric_id)))
            logger.info(
                'related_metrics :: update_metric_group :: adding related_metric_ids to %s metric_group: %s, related_metric_ids: %s' % (
                    base_name, str(metric_id), str(add_related_metric_ids)))
            for related_metric in list(cross_correlation_relationships.keys()):
                cross_correlation_data = cross_correlation_relationships[
                    related_metric]
                related_metric_id = cross_correlation_data['metric_id']
                if related_metric_id not in add_related_metric_ids:
                    continue
                if related_metric_id == metric_id:
                    continue
                logger.info(
                    'related_metrics :: update_metric_group :: adding %s related_metric_id: %s on %s metric_group: %s' % (
                        related_metric, str(related_metric_id), base_name,
                        str(metric_id)))
                # A NOTE ON confidence score
                # The confidence score is not stored in the DB but rather
                # calculated in real time functions/metrics/get_related_metrics
                # This is due to the fact that changing correlations on other
                # metrics in group impact this score. If it were calculated via
                # get_cross_correlation_relationships or here, every time a
                # metric in a group had a new correlation, the confidence score
                # each every metric in the group would have to be recalculated
                # and stored. This is a very dynamic variable.
                try:
                    result = None
                    ins = metric_group_table.insert().values(
                        metric_id=int(metric_id),
                        related_metric_id=related_metric_id,
                        avg_coefficient=cross_correlation_data[
                            'avg_coefficient'],
                        shifted_counts=json.dumps(
                            cross_correlation_data['shifted_counts']),
                        avg_shifted_coefficient=cross_correlation_data[
                            'avg_shifted_coefficient'],
                        timestamp=timestamp)
                    result = connection.execute(ins)
                    if result:
                        updated_metric_group = True
                        if DEBUG:
                            logger.info(
                                'debug :: related_metrics :: update_metric_group :: inserted row for related_metric_id: %s on %s metric_group: %s' % (
                                    str(related_metric_id), base_name,
                                    str(metric_id)))
                    else:
                        logger.error(
                            'error :: related_metrics :: update_metric_group :: failed to insert row for related_metric_id: %s on %s metric_group: %s' % (
                                str(related_metric_id), base_name,
                                str(metric_id)))
                except Exception as err:
                    logger.error(traceback.format_exc())
                    logger.error(
                        'error :: related_metrics :: update_metric_group :: failed to insert new related metric - %s' % str(err))
        # Update rows whose avg_coefficient changed
        if len(update_related_metric_ids) > 0:
            logger.info(
                'related_metrics :: update_metric_group :: updating %s related_metric_ids to %s metric_group: %s' % (
                    str(len(update_related_metric_ids)), base_name,
                    str(metric_id)))
            for related_metric in list(cross_correlation_relationships.keys()):
                cross_correlation_data = cross_correlation_relationships[
                    related_metric]
                related_metric_id = cross_correlation_data['metric_id']
                if related_metric_id not in update_related_metric_ids:
                    continue
                if related_metric_id == metric_id:
                    continue
                try:
                    stmt = metric_group_table.update().values(
                        avg_coefficient=cross_correlation_data['avg_coefficient'],
                        shifted_counts=json.dumps(cross_correlation_data['shifted_counts']),
                        avg_shifted_coefficient=cross_correlation_data['avg_shifted_coefficient'],
                        timestamp=timestamp).\
                        where(metric_group_table.c.metric_id == metric_id).\
                        where(metric_group_table.c.related_metric_id == related_metric_id)
                    connection.execute(stmt)
                except Exception as err:
                    logger.error(traceback.format_exc())
                    logger.error(
                        'error :: related_metrics :: update_metric_group :: failed to update metric_group_table from metric_id: %s, related_metric_id: %s - %s' % (
                            str(metric_id), str(related_metric_id),
                            str(err)))
    # Reflect the metric_group_info table only if something changed
    if metric_group_to_be_updated and engine:
        try:
            metric_group_info_table, fail_msg, trace = metric_group_info_table_meta(
                skyline_app, engine)
            if fail_msg != 'metric_group_info meta reflected OK':
                logger.error(
                    'error :: related_metrics :: update_metric_group :: could not get metric_group_info_table_meta fail_msg - %s' % str(fail_msg))
            if trace != 'none':
                logger.error(
                    'error :: related_metrics :: update_metric_group :: could not get metric_group_info_table_meta trace - %s' % str(trace))
        except Exception as err:
            logger.error(traceback.format_exc())
            logger.error(
                'error :: related_metrics :: update_metric_group :: metric_group_info_table_meta - %s' % str(err))
    related_metrics_count = 0
    if metric_group_to_be_updated:
        # New group size = current + added - removed
        related_metrics_count = (
            len(current_related_metric_ids) +
            len(add_related_metric_ids)) - len(remove_related_metric_ids)
    if metric_group_info and metric_group_to_be_updated:
        # Update the timestamp in the metric_group_info and Redis
        current_updated_count = 0
        try:
            current_updated_count = metric_group_info[metric_id][
                'updated_count']
        except KeyError:
            current_updated_count = 0
        updated_count = current_updated_count + 1
        try:
            connection.execute(
                metric_group_info_table.update(
                    metric_group_info_table.c.metric_id == metric_id).values(
                        related_metrics=related_metrics_count,
                        last_updated=timestamp,
                        updated_count=updated_count))
        except Exception as err:
            logger.error(traceback.format_exc())
            logger.error(
                'error :: related_metrics :: update_metric_group :: failed to update metric_group_info table record for metric_id: %s - %s' % (
                    str(metric_id), str(err)))
    if not metric_group_info and metric_group_to_be_updated:
        # Insert a new metric_group_info record
        try:
            ins = metric_group_info_table.insert().values(
                metric_id=int(metric_id),
                related_metrics=related_metrics_count,
                last_updated=timestamp)
            result = connection.execute(ins)
            logger.info(
                'related_metrics :: update_metric_group :: added new record to metric_groups_info table for %s' % (base_name))
        except Exception as err:
            logger.error(traceback.format_exc())
            logger.error(
                'error :: related_metrics :: update_metric_group :: failed to insert metric_group_info table record - %s' % str(err))
    # NOTE(review): if this fails redis_conn_decoded stays None and the hset
    # calls below fail with AttributeError (caught and logged)
    redis_conn_decoded = None
    try:
        redis_conn_decoded = get_redis_conn_decoded(skyline_app)
    except Exception as err:
        logger.error(traceback.format_exc())
        logger.error(
            'error :: related_metrics :: update_metric_group :: failed to get Redis hash aet.metrics_manager.metric_names_with_ids - %s' % str(err))
    if metric_group_to_be_updated:
        try:
            connection.close()
        except Exception as err:
            logger.error(traceback.format_exc())
            logger.error(
                'error :: related_metrics :: update_metric_group :: connection.close() failed - %s' % str(err))
        # Final membership after applying adds and removes
        related_metric_ids = []
        for related_metric_id in list(current_related_metric_ids + add_related_metric_ids):
            if related_metric_id not in remove_related_metric_ids:
                related_metric_ids.append(related_metric_id)
        related_metrics = []
        for related_metric_id in related_metric_ids:
            related_metric = None
            try:
                related_metric = ids_with_metric_names[related_metric_id]
            except KeyError:
                related_metric = None
            if related_metric:
                related_metrics.append(related_metric)
        if related_metrics:
            # Mirror the group membership into the luminosity Redis hashes
            try:
                redis_conn_decoded.hset(
                    'luminosity.related_metrics.metric_ids', metric_id,
                    str(related_metric_ids))
                logger.info(
                    'related_metrics :: update_metric_group :: updated luminosity.related_metrics.metric_ids Redis hash key for %s' % (base_name))
            except Exception as err:
                logger.error(traceback.format_exc())
                logger.error(
                    'error :: related_metrics :: update_metric_group :: failed to updated luminosity.related_metrics.metric_ids Redis hash key for %s - %s' % (
                        base_name, str(err)))
            try:
                redis_conn_decoded.hset('luminosity.related_metrics.metrics',
                                        base_name, str(related_metrics))
                logger.info(
                    'related_metrics :: update_metric_group :: updated luminosity.related_metrics.metrics Redis hash key for %s' % (base_name))
            except Exception as err:
                logger.error(traceback.format_exc())
                logger.error(
                    'error :: related_metrics :: update_metric_group :: failed to updated luminosity.related_metrics.metrics Redis hash key for %s - %s' % (
                        base_name, str(err)))
        else:
            # No related metrics remain - drop the Redis hash entries
            try:
                redis_conn_decoded.hdel(
                    'luminosity.related_metrics.metric_ids', metric_id)
                logger.info(
                    'related_metrics :: update_metric_group :: removed %s from luminosity.related_metrics.metrics Redis hash' % (str(metric_id)))
            except Exception as err:
                logger.error(traceback.format_exc())
                logger.error(
                    'error :: related_metrics :: update_metric_group :: failed to remove from luminosity.related_metrics.metric_ids Redis hash key for %s - %s' % (
                        base_name, str(err)))
            try:
                redis_conn_decoded.hdel('luminosity.related_metrics.metrics',
                                        base_name)
                logger.info(
                    'related_metrics :: update_metric_group :: removed %s from luminosity.related_metrics.metrics Redis hash' % (base_name))
            except Exception as err:
                logger.error(traceback.format_exc())
                logger.error(
                    'error :: related_metrics :: update_metric_group :: failed to updated luminosity.related_metrics.metrics Redis hash key for %s - %s' % (
                        base_name, str(err)))
    # Record when this group was last processed, updated or not
    try:
        redis_conn_decoded.hset('luminosity.metric_group.last_updated',
                                metric_id, timestamp)
        logger.info(
            'related_metrics :: update_metric_group :: updating metric_group for %s id: %s - done' % (
                base_name, str(metric_id)))
    except Exception as err:
        logger.error(traceback.format_exc())
        logger.error(
            'error :: related_metrics :: update_metric_group :: failed to get Redis hash aet.metrics_manager.metric_names_with_ids - %s' % str(err))
    if engine:
        engine_disposal(skyline_app, engine)
    return updated_metric_group