def _merge_series(self, series, dimensions, limit):
    series_list = []
    if not series:
        # Match the (count, series_list) shape of the normal return path.
        return 0, series_list
    measurements = []
    top_batch = []
    num_series = len(series)
    # Prime the batch with the first row of each series; series that are
    # already exhausted drop out of the count.
    for i in range(0, num_series):
        row = next(series[i][1], None)
        if row:
            top_batch.append(
                [i, row.time_stamp, row.value,
                 rest_utils.from_json(row.value_meta) if row.value_meta
                 else {}])
        else:
            num_series -= 1
    # Sort descending by timestamp so the oldest pending row always sits at
    # the end of the batch and can be consumed cheaply from there.
    top_batch.sort(key=lambda m: m[1], reverse=True)
    count = 0
    while (not limit or count < limit) and top_batch:
        # Emit the oldest row across all series.
        measurements.append(
            [self._isotime_msec(top_batch[num_series - 1][1]),
             top_batch[num_series - 1][2],
             top_batch[num_series - 1][3]])
        count += 1
        # Refill the freed slot from the series the emitted row came from.
        row = next(series[top_batch[num_series - 1][0]][1], None)
        if row:
            top_batch[num_series - 1] = \
                [top_batch[num_series - 1][0], row.time_stamp, row.value,
                 rest_utils.from_json(row.value_meta) if row.value_meta
                 else {}]
            top_batch.sort(key=lambda m: m[1], reverse=True)
        else:
            num_series -= 1
            top_batch.pop()
    # The merged output reuses the name and id of the first metric.
    series_list.append({'name': series[0][0]['name'],
                        'id': series[0][0]['id'],
                        'columns': ['timestamp', 'value', 'value_meta'],
                        'measurements': measurements,
                        'dimensions': dimensions})
    return count, series_list
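# _merge_series above is a k-way merge: it keeps one pending row per series,
# sorted descending by timestamp, and repeatedly pops the oldest row from the
# end of the batch. A minimal, self-contained sketch of the same idea using
# heapq.merge, assuming each per-series iterator already yields rows in
# ascending time order (the (time_stamp, value) row shape here is invented
# for illustration):
import heapq

def merge_sorted_series(series_iterators):
    # heapq.merge interleaves the already-sorted iterators lazily, without
    # materializing all rows, yielding them in ascending timestamp order.
    return heapq.merge(*series_iterators, key=lambda row: row[0])

# list(merge_sorted_series([iter([(1, 'a'), (3, 'c')]), iter([(2, 'b')])]))
#   -> [(1, 'a'), (2, 'b'), (3, 'c')]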
def test_should_report_healthy_if_all_services_healthy(
        self, kafka_check, alarms_db_check, metrics_db_check, _):
    kafka_check.health_check.return_value = base.CheckResult(True, 'OK')
    alarms_db_check.health_check.return_value = base.CheckResult(
        True, 'OK')
    metrics_db_check.health_check.return_value = base.CheckResult(
        True, 'OK')

    self.set_route()
    self.resources._kafka_check = kafka_check
    self.resources._alarm_db_check = alarms_db_check
    self.resources._metrics_db_check = metrics_db_check

    response = self.simulate_request(
        ENDPOINT,
        headers={'Content-Type': 'application/json'},
        decode='utf8',
        method='GET')

    self.assertEqual(falcon.HTTP_OK, self.srmock.status)

    response = utils.from_json(response)
    self.assertIn('kafka', response)
    self.assertIn('alarms_database', response)
    self.assertIn('metrics_database', response)
    self.assertEqual('OK', response.get('kafka'))
    self.assertEqual('OK', response.get('alarms_database'))
    self.assertEqual('OK', response.get('metrics_database'))
def __init__(self, event, meta):
    if not event:
        error_msg = 'Envelope cannot be created without event'
        raise EventEnvelopeException(error_msg)
    # meta.get returns a falsy None for a missing key, so this also covers
    # 'project_id' being absent entirely.
    if not meta.get('project_id'):
        error_msg = 'Envelope cannot be created without project_id'
        raise EventEnvelopeException(error_msg)

    creation_time = self._get_creation_time()
    super(Envelope, self).__init__(
        event=rest_utils.from_json(event),
        creation_time=creation_time,
        meta=meta
    )
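# A minimal usage sketch for the Envelope constructor above. The event
# payload, meta values, and the helper name _example_envelope are invented
# for illustration; Envelope and EventEnvelopeException come from the
# surrounding module.
def _example_envelope():
    event_json = '{"event_type": "compute.instance.create.start"}'
    meta = {'project_id': 'abc123', 'topic': 'events'}
    # Valid input: the JSON string is parsed into the envelope's event.
    envelope = Envelope(event_json, meta)
    # An empty project_id is rejected before any parsing happens.
    try:
        Envelope(event_json, {'project_id': ''})
    except EventEnvelopeException:
        pass
    return envelope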
def _query_measurements(self, metrics, start_timestamp, end_timestamp,
                        offset_timestamp, limit):
    results = []
    for index, metric in enumerate(metrics):
        if index == 0:
            query = self._build_measurement_query(metric['id'],
                                                  start_timestamp,
                                                  end_timestamp,
                                                  offset_timestamp,
                                                  limit)
        else:
            if limit:
                # Integer division: fetch_size must be an int. Split the
                # shared limit among up to four concurrent queries, but
                # never page below 1000 rows or above the session default.
                fetch_size = min(self.session.default_fetch_size,
                                 max(1000, limit // min(index, 4)))
            else:
                fetch_size = self.session.default_fetch_size
            query = self._build_measurement_query(metric['id'],
                                                  start_timestamp,
                                                  end_timestamp,
                                                  None,
                                                  limit,
                                                  fetch_size)
        results.append([metric,
                        iter(self.session.execute_async(
                            query[0], query[1]).result())])

    series_list = []
    count = 0
    for result in results:
        measurements = []
        row = next(result[1], None)
        while row:
            measurements.append(
                [self._isotime_msec(row.time_stamp),
                 row.value,
                 rest_utils.from_json(row.value_meta)
                 if row.value_meta else {}])
            count += 1
            if limit and count >= limit:
                break
            row = next(result[1], None)
        series_list.append({'name': result[0]['name'],
                            'id': result[0]['id'],
                            'columns': ['timestamp', 'value', 'value_meta'],
                            'measurements': measurements,
                            'dimensions': result[0]['dimensions']})
        if limit and count >= limit:
            break

    return count, series_list
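# A small, hypothetical helper that mirrors the fetch_size heuristic above,
# to make the arithmetic concrete: the shared row limit is split among up to
# four concurrent queries, clamped between 1000 rows and the session's
# default page size. The name _example_fetch_size is invented.
def _example_fetch_size(default_fetch_size, limit, index):
    return min(default_fetch_size, max(1000, limit // min(index, 4)))

# _example_fetch_size(5000, 10000, 1) -> 5000  (10000 // 1, capped at default)
# _example_fetch_size(5000, 10000, 4) -> 2500  (divisor reaches its cap of 4)
# _example_fetch_size(5000, 800, 2)   -> 1000  (floor of 1000 rows per page)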
def from_json(req):
    """Read the JSON body from the HTTP request and return it parsed.

    :param req: HTTP request object.
    :return: the request body as a parsed JSON object.
    :raises falcon.HTTPBadRequest: if the body is not valid JSON.
    """
    try:
        msg = req.stream.read()
        return rest_utils.from_json(msg)
    except Exception as ex:
        LOG.exception(ex)
        raise falcon.HTTPBadRequest('Bad request',
                                    'Request body is not valid JSON')
def read_json_msg_body(req):
    """Read the JSON body from the HTTP request and return it parsed.

    :param req: HTTP request object.
    :return: the request body as a parsed JSON object.
    :raises falcon.HTTPBadRequest: if the body is not valid JSON.
    """
    try:
        msg = req.stream.read()
        json_msg = rest_utils.from_json(msg)
        return json_msg
    except ValueError as ex:
        LOG.debug(ex)
        raise falcon.HTTPBadRequest('Bad request',
                                    'Request body is not valid JSON')
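# A usage sketch for the two JSON-body helpers above. FakeRequest is a
# stand-in invented for illustration; in the service these helpers receive a
# falcon Request whose stream wraps the WSGI input.
import io

class FakeRequest(object):
    def __init__(self, body):
        # falcon exposes the request body as a readable byte stream.
        self.stream = io.BytesIO(body)

# read_json_msg_body(FakeRequest(b'{"name": "cpu.idle_perc"}'))
#   -> {'name': 'cpu.idle_perc'}
# read_json_msg_body(FakeRequest(b'not json'))
#   -> raises falcon.HTTPBadRequest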
def test_should_report_not_healthy_if_one_service_not_healthy(
        self, kafka_check, alarms_db_check, metrics_db_check, _):
    test_list = [
        {'kafka': {'healthy': False, 'message': 'Unavailable'},
         'alarms_db': {'healthy': True, 'message': 'OK'},
         'metrics_db': {'healthy': True, 'message': 'OK'}},
        {'kafka': {'healthy': True, 'message': 'OK'},
         'alarms_db': {'healthy': False, 'message': 'Connection Error'},
         'metrics_db': {'healthy': True, 'message': 'OK'}},
        {'kafka': {'healthy': True, 'message': 'OK'},
         'alarms_db': {'healthy': True, 'message': 'OK'},
         'metrics_db': {'healthy': False, 'message': 'Error'}},
    ]

    for service in test_list:
        kafka_check.health_check.return_value = base.CheckResult(
            service['kafka']['healthy'], service['kafka']['message'])
        alarms_db_check.health_check.return_value = base.CheckResult(
            service['alarms_db']['healthy'],
            service['alarms_db']['message'])
        metrics_db_check.health_check.return_value = base.CheckResult(
            service['metrics_db']['healthy'],
            service['metrics_db']['message'])

        self.set_route()
        self.resources._kafka_check = kafka_check
        self.resources._alarm_db_check = alarms_db_check
        self.resources._metrics_db_check = metrics_db_check

        response = self.simulate_request(
            ENDPOINT,
            headers={'Content-Type': 'application/json'},
            decode='utf8',
            method='GET')

        self.assertEqual(falcon.HTTP_SERVICE_UNAVAILABLE,
                         self.srmock.status)

        response = utils.from_json(response)
        self.assertIn('kafka', response)
        self.assertIn('alarms_database', response)
        self.assertIn('metrics_database', response)
        self.assertEqual(service['kafka']['message'],
                         response.get('kafka'))
        self.assertEqual(service['alarms_db']['message'],
                         response.get('alarms_database'))
        self.assertEqual(service['metrics_db']['message'],
                         response.get('metrics_database'))
def measurement_list(self, tenant_id, region, name, dimensions,
                     start_timestamp, end_timestamp, offset, limit,
                     merge_metrics_flag):
    try:
        json_measurement_list = []

        rows = self._get_measurements(tenant_id, region, name, dimensions,
                                      start_timestamp, end_timestamp,
                                      offset, limit, merge_metrics_flag)

        if not rows:
            return json_measurement_list

        if not merge_metrics_flag:
            dimensions = self._get_dimensions(tenant_id, region, name,
                                              dimensions)

        measurements_list = (
            [[self._isotime_msec(time_stamp),
              value,
              rest_utils.from_json(value_meta) if value_meta else {}]
             for (time_stamp, value, value_meta) in rows])

        measurement = {u'name': name,
                       # The last date in the measurements list.
                       u'id': measurements_list[-1][0],
                       u'dimensions': dimensions,
                       u'columns': [u'timestamp', u'value', u'value_meta'],
                       u'measurements': measurements_list}

        json_measurement_list.append(measurement)

        return json_measurement_list

    except exceptions.RepositoryException as ex:
        LOG.exception(ex)
        raise ex
    except Exception as ex:
        LOG.exception(ex)
        raise exceptions.RepositoryException(ex)
def alarm_history(self, tenant_id, alarm_id_list, offset, limit,
                  start_timestamp=None, end_timestamp=None):
    try:
        json_alarm_history_list = []

        if not alarm_id_list:
            return json_alarm_history_list

        conditions = [ALARM_TENANT_ID_EQ]
        params = [tenant_id.encode('utf8')]
        if len(alarm_id_list) == 1:
            conditions.append(ALARM_ID_EQ)
            params.append(alarm_id_list[0])
        else:
            conditions.append(
                ' and alarm_id in ({}) '.format(
                    ','.join(['%s'] * len(alarm_id_list))))
            for alarm_id in alarm_id_list:
                params.append(alarm_id)

        if offset:
            conditions.append(OFFSET_TIME_GT)
            params.append(offset)
        elif start_timestamp:
            conditions.append(START_TIME_GE)
            params.append(int(start_timestamp * 1000))
        else:
            conditions.append('')

        if end_timestamp:
            conditions.append(END_TIME_LE)
            params.append(int(end_timestamp * 1000))
        else:
            conditions.append('')

        if limit:
            conditions.append(LIMIT_CLAUSE)
            # Fetch one extra row so the caller can detect a next page.
            params.append(limit + 1)
        else:
            conditions.append('')

        rows = self.session.execute(ALARM_HISTORY_CQL % tuple(conditions),
                                    params)

        if not rows:
            return json_alarm_history_list

        sorted_rows = sorted(rows, key=lambda row: row.time_stamp)

        for (tenant_id, alarm_id, time_stamp, metrics, new_state,
             old_state, reason, reason_data, sub_alarms) in sorted_rows:
            alarm = {u'timestamp': self._isotime_msec(time_stamp),
                     u'alarm_id': alarm_id,
                     u'metrics': rest_utils.from_json(metrics),
                     u'new_state': new_state,
                     u'old_state': old_state,
                     u'reason': reason,
                     u'reason_data': reason_data,
                     u'sub_alarms': rest_utils.from_json(sub_alarms),
                     u'id': str(int((time_stamp -
                                     self.epoch).total_seconds() * 1000))}

            if alarm[u'sub_alarms']:
                for sub_alarm in alarm[u'sub_alarms']:
                    sub_expr = sub_alarm['sub_alarm_expression']
                    metric_def = sub_expr['metric_definition']
                    sub_expr['metric_name'] = metric_def['name']
                    sub_expr['dimensions'] = metric_def['dimensions']
                    del sub_expr['metric_definition']

            json_alarm_history_list.append(alarm)

        return json_alarm_history_list
    except Exception as ex:
        LOG.exception(ex)
        raise exceptions.RepositoryException(ex)
def alarm_history(self, tenant_id, alarm_id_list, offset, limit,
                  start_timestamp=None, end_timestamp=None):
    try:
        json_alarm_history_list = []

        if not alarm_id_list:
            return json_alarm_history_list

        select_stmt = """
          select alarm_id, time_stamp, metrics, new_state, old_state,
                 reason, reason_data, sub_alarms, tenant_id
          from alarm_state_history
          where tenant_id = %s
        """

        parms = [tenant_id.encode('utf8')]

        place_holders = ["%s"] * len(alarm_id_list)
        in_clause = ' and alarm_id in ({}) '.format(
            ",".join(place_holders))
        select_stmt += in_clause
        parms.extend(alarm_id_list)

        if offset and offset != '0':
            select_stmt += ' and time_stamp > %s '
            dt = timeutils.normalize_time(timeutils.parse_isotime(offset))
            parms.append(self._get_millis_from_timestamp(dt))
        elif start_timestamp:
            select_stmt += ' and time_stamp >= %s '
            parms.append(int(start_timestamp * 1000))

        if end_timestamp:
            select_stmt += ' and time_stamp <= %s '
            parms.append(int(end_timestamp * 1000))

        if limit:
            select_stmt += ' limit %s '
            parms.append(limit + 1)

        # Max 32-bit signed int as fetch_size effectively disables paging.
        stmt = SimpleStatement(select_stmt, fetch_size=2147483647)
        rows = self.cassandra_session.execute(stmt, parms)

        if not rows:
            return json_alarm_history_list

        sorted_rows = sorted(rows, key=lambda row: row.time_stamp)

        for (alarm_id, time_stamp, metrics, new_state, old_state, reason,
             reason_data, sub_alarms, tenant_id) in sorted_rows:
            alarm = {u'timestamp': self._isotime_msec(time_stamp),
                     u'alarm_id': alarm_id,
                     u'metrics': rest_utils.from_json(metrics),
                     u'new_state': new_state,
                     u'old_state': old_state,
                     u'reason': reason,
                     u'reason_data': reason_data,
                     u'sub_alarms': rest_utils.from_json(sub_alarms),
                     u'id': str(self._get_millis_from_timestamp(
                         time_stamp))}

            if alarm[u'sub_alarms']:
                for sub_alarm in alarm[u'sub_alarms']:
                    sub_expr = sub_alarm['sub_alarm_expression']
                    metric_def = sub_expr['metric_definition']
                    sub_expr['metric_name'] = metric_def['name']
                    sub_expr['dimensions'] = metric_def['dimensions']
                    del sub_expr['metric_definition']

            json_alarm_history_list.append(alarm)

        return json_alarm_history_list
    except Exception as ex:
        LOG.exception(ex)
        raise exceptions.RepositoryException(ex)
def alarm_history(self, tenant_id, alarm_id_list, offset, limit,
                  start_timestamp=None, end_timestamp=None):
    try:
        json_alarm_history_list = []

        if not alarm_id_list:
            return json_alarm_history_list

        # Guard against injection, since the ids are interpolated into the
        # InfluxQL string below rather than bound as parameters.
        for alarm_id in alarm_id_list:
            if '\'' in alarm_id or ';' in alarm_id:
                raise Exception(
                    "Input from user contains single quote ['] or "
                    "semi-colon [;] characters[ {} ]".format(alarm_id))

        query = """
          select alarm_id, metrics, new_state, old_state,
                 reason, reason_data, sub_alarms, tenant_id
          from alarm_state_history
          """

        where_clause = (
            " where tenant_id = '{}' ".format(tenant_id.encode('utf8')))

        alarm_id_where_clause_list = (
            [" alarm_id = '{}' ".format(alarm_id.encode('utf8'))
             for alarm_id in alarm_id_list])
        alarm_id_where_clause = " or ".join(alarm_id_where_clause_list)

        where_clause += ' and (' + alarm_id_where_clause + ')'

        time_clause = ''
        if start_timestamp:
            time_clause += " and time >= " + str(
                int(start_timestamp * 1000000)) + "u "
        if end_timestamp:
            time_clause += " and time <= " + str(
                int(end_timestamp * 1000000)) + "u "

        offset_clause = self._build_offset_clause(offset)

        limit_clause = self._build_limit_clause(limit)

        query += where_clause + time_clause + offset_clause + limit_clause

        result = self.influxdb_client.query(query)

        if not result:
            return json_alarm_history_list

        if 'values' in result.raw['series'][0]:
            for point in result.raw['series'][0]['values']:
                alarm_point = {
                    u'timestamp': point[0],
                    u'alarm_id': point[1],
                    u'metrics': rest_utils.from_json(point[2]),
                    u'new_state': point[3],
                    u'old_state': point[4],
                    u'reason': point[5],
                    u'reason_data': point[6],
                    u'sub_alarms': rest_utils.from_json(point[7]),
                    u'id': str(self._get_millis_from_timestamp(
                        timeutils.parse_isotime(point[0])))}

                # The Java API formats these during JSON serialization.
                if alarm_point[u'sub_alarms']:
                    for sub_alarm in alarm_point[u'sub_alarms']:
                        sub_expr = sub_alarm['sub_alarm_expression']
                        metric_def = sub_expr['metric_definition']
                        sub_expr['metric_name'] = metric_def['name']
                        sub_expr['dimensions'] = metric_def['dimensions']
                        del sub_expr['metric_definition']

                json_alarm_history_list.append(alarm_point)

        return json_alarm_history_list
    except Exception as ex:
        LOG.exception(ex)
        raise exceptions.RepositoryException(ex)
def measurement_list(self, tenant_id, region, name, dimensions,
                     start_timestamp, end_timestamp, offset, limit,
                     merge_metrics_flag, group_by):
    json_measurement_list = []
    try:
        query = self._build_select_measurement_query(dimensions, name,
                                                     tenant_id, region,
                                                     start_timestamp,
                                                     end_timestamp, offset,
                                                     group_by, limit)

        if not group_by and not merge_metrics_flag:
            dimensions = self._get_dimensions(tenant_id, region, name,
                                              dimensions)
            query += " slimit 1"

        result = self.influxdb_client.query(query)

        if not result:
            return json_measurement_list

        offset_id = 0
        if offset is not None:
            offset_tuple = offset.split('_')
            offset_id = int(offset_tuple[0]) if len(offset_tuple) > 1 else 0

        index = offset_id

        for serie in result.raw['series']:
            if 'values' in serie:
                measurements_list = []
                for point in serie['values']:
                    value_meta = rest_utils.from_json(point[2]) \
                        if point[2] else {}
                    timestamp = point[0][:19] + '.' + \
                        point[0][20:-1].ljust(3, '0') + 'Z'
                    measurements_list.append([timestamp,
                                              point[1],
                                              value_meta])

                measurement = {u'name': serie['name'],
                               u'id': str(index),
                               u'columns': [u'timestamp', u'value',
                                            u'value_meta'],
                               u'measurements': measurements_list}

                if not group_by:
                    measurement[u'dimensions'] = dimensions
                else:
                    measurement[u'dimensions'] = {
                        key: value
                        for key, value in serie['tags'].iteritems()
                        if not key.startswith('_')}

                json_measurement_list.append(measurement)
                index += 1

        return json_measurement_list
    except exceptions.RepositoryException as ex:
        if (isinstance(ex.message, InfluxDBClientError) and
                ex.message.message.startswith(MEASUREMENT_NOT_FOUND_MSG)):
            return json_measurement_list
        else:
            LOG.exception(ex)
            raise ex
    except InfluxDBClientError as ex:
        if ex.message.startswith(MEASUREMENT_NOT_FOUND_MSG):
            return json_measurement_list
        else:
            LOG.exception(ex)
            raise exceptions.RepositoryException(ex)
    except Exception as ex:
        LOG.exception(ex)
        raise exceptions.RepositoryException(ex)
def measurement_list(self, tenant_id, region, name, dimensions,
                     start_timestamp, end_timestamp, offset, limit,
                     merge_metrics_flag, group_by):
    json_measurement_list = []

    offset_id = 0
    offset_timestamp = offset
    if offset and "_" in offset:
        offset_id_str, _, offset_timestamp = offset.partition('_')
        offset_id = int(offset_id_str)

    try:
        # The build query method apparently only considers the offset
        # timestamp.
        query = self._build_select_measurement_query(dimensions, name,
                                                     tenant_id, region,
                                                     start_timestamp,
                                                     end_timestamp,
                                                     offset_timestamp,
                                                     group_by, limit)

        if not group_by and not merge_metrics_flag:
            dimensions = self._get_dimensions(tenant_id, region, name,
                                              dimensions)
            query += " slimit 1"

        result = self.influxdb_client.query(query)

        if not result:
            return json_measurement_list

        index = offset_id

        for serie in result.raw['series']:
            if 'values' in serie:
                measurements_list = []
                for point in serie['values']:
                    value_meta = rest_utils.from_json(point[2]) \
                        if point[2] else {}
                    timestamp = point[0][:19] + '.' + \
                        point[0][20:-1].ljust(3, '0') + 'Z'
                    measurements_list.append([timestamp,
                                              point[1],
                                              value_meta])

                measurement = {u'name': serie['name'],
                               u'id': str(index),
                               u'columns': [u'timestamp', u'value',
                                            u'value_meta'],
                               u'measurements': measurements_list}

                if not group_by:
                    measurement[u'dimensions'] = dimensions
                else:
                    measurement[u'dimensions'] = {
                        key: value
                        for key, value in serie['tags'].items()
                        if not key.startswith('_')}

                json_measurement_list.append(measurement)
                index += 1

        return json_measurement_list
    except exceptions.RepositoryException as ex:
        if (len(ex.args) and
                isinstance(ex.args[0], InfluxDBClientError) and
                str(ex.args[0]).startswith(MEASUREMENT_NOT_FOUND_MSG)):
            return json_measurement_list
        else:
            LOG.exception(ex)
            raise ex
    except InfluxDBClientError as ex:
        if str(ex).startswith(MEASUREMENT_NOT_FOUND_MSG):
            return json_measurement_list
        else:
            LOG.exception(ex)
            raise exceptions.RepositoryException(ex)
    except Exception as ex:
        LOG.exception(ex)
        raise exceptions.RepositoryException(ex)
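# A worked example of the millisecond-padding expression used by both
# measurement_list variants above,
#   point[0][:19] + '.' + point[0][20:-1].ljust(3, '0') + 'Z',
# on invented InfluxDB RFC 3339 timestamps:
#
#   '2015-03-14T09:26:53.589Z' -> '2015-03-14T09:26:53.589Z'  (unchanged)
#   '2015-03-14T09:26:53.5Z'   -> '2015-03-14T09:26:53.500Z'  (padded to ms)
#   '2015-03-14T09:26:53Z'     -> '2015-03-14T09:26:53.000Z'  (no fraction)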
def alarm_history(self, tenant_id, alarm_id_list, offset, limit,
                  start_timestamp=None, end_timestamp=None):
    try:
        json_alarm_history_list = []

        if not alarm_id_list:
            return json_alarm_history_list

        for alarm_id in alarm_id_list:
            if '\'' in alarm_id or ';' in alarm_id:
                raise Exception(
                    "Input from user contains single quote ['] or "
                    "semi-colon [;] characters[ {} ]".format(alarm_id))

        query = """
          select alarm_id, metrics, new_state, old_state,
                 reason, reason_data, sub_alarms, tenant_id
          from alarm_state_history
          """

        tenant_id = tenant_id if PY3 else tenant_id.encode('utf-8')
        where_clause = " where tenant_id = '{}' ".format(tenant_id)

        alarm_id_where_clause_list = (
            [" alarm_id = '{}' ".format(
                alarm_id if PY3 else alarm_id.encode('utf8'))
             for alarm_id in alarm_id_list])
        alarm_id_where_clause = " or ".join(alarm_id_where_clause_list)

        where_clause += ' and (' + alarm_id_where_clause + ')'

        time_clause = ''
        if start_timestamp:
            time_clause += " and time >= " + str(
                int(start_timestamp * 1000000)) + "u "
        if end_timestamp:
            time_clause += " and time <= " + str(
                int(end_timestamp * 1000000)) + "u "

        offset_clause = self._build_offset_clause(offset)

        limit_clause = self._build_limit_clause(limit)

        query += where_clause + time_clause + offset_clause + limit_clause

        result = self.influxdb_client.query(query)

        if not result:
            return json_alarm_history_list

        if 'values' in result.raw['series'][0]:
            for point in result.raw['series'][0]['values']:
                point_list = list(point)
                alarm_point = {
                    u'timestamp': point_list[0],
                    u'alarm_id': point_list[1],
                    u'metrics': rest_utils.from_json(point_list[2]),
                    u'new_state': point_list[3],
                    u'old_state': point_list[4],
                    u'reason': point_list[5],
                    u'reason_data': point_list[6],
                    u'sub_alarms': rest_utils.from_json(point_list[7]),
                    u'id': str(self._get_millis_from_timestamp(
                        timeutils.parse_isotime(point_list[0])))}

                # The Java API formats these during JSON serialization.
                if alarm_point[u'sub_alarms']:
                    for sub_alarm in alarm_point[u'sub_alarms']:
                        sub_expr = sub_alarm['sub_alarm_expression']
                        metric_def = sub_expr['metric_definition']
                        sub_expr['metric_name'] = metric_def['name']
                        sub_expr['dimensions'] = metric_def['dimensions']
                        del sub_expr['metric_definition']

                json_alarm_history_list.append(alarm_point)

        return json_alarm_history_list
    except Exception as ex:
        LOG.exception(ex)
        raise exceptions.RepositoryException(ex)
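# Every alarm_history variant above ends with the same post-processing step:
# each sub-alarm's nested metric_definition is flattened into metric_name and
# dimensions. A self-contained sketch of that transformation (the function
# name and sample data are invented for illustration):
def flatten_sub_alarms(sub_alarms):
    for sub_alarm in sub_alarms:
        sub_expr = sub_alarm['sub_alarm_expression']
        # pop() removes metric_definition and returns it in one step.
        metric_def = sub_expr.pop('metric_definition')
        sub_expr['metric_name'] = metric_def['name']
        sub_expr['dimensions'] = metric_def['dimensions']
    return sub_alarms

# flatten_sub_alarms([{'sub_alarm_expression': {
#     'metric_definition': {'name': 'cpu.idle_perc',
#                           'dimensions': {'hostname': 'host-1'}},
#     'operator': 'LT', 'threshold': 10}}])
#   -> [{'sub_alarm_expression': {'operator': 'LT', 'threshold': 10,
#        'metric_name': 'cpu.idle_perc',
#        'dimensions': {'hostname': 'host-1'}}}]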