class PutMetricBolt(storm.BasicBolt):
    """Storm bolt that applies metric/alarm messages to the database.

    Keeps an in-memory cache of MetricMonitor objects (self.metrics) keyed
    by metric key and routes each incoming tuple to a handler based on its
    message id.
    """
    BOLT_NAME = "PutMetricBolt"

    def initialize(self, stormconf, context):
        self.cass = Cassandra()
        # metric_key (UUID) -> MetricMonitor, lazily populated per metric
        self.metrics = {}

    def log(self, msg):
        """Log a message prefixed with this bolt's name."""
        storm.log("[%s] %s" % (self.BOLT_NAME, msg))

    def tracelog(self, e):
        """Log a traceback line-by-line so each line survives storm.log."""
        msg = traceback.format_exc(e)
        for line in msg.splitlines():
            self.log("TRACE: " + line)

    def _get_monitor(self, metric_key):
        """Return the cached MetricMonitor for metric_key, creating it on
        demand."""
        if metric_key not in self.metrics:
            self.metrics[metric_key] = MetricMonitor(metric_key, self.cass)
        return self.metrics[metric_key]

    def process_put_metric_data_msg(self, metric_key, message):
        """Append a datapoint to the MetricArchive column family and to the
        in-memory metric, then update the StatArchive column family based on
        the in-memory data.
        """
        # Nothing to store when no value was supplied.
        if message["value"] is None:
            return
        monitor = self._get_monitor(metric_key)
        timestamp = utils.parse_strtime(message["timestamp"])
        monitor.put_metric_data(metric_key, timestamp=timestamp,
                                value=message["value"],
                                unit=message["unit"])

    def process_put_metric_alarm_msg(self, metric_key, message):
        """Attach the alarm carried by the message to its metric."""
        monitor = self._get_monitor(metric_key)
        project_id = message["project_id"]
        metricalarm = message["metricalarm"]
        monitor.put_alarm(project_id, metricalarm)

    def process_delete_metric_alarms_msg(self, metric_key, message):
        """Delete the alarm identified by message['alarmkey'] from its
        metric."""
        alarmkey = UUID(message["alarmkey"])
        self.log("debug: %s" % self.metrics.keys())
        self._get_monitor(metric_key).delete_metric_alarm(alarmkey)

    def process_set_alarm_state_msg(self, metric_key, message):
        """Overwrite an alarm's state both in memory and in the database."""
        project_id = message.get("project_id")
        alarm_name = message.get("alarm_name")
        state_reason_data = message.get("state_reason_data")
        alarm_key = self.cass.get_metric_alarm_key(project_id, alarm_name)
        metric = self._get_monitor(metric_key)
        metricalarm = metric.alarms[alarm_key]
        metricalarm["state_reason"] = message.get("state_reason")
        metricalarm["state_value"] = message.get("state_value")
        metricalarm["state_reason_data"] = message.get("state_reason_data")
        # write into database; state_reason_data column only when supplied
        alarm_columns = {"state_reason": message.get("state_reason"),
                         "state_value": message.get("state_value")}
        if state_reason_data:
            alarm_columns["state_reason_data"] = state_reason_data
        self.cass.put_metric_alarm(alarm_key, alarm_columns)

    def process_check_metric_alarms_msg(self):
        """Evaluate alarms for every metric whose check interval has elapsed
        or which has never been checked.
        """
        now = datetime.utcnow()
        for metric in self.metrics.itervalues():
            min_start_period = timedelta(seconds=metric.MIN_START_PERIOD)
            # `is None` (was `== None`); never-checked metrics also run.
            if metric.lastchecked is None or \
                    now - metric.lastchecked > min_start_period:
                metric.check_alarms()

    def process(self, tup):
        """Dispatch an incoming tuple to the proper handler by message id."""
        try:
            metric_key = UUID(tup.values[0]) if tup.values[0] else None
        except ValueError:
            self.log("badly formed hexadecimal UUID string - %s" %
                     tup.values[0])
            return
        message = json.loads(tup.values[1])
        message_id = message.get("message_id")
        if message_id == PUT_METRIC_DATA_MSG_ID:
            self.log("process put_metric_data_msg (%s)" % message)
            self.process_put_metric_data_msg(metric_key, message)
        elif message_id == PUT_METRIC_ALARM_MSG_ID:
            self.log("process put_metric_alarm_msg (%s)" % message)
            self.process_put_metric_alarm_msg(metric_key, message)
        elif message_id == DELETE_ALARMS_MSG_ID:
            # bugfix: this branch was mislabeled "put_metric_alarm_msg"
            self.log("process delete_metric_alarms_msg (%s)" % message)
            self.process_delete_metric_alarms_msg(metric_key, message)
        elif message_id == SET_ALARM_STATE_MSG_ID:
            self.log("process set_alarm_state_msg (%s)" % message)
            self.process_set_alarm_state_msg(metric_key, message)
        elif message_id == CHECK_METRIC_ALARM_MSG_ID:
            self.log("process check_metric_alarm_msg (%s)" % message)
            self.process_check_metric_alarms_msg()
        else:
            self.log("unknown message")
class UnpackMessageBolt(storm.BasicBolt):
    """Storm bolt that unpacks raw JSON messages and re-emits them keyed by
    the metric key they belong to, so downstream bolts can group by metric.
    """
    BOLT_NAME = "UnpackMessageBolt"

    def initialize(self, stormconf, context):
        self.cass = Cassandra()
        # md5 digest of (project, namespace, name, dimensions) -> metric key
        self.key_dict = {}

    def log(self, msg):
        """Log a message prefixed with this bolt's name."""
        storm.log("[%s] %s" % (self.BOLT_NAME, msg))

    def tracelog(self, e):
        """Log a traceback line-by-line so each line survives storm.log."""
        msg = traceback.format_exc(e)
        for line in msg.splitlines():
            self.log("TRACE: " + line)

    def get_metric_key(self, message):
        """Return the metric key for the message, creating the metric row
        when needed.

        Results are memoized in self.key_dict, bounded by the module-level
        ``threshhold``; popitem() evicts an arbitrary entry (crude bound,
        not LRU).
        """
        memory_key = md5.md5(str((message['project_id'],
                                  message['namespace'],
                                  message['metric_name'],
                                  message['dimensions']))).digest()
        if memory_key not in self.key_dict:
            if len(self.key_dict) > threshhold:
                self.key_dict.popitem()
            self.key_dict[memory_key] = self.cass.get_metric_key_or_create(
                message['project_id'], message['namespace'],
                message['metric_name'], message['dimensions'],
                message['unit'])
        return self.key_dict[memory_key]

    def get_alarm_metric_key(self, alarmkey):
        """Return the metric key (str) of the given alarm, or None if the
        alarm does not exist."""
        alarm = self.cass.get_metric_alarm(alarmkey)
        if alarm:
            return str(alarm.get('metric_key'))
        else:
            return None

    def process(self, tup):
        """Resolve the metric key for the incoming message and re-emit it."""
        message_buf = tup.values[0]
        message = json.loads(message_buf)
        message_id = message.get('message_id')
        if message_id == PUT_METRIC_DATA_MSG_ID:
            metric_key = str(self.get_metric_key(message))
            storm.emit([metric_key, message_buf])
        elif message_id == PUT_METRIC_ALARM_MSG_ID:
            metric_key = message.get('metric_key')
            storm.emit([metric_key, message_buf])
        elif message_id == DELETE_ALARMS_MSG_ID:
            project_id = message.get('project_id')
            alarmkeys = message.get('alarmkeys')
            for alarmkey in alarmkeys:
                try:
                    alarmkey_uuid = UUID(alarmkey)
                    metric_key = self.get_alarm_metric_key(alarmkey_uuid)
                    # bugfix: test for None BEFORE str() conversion;
                    # str(None) == "None" is truthy and defeated this guard.
                    if metric_key:
                        message['alarmkey'] = alarmkey
                        storm.emit([str(metric_key), json.dumps(message)])
                except Exception as e:
                    storm.log("Alarm %s does not exists" % alarmkey)
                    storm.log(traceback.format_exc(e))
        elif message_id == SET_ALARM_STATE_MSG_ID:
            project_id = message.get('project_id')
            alarm_name = message.get('alarm_name')
            alarm_key = self.cass.get_metric_alarm_key(project_id, alarm_name)
            if alarm_key:
                alarm = self.cass.get_metric_alarm(alarm_key)
                metric_key = str(alarm.get('metric_key'))
                storm.emit([metric_key, json.dumps(message)])
class API(object):
    """Synaps CloudWatch-style API facade backed by Cassandra and RPC."""

    # statistic name -> pandas rolling-window aggregation function
    ROLLING_FUNC_MAP = {
        'Average': rolling_mean,
        'Minimum': rolling_min,
        'Maximum': rolling_max,
        'SampleCount': rolling_sum,
        'Sum': rolling_sum,
    }

    def __init__(self):
        self.cass = Cassandra()
        self.rpc = rpc.RemoteProcedureCall()

    def delete_alarms(self, project_id, alarm_names):
        """Validate every alarm name exists, then queue their deletion.

        Raises ResourceNotFound for the first unknown alarm name.
        """
        alarmkeys = []
        for alarm_name in alarm_names:
            k = self.cass.get_metric_alarm_key(project_id, alarm_name)
            if not k:
                raise ResourceNotFound("Alarm %s does not exists." %
                                       alarm_name)
            alarmkeys.append(str(k))
        body = {'project_id': project_id,
                'alarmkeys': alarmkeys}  # UUID str
        self.rpc.send_msg(rpc.DELETE_ALARMS_MSG_ID, body)
        LOG.info("DELETE_ALARMS_MSG sent")

    def describe_alarms(self, project_id, action_prefix=None,
                        alarm_name_prefix=None, alarm_names=None,
                        max_records=None, next_token=None, state_value=None):
        """
        params:
            project_id: string
            action_prefix: TODO: not implemented yet.
            alarm_name_prefix: string
            alarm_names: string list
            max_records: integer
            next_token: string (uuid type)
            state_value: string (OK | ALARM | INSUFFICIENT_DATA)
        """
        alarms = self.cass.describe_alarms(project_id, action_prefix,
                                           alarm_name_prefix, alarm_names,
                                           max_records, next_token,
                                           state_value)
        return alarms

    def describe_alarms_for_metric(self, project_id, namespace, metric_name,
                                   dimensions=None, period=None,
                                   statistic=None, unit=None):
        """
        params:
            project_id: string
            metric_name: string
            namespace: string
            dimensions: dict
            period: integer
            statistic: string (SampleCount | Average | Sum | Minimum | Maximum)
            unit: string
        """
        alarms = self.cass.describe_alarms_for_metric(
            project_id, namespace, metric_name, dimensions=dimensions,
            period=period, statistic=statistic, unit=unit)
        return alarms

    def describe_alarm_history(self, project_id, alarm_name=None,
                               end_date=None, history_item_type=None,
                               max_records=None, next_token=None,
                               start_date=None):
        """Return alarm history records matching the given filters."""
        histories = self.cass.describe_alarm_history(
            alarm_name=alarm_name, end_date=end_date,
            history_item_type=history_item_type, max_records=max_records,
            next_token=next_token, start_date=start_date,
            project_id=project_id)
        return histories

    def set_alarm_actions(self, project_id, alarm_names, enabled):
        """Enable or disable actions for the named alarms.

        Raises ResourceNotFound when an alarm name is unknown; previously a
        missing alarm silently wrote a row under key None.
        """
        for alarm_name in alarm_names:
            alarm_key = self.cass.get_metric_alarm_key(project_id, alarm_name)
            if not alarm_key:
                raise ResourceNotFound("Alarm %s does not exists." %
                                       alarm_name)
            self.cass.put_metric_alarm(alarm_key,
                                       {'actions_enabled': enabled})

    def set_alarm_state(self, project_id, alarm_name, state_reason,
                        state_value, state_reason_data=None):
        """Queue a manual alarm state change.

        Raises ResourceNotFound when the alarm does not exist.
        """
        k = self.cass.get_metric_alarm_key(project_id, alarm_name)
        if not k:
            raise ResourceNotFound("Alarm %s does not exists." % alarm_name)
        body = {'project_id': project_id,
                'alarm_name': alarm_name,
                'state_reason': state_reason,
                'state_value': state_value,
                'state_reason_data': state_reason_data}
        self.rpc.send_msg(rpc.SET_ALARM_STATE_MSG_ID, body)
        LOG.info("SET_ALARM_STATE_MSG sent")

    def get_metric_statistics(self, project_id, end_time, metric_name,
                              namespace, period, start_time, statistics,
                              unit=None, dimensions=None):
        """Return a list of statistics datapoints for the metric matching
        the given conditions."""
        def to_datapoint(df, idx):
            # keep only timestamps that still have data after dropping NaNs
            datapoint = df.ix[idx].dropna()
            if len(datapoint):
                return idx, datapoint

        end_idx = end_time.replace(second=0, microsecond=0)
        start_idx = start_time.replace(second=0, microsecond=0)
        # extend the range backwards by one period so the first rolling
        # window is fully populated
        start_ana_idx = start_idx - datetools.Minute() * (period / 60)
        daterange = DateRange(start_idx, end_idx, offset=datetools.Minute())
        daterange_ana = DateRange(start_ana_idx, end_idx,
                                  offset=datetools.Minute())

        # load default unit for metric from database
        if unit == "None" or not unit:
            metric_key = self.cass.get_metric_key(
                project_id=project_id, namespace=namespace,
                metric_name=metric_name, dimensions=dimensions)
            if metric_key:
                unit = self.cass.get_metric_unit(metric_key)
            else:
                unit = "None"

        # load statistics data from database
        stats = self.cass.get_metric_statistics(
            project_id=project_id, namespace=namespace,
            metric_name=metric_name, start_time=start_ana_idx,
            end_time=end_time, period=period, statistics=statistics,
            dimensions=dimensions)

        period = period / 60  # convert sec to min
        stat = DataFrame(index=daterange)

        for statistic, series in zip(statistics, stats):
            func = self.ROLLING_FUNC_MAP[statistic]
            if statistic == 'SampleCount':
                # missing samples count as zero
                ts = TimeSeries(series, index=daterange_ana).fillna(0)
            else:
                ts = TimeSeries(series, index=daterange_ana)
            stat[statistic] = func(ts, period, min_periods=0)

        ret = filter(None, (to_datapoint(stat, i) for i in stat.index))
        return ret

    def list_metrics(self, project_id, next_token=None, dimensions=None,
                     metric_name=None, namespace=None):
        """Return the list of metrics matching the given conditions."""
        metrics = self.cass.list_metrics(project_id, namespace, metric_name,
                                         dimensions, next_token)
        return metrics

    def put_metric_alarm(self, project_id, metricalarm):
        """Insert or update an alarm, record an alarm history entry and
        notify the topology; ensures the target metric exists (creating it
        if necessary). Returns an empty dict.
        """
        def metricalarm_for_json(metricalarm):
            # camelCase view of the alarm columns for the history JSON blob
            alarm_for_json = {
                'actionEnabled': metricalarm.get('actions_enabled', False),
                'alarmActions': metricalarm.get('alarm_actions', []),
                'alarmArn': metricalarm.get('alarm_arn'),
                'alarmConfigurationUpdatedTimestamp':
                    metricalarm.get('alarm_configuration_updated_timestamp'),
                'alarmDescription': metricalarm.get('alarm_description'),
                'alarmName': metricalarm.get('alarm_name'),
                'comparisonOperator': metricalarm.get('comparison_operator'),
                'dimensions': metricalarm.get('dimensions'),
                'evaluationPeriods': metricalarm.get('evaluation_periods'),
                'insufficientDataActions':
                    metricalarm.get('insufficient_data_actions', []),
                'metricName': metricalarm.get('metric_name'),
                'namespace': metricalarm.get('namespace'),
                'okactions': metricalarm.get('ok_actions', []),
                'statistic': metricalarm.get('statistic'),
                'threshold': metricalarm.get('threshold'),
                'unit': metricalarm.get('unit'),
            }
            return alarm_for_json

        now = utils.utcnow()
        metricalarm = metricalarm.to_columns()

        # ensure the target metric exists
        metric_key = self.cass.get_metric_key_or_create(
            project_id=project_id,
            namespace=metricalarm['namespace'],
            metric_name=metricalarm['metric_name'],
            dimensions=json.loads(metricalarm['dimensions']),
            unit=metricalarm['unit'])
        metricalarm['project_id'] = project_id
        metricalarm['metric_key'] = metric_key
        metricalarm['alarm_arn'] = "arn:spcs:synaps:%s:alarm:%s" % (
            project_id, metricalarm['alarm_name'])
        metricalarm['alarm_configuration_updated_timestamp'] = now

        # does the alarm already exist?
        alarm_key = self.cass.get_metric_alarm_key(
            project_id=project_id, alarm_name=metricalarm['alarm_name'])
        if alarm_key:
            history_type = 'Update'
            before_alarm = self.cass.get_metric_alarm(alarm_key)
            if before_alarm['metric_key'] != metricalarm['metric_key']:
                raise InvalidRequest("Metric cannot be changed.")
            # carry over the current state from the stored alarm
            metricalarm['state_updated_timestamp'] = \
                before_alarm['state_updated_timestamp']
            metricalarm['state_reason'] = before_alarm['state_reason']
            metricalarm['state_reason_data'] = \
                before_alarm['state_reason_data']
            metricalarm['state_value'] = before_alarm['state_value']
        else:
            history_type = "Create"
            alarm_key = uuid.uuid4()
            metricalarm['state_updated_timestamp'] = utils.utcnow()
            metricalarm['state_reason'] = "Unchecked: Initial alarm creation"
            metricalarm['state_reason_data'] = json.dumps({})
            metricalarm['state_value'] = "INSUFFICIENT_DATA"

        # insert alarm into database
        self.cass.put_metric_alarm(alarm_key, metricalarm)
        LOG.debug("metric alarm inserted alarm key: %s" % (alarm_key))

        # to make json, convert datetime type into str
        metricalarm['state_updated_timestamp'] = utils.strtime(
            metricalarm['state_updated_timestamp'])
        metricalarm['alarm_configuration_updated_timestamp'] = utils.strtime(
            metricalarm['alarm_configuration_updated_timestamp'])
        metricalarm['metric_key'] = str(metric_key)

        if history_type == "Update":
            history_data = json.dumps({
                'updatedAlarm': metricalarm_for_json(metricalarm),
                'type': history_type, 'version': '1.0'})
            summary = "Alarm %s updated" % metricalarm['alarm_name']
        else:
            history_data = json.dumps({
                'createdAlarm': metricalarm_for_json(metricalarm),
                'type': history_type, 'version': '1.0'})
            summary = "Alarm %s created" % metricalarm['alarm_name']

        history_key = uuid.uuid4()
        history_column = {
            'project_id': project_id,
            'alarm_key': alarm_key,
            'alarm_name': metricalarm['alarm_name'],
            'history_data': history_data,
            'history_item_type': 'ConfigurationUpdate',
            'history_summary': summary,
            'timestamp': utils.utcnow(),
        }
        self.cass.insert_alarm_history(history_key, history_column)

        message = {'project_id': project_id,
                   'metric_key': str(metric_key),
                   'metricalarm': metricalarm}
        self.rpc.send_msg(rpc.PUT_METRIC_ALARM_MSG_ID, message)
        LOG.info("PUT_METRIC_ALARM_MSG sent")
        return {}

    def put_metric_data(self, project_id, namespace, metric_name, dimensions,
                        value, unit, timestamp, is_admin=False):
        """Queue a metric datapoint message and return an empty dict.

        Raises AdminRequired for the reserved "SPCS/" namespaces unless the
        caller is an admin.
        """
        if namespace.startswith("SPCS/") and not is_admin:
            raise AdminRequired()
        message = {'project_id': project_id, 'namespace': namespace,
                   'metric_name': metric_name, 'dimensions': dimensions,
                   'value': value, 'unit': unit, 'timestamp': timestamp}
        self.rpc.send_msg(rpc.PUT_METRIC_DATA_MSG_ID, message)
        LOG.info("PUT_METRIC_DATA_MSG sent")
        return {}
class PutMetricBolt(storm.BasicBolt):
    """Storm bolt that applies metric/alarm messages to Cassandra and to an
    in-memory MetricMonitor cache, with memcached-based de-duplication of
    put_metric_data messages.
    """
    BOLT_NAME = "PutMetricBolt"

    def initialize(self, stormconf, context):
        self.pid = os.getpid()
        self.cass = Cassandra()
        # metric_key (UUID) -> MetricMonitor, lazily populated per metric
        self.metrics = {}
        self.mc = memcache.Client(FLAGS.memcached_servers, debug=0)

    def process_put_metric_data_msg(self, metric_key, message):
        """Put metric data into both memory and database"""
        # Load statistics data in memory; retry because the metric row may
        # not be visible yet right after creation.
        if metric_key not in self.metrics:
            max_retries = 3
            # One initial attempt plus up to max_retries retries.
            # bugfix: the original tested `i + 1 < max_retries`, which made
            # the last range() iteration unreachable (off-by-one).
            for i in range(max_retries + 1):
                try:
                    self.metrics[metric_key] = MetricMonitor(metric_key,
                                                             self.cass)
                    break
                except ResourceNotFound:
                    if i < max_retries:
                        LOG.warn("Metric %s is not in the database. "
                                 "retry... %d", metric_key, i + 1)
                        time.sleep(1)
                    else:
                        LOG.error("Metric %s is not in the database.",
                                  metric_key)
                        return

        timestamp = utils.parse_strtime(message['timestamp'])
        self.metrics[metric_key].put_metric_data(metric_key,
                                                 timestamp=timestamp,
                                                 value=message['value'],
                                                 unit=message['unit'])

    def process_put_metric_alarm_msg(self, metric_key, message):
        """Create or update an alarm, write it and a history record to the
        database, and attach it to the in-memory metric.
        """
        def get_alarm_key(project_id, alarm_name):
            # existing alarm key, or None when the alarm is new
            key = self.cass.get_metric_alarm_key(project_id, alarm_name)
            return key

        def metricalarm_for_json(metricalarm):
            # camelCase view of the alarm columns for the history JSON blob
            cut = metricalarm.get('alarm_configuration_updated_timestamp')
            alarm_for_json = {
                'actionEnabled': metricalarm.get('actions_enabled', False),
                'alarmActions': metricalarm.get('alarm_actions', []),
                'alarmArn': metricalarm.get('alarm_arn'),
                'alarmConfigurationUpdatedTimestamp': utils.strtime(cut),
                'alarmDescription': metricalarm.get('alarm_description'),
                'alarmName': metricalarm.get('alarm_name'),
                'comparisonOperator': metricalarm.get('comparison_operator'),
                'dimensions': metricalarm.get('dimensions'),
                'evaluationPeriods': metricalarm.get('evaluation_periods'),
                'insufficientDataActions':
                    metricalarm.get('insufficient_data_actions', []),
                'metricName': metricalarm.get('metric_name'),
                'namespace': metricalarm.get('namespace'),
                'okactions': metricalarm.get('ok_actions', []),
                'statistic': metricalarm.get('statistic'),
                'threshold': metricalarm.get('threshold'),
                'unit': metricalarm.get('unit'),
            }
            return alarm_for_json

        if metric_key not in self.metrics:
            self.metrics[metric_key] = MetricMonitor(metric_key, self.cass)

        project_id = message['project_id']
        metricalarm = message['metricalarm']

        # build metricalarm column, alarmhistory column
        alarm_key = get_alarm_key(project_id, metricalarm['alarm_name'])
        history_type = 'Update' if alarm_key else 'Create'
        now = utils.utcnow()
        if history_type == 'Update':
            # preserve the stored state fields of the existing alarm
            original_alarm = self.cass.get_metric_alarm(alarm_key)
            for dict_key in ['state_updated_timestamp', 'state_reason',
                             'state_reason_data', 'state_value',
                             'project_id']:
                metricalarm[dict_key] = original_alarm[dict_key]
            metricalarm['alarm_configuration_updated_timestamp'] = now
            history_data = json.dumps({
                'updatedAlarm': metricalarm_for_json(metricalarm),
                'type': history_type, 'version': '1.0'})
            summary = "Alarm %s updated" % metricalarm['alarm_name']
        else:
            alarm_key = uuid.uuid4()
            state_reason = "Unchecked: Initial alarm creation"
            metricalarm.update({'state_updated_timestamp': now,
                                'alarm_configuration_updated_timestamp': now,
                                'state_reason': state_reason,
                                'state_reason_data': json.dumps({}),
                                'state_value': "INSUFFICIENT_DATA",
                                'project_id': project_id})
            history_data = json.dumps({
                'createdAlarm': metricalarm_for_json(metricalarm),
                'type': history_type, 'version': '1.0'})
            summary = "Alarm %s created" % metricalarm['alarm_name']

        metricalarm['metric_key'] = metric_key
        history_key = uuid.uuid4()
        history_column = {
            'project_id': project_id,
            'alarm_key': alarm_key,
            'alarm_name': metricalarm['alarm_name'],
            'history_data': history_data,
            'history_item_type': 'ConfigurationUpdate',
            'history_summary': summary,
            'timestamp': utils.utcnow(),
        }

        self.cass.put_metric_alarm(alarm_key, metricalarm)
        self.cass.insert_alarm_history(history_key, history_column)
        LOG.info("metric alarm inserted: %s %s", alarm_key, metricalarm)

        # load metric in memory
        self.metrics[metric_key].put_alarm(alarm_key, metricalarm)

    def process_delete_metric_alarms_msg(self, metric_key, message):
        """Delete the alarm identified by message['alarmkey'] from its
        metric."""
        alarmkey = UUID(message['alarmkey'])
        LOG.debug("Metric keys %s", self.metrics.keys())
        if metric_key not in self.metrics:
            self.metrics[metric_key] = MetricMonitor(metric_key, self.cass)
        self.metrics[metric_key].delete_metric_alarm(alarmkey)

    def process_set_alarm_state_msg(self, metric_key, message):
        """Apply a manual alarm state change in memory and in the
        database."""
        project_id = message.get('project_id')
        alarm_name = message.get('alarm_name')
        state_reason_data = message.get('state_reason_data')

        if metric_key not in self.metrics:
            self.metrics[metric_key] = MetricMonitor(metric_key, self.cass)
        metric = self.metrics[metric_key]

        ret = self.cass.get_metric_alarm_key(project_id, alarm_name)
        if ret:
            alarm_key = ret
            try:
                metricalarm = metric.alarms[alarm_key]
            except KeyError:
                LOG.warn("alarm key [%s] is found, but alarm is not found.",
                         alarm_key)
                return
        else:
            # bugfix: the original logged `alarm_key`, which is unbound in
            # this branch and raised UnboundLocalError.
            LOG.warn("alarm [%s:%s] is not found.", project_id, alarm_name)
            return

        metricalarm['state_reason'] = message.get('state_reason')
        metricalarm['state_value'] = message.get('state_value')
        metricalarm['state_reason_data'] = message.get('state_reason_data')

        # write into database; state_reason_data column only when supplied
        alarm_columns = {'state_reason': message.get('state_reason'),
                         'state_value': message.get('state_value')}
        if state_reason_data:
            alarm_columns['state_reason_data'] = state_reason_data
        alarm_columns['project_id'] = project_id
        self.cass.put_metric_alarm(alarm_key, alarm_columns)

    def process_check_metric_alarms_msg(self, message):
        """Evaluate alarms on live metrics and purge stale ones."""
        query_time = datetime.utcnow()
        stale_metrics = []
        ready_to_evaluate = message.get('ready_to_evaluate')
        for key, metric in self.metrics.iteritems():
            is_stale = metric.is_stale()
            if is_stale:
                stale_metrics.append(key)
            if (not is_stale) and ready_to_evaluate:
                metric.check_alarms(query_time)
        # delete stale metrics after the loop so the dict is not mutated
        # while being iterated
        for key in stale_metrics:
            try:
                metric = self.metrics.pop(key)
                metric.delete()
                LOG.audit("Stale metric(%s) is deleted", str(key))
            except KeyError:
                LOG.error("KeyError occurred when delete stale metric(%s)",
                          str(key))

    def process(self, tup):
        """Dispatch a tuple to a handler; de-duplicate put_metric_data
        messages by message_uuid via memcached.
        """
        message = json.loads(tup.values[1])
        message_id = message['message_id']
        message_uuid = message.get('message_uuid', None)
        LOG.info("start processing msg[%s:%s]", message_id, message_uuid)
        try:
            metric_key = UUID(tup.values[0]) if tup.values[0] else None
        except ValueError:
            LOG.error("badly formed hexadecimal UUID string - %s",
                      tup.values[0])
            return

        if message_id == PUT_METRIC_DATA_MSG_ID:
            # message deduplicate
            # NOTE(review): messages without message_uuid are silently
            # dropped here - confirm that is intended.
            if message_uuid:
                mckey = "%s_message_uuid" % message_uuid
                if not self.mc.get(mckey):
                    # 300 seconds TTL
                    self.mc.set(mckey, 1, 300)
                    LOG.info("process put_metric_data_msg (%s)", message)
                    self.process_put_metric_data_msg(metric_key, message)
                else:
                    LOG.info("Message duplicated. %s", message_uuid)
        elif message_id == PUT_METRIC_ALARM_MSG_ID:
            LOG.info("process put_metric_alarm_msg (%s)", message)
            self.process_put_metric_alarm_msg(metric_key, message)
        elif message_id == DELETE_ALARMS_MSG_ID:
            LOG.info("process delete_alarms_msg (%s)", message)
            self.process_delete_metric_alarms_msg(metric_key, message)
        elif message_id == SET_ALARM_STATE_MSG_ID:
            LOG.info("process set_alarm_state_msg (%s)", message)
            self.process_set_alarm_state_msg(metric_key, message)
        elif message_id == CHECK_METRIC_ALARM_MSG_ID:
            LOG.info("process check_metric_alarm_msg (%s)", message)
            self.process_check_metric_alarms_msg(message)
        else:
            LOG.error("unknown message")
class UnpackMessageBolt(storm.BasicBolt):
    """Storm bolt that resolves each raw JSON message to its metric key and
    re-emits (metric_key, message) so downstream bolts can group by metric.
    """
    BOLT_NAME = "UnpackMessageBolt"

    def initialize(self, stormconf, context):
        self.pid = os.getpid()
        self.cass = Cassandra()
        self.key_dict = {}
        self.mc = memcache.Client(FLAGS.memcached_servers, debug=0)

    def get_metric_key(self, message):
        """Return the metric key for the message, creating the metric row
        when needed; memoized in memcached for 3000 seconds.
        """
        project_id = message['project_id']
        namespace = message['namespace']
        metric_name = message['metric_name']
        dimensions = message['dimensions']
        unit = message['unit']
        key = utils.generate_metric_key(project_id, namespace, metric_name,
                                        dimensions)
        memory_key = "metric_%s" % str(key)
        metric_key = self.mc.get(memory_key)
        if not metric_key:
            metric_key = self.cass.get_metric_key_or_create(
                project_id, namespace, metric_name, dimensions, unit)
            self.mc.set(memory_key, metric_key, 3000)
        return metric_key

    def get_alarm_metric_key(self, alarmkey):
        """Return the metric key (str) of the given alarm, or None if the
        alarm does not exist."""
        alarm = self.cass.get_metric_alarm(alarmkey)
        if alarm:
            return str(alarm.get('metric_key'))
        else:
            return None

    def process(self, tup):
        """Resolve the metric key for the incoming message and re-emit it."""
        message_buf = tup.values[0]
        message = json.loads(message_buf)
        message_id = message['message_id']
        message_uuid = message['message_uuid']
        LOG.info("start processing msg[%s:%s]" % (message_id, message_uuid))

        if message_id == PUT_METRIC_DATA_MSG_ID:
            metric_key = str(self.get_metric_key(message))
            storm.emit([metric_key, message_buf])
        elif message_id == PUT_METRIC_ALARM_MSG_ID:
            metric_key = message.get('metric_key')
            storm.emit([metric_key, message_buf])
        elif message_id == DELETE_ALARMS_MSG_ID:
            project_id = message['project_id']
            alarmkeys = message['alarmkeys']
            for alarmkey in alarmkeys:
                try:
                    alarmkey_uuid = UUID(alarmkey)
                    metric_key = self.get_alarm_metric_key(alarmkey_uuid)
                    # bugfix: test for None BEFORE str() conversion;
                    # str(None) == "None" is truthy and defeated this guard.
                    if metric_key:
                        message['alarmkey'] = alarmkey
                        storm.emit([str(metric_key), json.dumps(message)])
                except Exception:
                    LOG.error("Alarm %s does not exists", alarmkey)
        elif message_id == SET_ALARM_STATE_MSG_ID:
            project_id = message['project_id']
            alarm_name = message['alarm_name']
            alarm_key = self.cass.get_metric_alarm_key(project_id, alarm_name)
            if alarm_key:
                alarm = self.cass.get_metric_alarm(alarm_key)
                metric_key = str(alarm['metric_key'])
                storm.emit([metric_key, json.dumps(message)])
class API(object):
    """Synaps monitoring API facade.

    Reads alarm/metric data from Cassandra directly and queues write
    operations as RPC messages for the Storm topology to apply
    asynchronously.
    """

    # statistic name -> pandas rolling-window aggregation function
    ROLLING_FUNC_MAP = {
        'Average': rolling_mean,
        'Minimum': rolling_min,
        'Maximum': rolling_max,
        'SampleCount': rolling_sum,
        'Sum': rolling_sum,
    }

    def __init__(self):
        self.cass = Cassandra()
        self.rpc = rpc.RemoteProcedureCall()

    def delete_alarms(self, context, project_id, alarm_names):
        """Validate every alarm name exists, then queue their deletion.

        Raises ResourceNotFound for the first unknown alarm name.
        """
        alarmkeys = []
        for alarm_name in alarm_names:
            k = self.cass.get_metric_alarm_key(project_id, alarm_name)
            if not k:
                raise ResourceNotFound("Alarm %s does not exists." %
                                       alarm_name)
            alarmkeys.append(str(k))
        body = {'project_id': project_id,
                'alarmkeys': alarmkeys,  # UUID str
                'context': context.to_dict()}
        self.rpc.send_msg(rpc.DELETE_ALARMS_MSG_ID, body)
        LOG.info("DELETE_ALARMS_MSG sent")

    def describe_alarms(self, project_id, action_prefix=None,
                        alarm_name_prefix=None, alarm_names=None,
                        max_records=None, next_token=None, state_value=None):
        """
        params:
            project_id: string
            action_prefix: TODO: not implemented yet.
            alarm_name_prefix: string
            alarm_names: string list
            max_records: integer
            next_token: string (uuid type)
            state_value: string (OK | ALARM | INSUFFICIENT_DATA)
        """
        alarms = self.cass.describe_alarms(project_id, action_prefix,
                                           alarm_name_prefix, alarm_names,
                                           max_records, next_token,
                                           state_value)
        return alarms

    def describe_alarms_for_metric(self, project_id, namespace, metric_name,
                                   dimensions=None, period=None,
                                   statistic=None, unit=None):
        """
        params:
            project_id: string
            metric_name: string
            namespace: string
            dimensions: dict
            period: integer
            statistic: string (SampleCount | Average | Sum | Minimum | Maximum)
            unit: string
        """
        alarms = self.cass.describe_alarms_for_metric(
            project_id, namespace, metric_name, dimensions=dimensions,
            period=period, statistic=statistic, unit=unit)
        return alarms

    def describe_alarm_history(self, project_id, alarm_name=None,
                               end_date=None, history_item_type=None,
                               max_records=None, next_token=None,
                               start_date=None):
        """Return alarm history records matching the given filters."""
        histories = self.cass.describe_alarm_history(
            alarm_name=alarm_name, end_date=end_date,
            history_item_type=history_item_type, max_records=max_records,
            next_token=next_token, start_date=start_date,
            project_id=project_id)
        return histories

    def set_alarm_actions(self, context, project_id, alarm_names, enabled):
        """Enable or disable actions for the named alarms and record an
        alarm history entry for each.

        The first pass validates all names (raising InvalidParameterValue)
        before the second pass writes anything.
        """
        for alarm_name in alarm_names:
            alarm_key = self.cass.get_metric_alarm_key(project_id, alarm_name)
            if not alarm_key:
                raise InvalidParameterValue("Alarm %s does not exist" %
                                            alarm_name)
        for alarm_name in alarm_names:
            alarm_key = self.cass.get_metric_alarm_key(project_id, alarm_name)
            history_data = {'actions_enabled': enabled,
                            'project_id': project_id}
            self.cass.put_metric_alarm(alarm_key, history_data)
            if enabled:
                summary = "Alarm actions for %s are enabled" % alarm_name
            else:
                summary = "Alarm actions for %s are disabled" % alarm_name
            history_key = uuid.uuid4()
            history_column = {'project_id': project_id,
                              'alarm_key': alarm_key,
                              'alarm_name': alarm_name,
                              'history_data': json.dumps(history_data),
                              'history_item_type': 'ConfigurationUpdate',
                              'history_summary': summary,
                              'timestamp': utils.utcnow()}
            self.cass.insert_alarm_history(history_key, history_column)

    def set_alarm_state(self, context, project_id, alarm_name, state_reason,
                        state_value, state_reason_data=None):
        """Queue a manual alarm state change.

        Raises ResourceNotFound when the alarm does not exist.
        """
        k = self.cass.get_metric_alarm_key(project_id, alarm_name)
        if not k:
            raise ResourceNotFound("Alarm %s does not exists." % alarm_name)
        body = {'project_id': project_id,
                'alarm_name': alarm_name,
                'state_reason': state_reason,
                'state_value': state_value,
                'state_reason_data': state_reason_data,
                'context': context.to_dict()}
        self.rpc.send_msg(rpc.SET_ALARM_STATE_MSG_ID, body)
        LOG.info("SET_ALARM_STATE_MSG sent")

    def get_metric_statistics(self, project_id, end_time, metric_name,
                              namespace, period, start_time, statistics,
                              unit=None, dimensions=None):
        """Return (datapoints, unit) of statistics for the metric matching
        the given conditions."""
        def to_datapoint(df, idx):
            # keep only timestamps that still have data after dropping NaNs
            datapoint = df.ix[idx].dropna()
            if len(datapoint):
                return idx, datapoint

        end_idx = end_time.replace(second=0, microsecond=0)
        start_idx = start_time.replace(second=0, microsecond=0)
        # extend the range backwards by one period so the first rolling
        # window is fully populated
        start_ana_idx = start_idx - datetools.Minute() * (period / 60)
        daterange = DateRange(start_idx, end_idx, offset=datetools.Minute())
        daterange_ana = DateRange(start_ana_idx, end_idx,
                                  offset=datetools.Minute())

        # load default unit for metric from database
        if unit == "None" or not unit:
            metric_key = self.cass.get_metric_key(
                project_id=project_id, namespace=namespace,
                metric_name=metric_name, dimensions=dimensions)
            if metric_key:
                unit = self.cass.get_metric_unit(metric_key)
            else:
                unit = "None"

        # load statistics data from database
        stats = self.cass.get_metric_statistics(
            project_id=project_id, namespace=namespace,
            metric_name=metric_name, start_time=start_ana_idx,
            end_time=end_time, period=period, statistics=statistics,
            dimensions=dimensions)

        period = period / 60  # convert sec to min
        stat = DataFrame(index=daterange)

        for statistic, series in zip(statistics, stats):
            func = self.ROLLING_FUNC_MAP[statistic]
            ts = TimeSeries(series, index=daterange_ana)
            rolled_ts = func(ts, period, min_periods=0)
            # take every period-th row of the rolling result
            stat[statistic] = rolled_ts.ix[::period]
            LOG.debug("stat %s\n%s" % (statistic, stat[statistic]))

        ret = filter(None, (to_datapoint(stat, i) for i in stat.index))
        return ret, unit

    def list_metrics(self, project_id, next_token=None, dimensions=None,
                     metric_name=None, namespace=None):
        """ List Metrics """
        metrics = self.cass.list_metrics(project_id, namespace, metric_name,
                                         dimensions, next_token)
        return metrics

    def put_metric_alarm(self, context, project_id, metricalarm):
        """ Send put metric alarm message to Storm """
        def _validate_actions(alarm):
            # every group-notification action must name an existing group
            for actions in (alarm.ok_actions,
                            alarm.insufficient_data_actions,
                            alarm.alarm_actions):
                for action in actions:
                    if utils.validate_groupnotification_action(action):
                        group = utils.parse_groupnotification_action(action)
                        if not self.cass.get_notification_group(group):
                            raise InvalidNotificationGroup()

        now = utils.utcnow()

        _validate_actions(metricalarm)
        metricalarm = metricalarm.to_columns()
        alarm_name = metricalarm['alarm_name']
        namespace = metricalarm['namespace']
        metric_name = metricalarm['metric_name']
        dimensions = json.loads(metricalarm['dimensions'])

        # check if we have metric in database
        metric_key = self.cass.get_metric_key_or_create(
            project_id=project_id, namespace=namespace,
            metric_name=metric_name, dimensions=dimensions,
            unit=metricalarm['unit'])
        update_data = {
            'project_id': project_id,
            'metric_key': str(metric_key),
            'alarm_arn': "arn:spcs:synaps:%s:alarm:%s" % (project_id,
                                                          alarm_name),
            'alarm_configuration_updated_timestamp': utils.strtime(now)
        }
        metricalarm.update(update_data)

        # check if metric is changed
        alarm_key = self.cass.get_metric_alarm_key(project_id=project_id,
                                                   alarm_name=alarm_name)
        if alarm_key:
            original_alarm = self.cass.get_metric_alarm(alarm_key)
            if (str(original_alarm['metric_key']) !=
                    str(metricalarm['metric_key'])):
                raise InvalidRequest("Metric cannot be changed. "
                                     "Delete alarm and retry.")
        else:
            # If alarm is newly added, check quotas
            # check alarm quota per project
            project_quota = FLAGS.get('alarm_quota_per_project')
            alarms_in_project = self.cass.get_alarm_count(project_id)
            if alarms_in_project >= project_quota:
                LOG.info("Too many alarms(%d) in the project %s",
                         alarms_in_project, project_id)
                raise ProjectAlarmQuotaExceeded()

            # check alarm quota per metric
            metric_quota = FLAGS.get('alarm_quota_per_metric')
            alarms_per_metric = self.cass.get_alarms_per_metric_count(
                project_id, namespace, metric_name, dimensions)
            if alarms_per_metric >= metric_quota:
                LOG.info("Too many alarms(%d) for this metric",
                         alarms_per_metric)
                raise MetricAlarmQuotaExceeded()

        message = {'project_id': project_id, 'metric_key': str(metric_key),
                   'metricalarm': metricalarm, 'context': context.to_dict()}
        self.rpc.send_msg(rpc.PUT_METRIC_ALARM_MSG_ID, message)
        LOG.info("PUT_METRIC_ALARM_MSG sent")
        return {}

    def put_metric_data(self, context, project_id, namespace, metric_name,
                        dimensions, value, unit, timestamp=None,
                        is_admin=False):
        """Queue a metric datapoint; the timestamp defaults to now (UTC).

        Raises AdminRequired for the reserved admin namespace unless the
        caller is an admin.
        """
        admin_namespace = FLAGS.get('admin_namespace')
        if namespace.startswith(admin_namespace) and not is_admin:
            raise AdminRequired()
        timestamp = timestamp or utils.strtime(utils.utcnow())
        message = {'project_id': project_id, 'namespace': namespace,
                   'metric_name': metric_name, 'dimensions': dimensions,
                   'value': value, 'unit': unit, 'timestamp': timestamp,
                   'context': context.to_dict()}
        self.rpc.send_msg(rpc.PUT_METRIC_DATA_MSG_ID, message)
        LOG.info("PUT_METRIC_DATA_MSG sent")
        return {}
class UnpackMessageBolt(storm.BasicBolt):
    """Storm bolt that unpacks raw queue messages and routes them by metric.

    For each incoming message it resolves the metric key (creating the
    metric row on first sight, with a memcached fast path) and emits
    ``[metric_key, message_json]`` tuples so downstream bolts can group
    work per metric.
    """
    BOLT_NAME = "UnpackMessageBolt"

    def initialize(self, stormconf, context):
        self.pid = os.getpid()
        self.cass = Cassandra()
        # NOTE(review): key_dict is never used inside this class; kept for
        # backward compatibility in case external code pokes at it.
        self.key_dict = {}
        self.mc = memcache.Client(FLAGS.memcached_servers, debug=0)

    def get_metric_key(self, message):
        """Return the metric key for a put_metric_data message.

        Checks memcached first; on a miss, looks the key up in (or creates
        it within) Cassandra and caches it for 3000 seconds.
        """
        project_id = message['project_id']
        namespace = message['namespace']
        metric_name = message['metric_name']
        dimensions = message['dimensions']
        unit = message['unit']

        key = utils.generate_metric_key(project_id, namespace, metric_name,
                                        dimensions)
        memory_key = "metric_%s" % str(key)
        metric_key = self.mc.get(memory_key)
        if not metric_key:
            metric_key = self.cass.get_metric_key_or_create(
                project_id, namespace, metric_name, dimensions, unit)
            self.mc.set(memory_key, metric_key, 3000)
        return metric_key

    def get_alarm_metric_key(self, alarmkey):
        """Return the metric key string for an alarm, or None if the alarm
        does not exist in the database."""
        alarm = self.cass.get_metric_alarm(alarmkey)
        if alarm:
            return str(alarm.get('metric_key'))
        else:
            return None

    def process(self, tup):
        """Dispatch one tuple based on the embedded message_id."""
        message_buf = tup.values[0]
        message = json.loads(message_buf)
        message_id = message['message_id']
        message_uuid = message['message_uuid']
        LOG.info("start processing msg[%s:%s]" % (message_id, message_uuid))

        if message_id == PUT_METRIC_DATA_MSG_ID:
            metric_key = str(self.get_metric_key(message))
            storm.emit([metric_key, message_buf])

        elif message_id == PUT_METRIC_ALARM_MSG_ID:
            metric_key = message.get('metric_key')
            storm.emit([metric_key, message_buf])

        elif message_id == DELETE_ALARMS_MSG_ID:
            project_id = message['project_id']
            alarmkeys = message['alarmkeys']
            for alarmkey in alarmkeys:
                try:
                    alarmkey_uuid = UUID(alarmkey)
                    metric_key = self.get_alarm_metric_key(alarmkey_uuid)
                    # Fix: check for a missing alarm BEFORE str() conversion.
                    # Previously str(None) == "None" was truthy, so deletes
                    # for vanished alarms were emitted with a bogus key.
                    if metric_key:
                        metric_key = str(metric_key)
                        message['alarmkey'] = alarmkey
                        storm.emit([metric_key, json.dumps(message)])
                except Exception:
                    LOG.error("Alarm %s does not exists", alarmkey)

        elif message_id == SET_ALARM_STATE_MSG_ID:
            project_id = message['project_id']
            alarm_name = message['alarm_name']
            alarm_key = self.cass.get_metric_alarm_key(project_id, alarm_name)
            if alarm_key:
                alarm = self.cass.get_metric_alarm(alarm_key)
                metric_key = str(alarm['metric_key'])
                storm.emit([metric_key, json.dumps(message)])
class PutMetricBolt(storm.BasicBolt):
    """Storm bolt that applies metric and alarm messages to the in-memory
    MetricMonitor working set and to Cassandra.

    Incoming tuples are ``(metric_key, message_json)``; dispatch happens on
    the message's 'message_id' field in process().
    """
    BOLT_NAME = "PutMetricBolt"

    def initialize(self, stormconf, context):
        self.pid = os.getpid()
        self.cass = Cassandra()
        # metric_key (UUID) -> MetricMonitor; the in-memory working set
        self.metrics = {}
        self.mc = memcache.Client(FLAGS.memcached_servers, debug=0)

    def process_put_metric_data_msg(self, metric_key, message):
        """
        Put metric data into both memory and database
        """
        # Load statistics data in memory; the metric row may not be
        # replicated yet, so retry a few times before giving up.
        if metric_key not in self.metrics:
            max_retries = 3
            # Fix: was range(max_retries + 1), but the retry condition below
            # made the final iteration unreachable; range(max_retries) yields
            # the same 3 attempts without the dead iteration.
            for i in range(max_retries):
                try:
                    self.metrics[metric_key] = MetricMonitor(metric_key,
                                                             self.cass)
                    break
                except ResourceNotFound:
                    if i + 1 < max_retries:
                        LOG.warn("Metric %s is not in the database. "
                                 "retry... %d", metric_key, i + 1)
                        time.sleep(1)
                    else:
                        LOG.error("Metric %s is not in the database.",
                                  metric_key)
                        return
        timestamp = utils.parse_strtime(message['timestamp'])
        self.metrics[metric_key].put_metric_data(metric_key,
                                                 timestamp=timestamp,
                                                 value=message['value'],
                                                 unit=message['unit'])

    def process_put_metric_alarm_msg(self, metric_key, message):
        """Create or update a metric alarm and record an alarm-history row."""

        def get_alarm_key(project_id, alarm_name):
            # Thin lookup wrapper; returns None when no such alarm exists.
            key = self.cass.get_metric_alarm_key(project_id, alarm_name)
            return key

        def metricalarm_for_json(metricalarm):
            # Project the alarm columns into the wire/history JSON shape.
            # NOTE(review): 'actionEnabled' and 'okactions' look inconsistent
            # with the AWS-style camelCase used by the other keys
            # ('actionsEnabled', 'okActions') — confirm downstream consumers
            # before changing them.
            cut = metricalarm.get('alarm_configuration_updated_timestamp')
            alarm_for_json = {
                'actionEnabled': metricalarm.get('actions_enabled', False),
                'alarmActions': metricalarm.get('alarm_actions', []),
                'alarmArn': metricalarm.get('alarm_arn'),
                'alarmConfigurationUpdatedTimestamp': utils.strtime(cut),
                'alarmDescription': metricalarm.get('alarm_description'),
                'alarmName': metricalarm.get('alarm_name'),
                'comparisonOperator': metricalarm.get('comparison_operator'),
                'dimensions': metricalarm.get('dimensions'),
                'evaluationPeriods': metricalarm.get('evaluation_periods'),
                'insufficientDataActions':
                    metricalarm.get('insufficient_data_actions', []),
                'metricName': metricalarm.get('metric_name'),
                'namespace': metricalarm.get('namespace'),
                'okactions': metricalarm.get('ok_actions', []),
                'statistic': metricalarm.get('statistic'),
                'threshold': metricalarm.get('threshold'),
                'unit': metricalarm.get('unit'),
            }
            return alarm_for_json

        if metric_key not in self.metrics:
            self.metrics[metric_key] = MetricMonitor(metric_key, self.cass)

        project_id = message['project_id']
        metricalarm = message['metricalarm']

        # build metricalarm column, alarmhistory column
        alarm_key = get_alarm_key(project_id, metricalarm['alarm_name'])
        history_type = 'Update' if alarm_key else 'Create'
        now = utils.utcnow()

        if history_type == 'Update':
            # Preserve the existing state fields; only configuration changes.
            original_alarm = self.cass.get_metric_alarm(alarm_key)
            for dict_key in ['state_updated_timestamp', 'state_reason',
                             'state_reason_data', 'state_value', 'project_id']:
                metricalarm[dict_key] = original_alarm[dict_key]
            metricalarm['alarm_configuration_updated_timestamp'] = now
            history_data = json.dumps({
                'updatedAlarm': metricalarm_for_json(metricalarm),
                'type': history_type,
                'version': '1.0'
            })
            summary = "Alarm %s updated" % metricalarm['alarm_name']
        else:
            # New alarm: mint a key and start in INSUFFICIENT_DATA.
            alarm_key = uuid.uuid4()
            state_reason = "Unchecked: Initial alarm creation"
            metricalarm.update({
                'state_updated_timestamp': now,
                'alarm_configuration_updated_timestamp': now,
                'state_reason': state_reason,
                'state_reason_data': json.dumps({}),
                'state_value': "INSUFFICIENT_DATA",
                'project_id': project_id
            })
            history_data = json.dumps({
                'createdAlarm': metricalarm_for_json(metricalarm),
                'type': history_type,
                'version': '1.0'
            })
            summary = "Alarm %s created" % metricalarm['alarm_name']

        metricalarm['metric_key'] = metric_key
        history_key = uuid.uuid4()
        history_column = {
            'project_id': project_id,
            'alarm_key': alarm_key,
            'alarm_name': metricalarm['alarm_name'],
            'history_data': history_data,
            'history_item_type': 'ConfigurationUpdate',
            'history_summary': summary,
            'timestamp': utils.utcnow()
        }

        self.cass.put_metric_alarm(alarm_key, metricalarm)
        self.cass.insert_alarm_history(history_key, history_column)
        LOG.info("metric alarm inserted: %s %s", alarm_key, metricalarm)

        # load metric in memory
        self.metrics[metric_key].put_alarm(alarm_key, metricalarm)

    def process_delete_metric_alarms_msg(self, metric_key, message):
        """Remove one alarm (by key) from memory and database."""
        alarmkey = UUID(message['alarmkey'])
        LOG.debug("Metric keys %s", self.metrics.keys())
        if metric_key not in self.metrics:
            self.metrics[metric_key] = MetricMonitor(metric_key, self.cass)
        self.metrics[metric_key].delete_metric_alarm(alarmkey)

    def process_set_alarm_state_msg(self, metric_key, message):
        """Overwrite an alarm's state fields in memory and database."""
        project_id = message.get('project_id')
        alarm_name = message.get('alarm_name')
        state_reason_data = message.get('state_reason_data')

        if metric_key not in self.metrics:
            self.metrics[metric_key] = MetricMonitor(metric_key, self.cass)
        metric = self.metrics[metric_key]

        ret = self.cass.get_metric_alarm_key(project_id, alarm_name)
        if ret:
            alarm_key = ret
            try:
                metricalarm = metric.alarms[alarm_key]
            except KeyError:
                LOG.warn("alarm key [%s] is found, but alarm is not found.",
                         alarm_key)
                return
        else:
            # Fix: 'alarm_key' is unbound on this path (the lookup returned
            # a falsy value), which raised UnboundLocalError; log the alarm
            # name instead.
            LOG.warn("alarm key [%s] is not found.", alarm_name)
            return

        metricalarm['state_reason'] = message.get('state_reason')
        metricalarm['state_value'] = message.get('state_value')
        metricalarm['state_reason_data'] = message.get('state_reason_data')

        # write into database
        alarm_columns = {'state_reason': message.get('state_reason'),
                         'state_value': message.get('state_value')}
        if state_reason_data:
            alarm_columns['state_reason_data'] = state_reason_data
        alarm_columns['project_id'] = project_id
        self.cass.put_metric_alarm(alarm_key, alarm_columns)

    def process_check_metric_alarms_msg(self, message):
        """Evaluate alarms for live metrics and evict stale ones."""
        query_time = datetime.utcnow()
        stale_metrics = []
        ready_to_evaluate = message.get('ready_to_evaluate')

        for key, metric in self.metrics.iteritems():
            is_stale = metric.is_stale()
            if is_stale:
                stale_metrics.append(key)
            if (not is_stale) and ready_to_evaluate:
                metric.check_alarms(query_time)

        # Evict after iteration so the dict is not mutated while iterating.
        for key in stale_metrics:
            try:
                metric = self.metrics.pop(key)
                metric.delete()
                LOG.audit("Stale metric(%s) is deleted", str(key))
            except KeyError:
                LOG.error("KeyError occurred when delete stale metric(%s)",
                          str(key))

    def process(self, tup):
        """Dispatch one tuple based on the embedded message_id."""
        message = json.loads(tup.values[1])
        message_id = message['message_id']
        message_uuid = message.get('message_uuid', None)
        LOG.info("start processing msg[%s:%s]", message_id, message_uuid)

        try:
            metric_key = UUID(tup.values[0]) if tup.values[0] else None
        except ValueError:
            LOG.error("badly formed hexadecimal UUID string - %s",
                      tup.values[0])
            return

        if message_id == PUT_METRIC_DATA_MSG_ID:
            # message deduplicate
            # NOTE(review): messages without a message_uuid are silently
            # dropped here — presumably every producer sets one; confirm.
            if message_uuid:
                mckey = "%s_message_uuid" % message_uuid
                if not self.mc.get(mckey):
                    # 300 seconds TTL
                    self.mc.set(mckey, 1, 300)
                    LOG.info("process put_metric_data_msg (%s)", message)
                    self.process_put_metric_data_msg(metric_key, message)
                else:
                    LOG.info("Message duplicated. %s", message_uuid)
        elif message_id == PUT_METRIC_ALARM_MSG_ID:
            LOG.info("process put_metric_alarm_msg (%s)", message)
            self.process_put_metric_alarm_msg(metric_key, message)
        elif message_id == DELETE_ALARMS_MSG_ID:
            LOG.info("process delete_alarms_msg (%s)", message)
            self.process_delete_metric_alarms_msg(metric_key, message)
        elif message_id == SET_ALARM_STATE_MSG_ID:
            LOG.info("process set_alarm_state_msg (%s)", message)
            self.process_set_alarm_state_msg(metric_key, message)
        elif message_id == CHECK_METRIC_ALARM_MSG_ID:
            LOG.info("process check_metric_alarm_msg (%s)", message)
            self.process_check_metric_alarms_msg(message)
        else:
            LOG.error("unknown message")
class API(object):
    """Public CloudWatch-style API facade.

    Reads go straight to Cassandra; writes are validated here and then
    forwarded to the Storm topology via RPC messages.
    """

    # statistic name -> pandas rolling aggregation function
    ROLLING_FUNC_MAP = {
        'Average': rolling_mean,
        'Minimum': rolling_min,
        'Maximum': rolling_max,
        'SampleCount': rolling_sum,
        'Sum': rolling_sum,
    }

    def __init__(self):
        self.cass = Cassandra()
        self.rpc = rpc.RemoteProcedureCall()

    def delete_alarms(self, context, project_id, alarm_names):
        """Validate that every named alarm exists, then send a single
        DELETE_ALARMS message carrying all alarm keys.

        Raises ResourceNotFound if any name does not resolve to an alarm.
        """
        alarmkeys = []
        for alarm_name in alarm_names:
            k = self.cass.get_metric_alarm_key(project_id, alarm_name)
            if not k:
                raise ResourceNotFound("Alarm %s does not exists." %
                                       alarm_name)
            alarmkeys.append(str(k))
        body = {'project_id': project_id,
                'alarmkeys': alarmkeys,  # alarm keys as UUID strings
                'context': context.to_dict()}
        self.rpc.send_msg(rpc.DELETE_ALARMS_MSG_ID, body)
        LOG.info("DELETE_ALARMS_MSG sent")

    def describe_alarms(self, project_id, action_prefix=None,
                        alarm_name_prefix=None, alarm_names=None,
                        max_records=None, next_token=None, state_value=None):
        """
        params:
            project_id: string
            action_prefix: TODO: not implemented yet.
            alarm_name_prefix: string
            alarm_names: string list
            max_records: integer
            next_token: string (uuid type)
            state_value: string (OK | ALARM | INSUFFICIENT_DATA)
        """
        alarms = self.cass.describe_alarms(project_id, action_prefix,
                                           alarm_name_prefix, alarm_names,
                                           max_records, next_token,
                                           state_value)
        return alarms

    def describe_alarms_for_metric(self, project_id, namespace, metric_name,
                                   dimensions=None, period=None,
                                   statistic=None, unit=None):
        """
        params:
            project_id: string
            metric_name: string
            namespace: string
            dimensions: dict
            period: integer
            statistic: string (SampleCount | Average | Sum | Minimum |
                               Maximum)
            unit: string
        """
        alarms = self.cass.describe_alarms_for_metric(
            project_id, namespace, metric_name, dimensions=dimensions,
            period=period, statistic=statistic, unit=unit)
        return alarms

    def describe_alarm_history(self, project_id, alarm_name=None,
                               end_date=None, history_item_type=None,
                               max_records=None, next_token=None,
                               start_date=None):
        """Return alarm history rows matching the given filters."""
        histories = self.cass.describe_alarm_history(
            alarm_name=alarm_name, end_date=end_date,
            history_item_type=history_item_type, max_records=max_records,
            next_token=next_token, start_date=start_date,
            project_id=project_id)
        return histories

    def set_alarm_actions(self, context, project_id, alarm_names, enabled):
        """Enable or disable actions for the named alarms.

        Two passes on purpose: the first validates every alarm exists so the
        operation is all-or-nothing; only then does the second pass write.
        Raises InvalidParameterValue on the first unknown alarm name.
        """
        for alarm_name in alarm_names:
            alarm_key = self.cass.get_metric_alarm_key(project_id, alarm_name)
            if not alarm_key:
                raise InvalidParameterValue("Alarm %s does not exist" %
                                            alarm_name)
        for alarm_name in alarm_names:
            alarm_key = self.cass.get_metric_alarm_key(project_id, alarm_name)
            history_data = {'actions_enabled': enabled,
                            'project_id': project_id}
            self.cass.put_metric_alarm(alarm_key, history_data)
            if enabled:
                summary = "Alarm actions for %s are enabled" % alarm_name
            else:
                summary = "Alarm actions for %s are disabled" % alarm_name
            history_key = uuid.uuid4()
            history_column = {
                'project_id': project_id,
                'alarm_key': alarm_key,
                'alarm_name': alarm_name,
                'history_data': json.dumps(history_data),
                'history_item_type': 'ConfigurationUpdate',
                'history_summary': summary,
                'timestamp': utils.utcnow()
            }
            self.cass.insert_alarm_history(history_key, history_column)

    def set_alarm_state(self, context, project_id, alarm_name, state_reason,
                        state_value, state_reason_data=None):
        """Send a SET_ALARM_STATE message for one alarm.

        Raises ResourceNotFound if the alarm name does not exist.
        """
        k = self.cass.get_metric_alarm_key(project_id, alarm_name)
        if not k:
            raise ResourceNotFound("Alarm %s does not exists." % alarm_name)
        body = {'project_id': project_id,
                'alarm_name': alarm_name,
                'state_reason': state_reason,
                'state_value': state_value,
                'state_reason_data': state_reason_data,
                'context': context.to_dict()}
        self.rpc.send_msg(rpc.SET_ALARM_STATE_MSG_ID, body)
        LOG.info("SET_ALARM_STATE_MSG sent")

    def get_metric_statistics(self, project_id, end_time, metric_name,
                              namespace, period, start_time, statistics,
                              unit=None, dimensions=None):
        """Return a list of statistics datapoints for the metric matching
        the given conditions, plus the unit used.

        'period' is given in seconds; internally it is converted to minutes
        because datapoints are stored on minute boundaries.
        """
        def to_datapoint(df, idx):
            # Yield (index, row) only for rows that have at least one
            # non-NaN statistic.
            datapoint = df.ix[idx].dropna()
            if len(datapoint):
                return idx, datapoint

        # Align the query window to minute boundaries; the analysis window
        # is widened backwards by one period so rolling functions have
        # enough history at the left edge.
        end_idx = end_time.replace(second=0, microsecond=0)
        start_idx = start_time.replace(second=0, microsecond=0)
        start_ana_idx = start_idx - datetools.Minute() * (period / 60)
        daterange = DateRange(start_idx, end_idx, offset=datetools.Minute())
        daterange_ana = DateRange(start_ana_idx, end_idx,
                                  offset=datetools.Minute())

        # load default unit for metric from database
        if unit == "None" or not unit:
            metric_key = self.cass.get_metric_key(
                project_id=project_id, namespace=namespace,
                metric_name=metric_name, dimensions=dimensions)
            if metric_key:
                unit = self.cass.get_metric_unit(metric_key)
            else:
                unit = "None"

        # load statistics data from database
        stats = self.cass.get_metric_statistics(
            project_id=project_id, namespace=namespace,
            metric_name=metric_name, start_time=start_ana_idx,
            end_time=end_time, period=period, statistics=statistics,
            dimensions=dimensions)

        period = period / 60  # convert sec to min
        stat = DataFrame(index=daterange)

        # NOTE: 'stats' is assumed to be in the same order as 'statistics'.
        for statistic, series in zip(statistics, stats):
            func = self.ROLLING_FUNC_MAP[statistic]
            ts = TimeSeries(series, index=daterange_ana)
            rolled_ts = func(ts, period, min_periods=0)
            # Sample the rolled series at every 'period' minutes.
            stat[statistic] = rolled_ts.ix[::period]
            LOG.debug("stat %s\n%s" % (statistic, stat[statistic]))

        ret = filter(None, (to_datapoint(stat, i) for i in stat.index))
        return ret, unit

    def list_metrics(self, project_id, next_token=None, dimensions=None,
                     metric_name=None, namespace=None):
        """
        List Metrics
        """
        metrics = self.cass.list_metrics(project_id, namespace, metric_name,
                                         dimensions, next_token)
        return metrics

    def put_metric_alarm(self, context, project_id, metricalarm):
        """
        Send put metric alarm message to Storm

        Validates notification actions and quotas, ensures the target
        metric row exists, then forwards the alarm to the topology.
        """
        def _validate_actions(alarm):
            # Every group-notification action must reference an existing
            # notification group.
            for actions in (alarm.ok_actions,
                            alarm.insufficient_data_actions,
                            alarm.alarm_actions):
                for action in actions:
                    if utils.validate_groupnotification_action(action):
                        group = utils.parse_groupnotification_action(action)
                        if not self.cass.get_notification_group(group):
                            raise InvalidNotificationGroup()

        now = utils.utcnow()
        _validate_actions(metricalarm)
        metricalarm = metricalarm.to_columns()
        alarm_name = metricalarm['alarm_name']
        namespace = metricalarm['namespace']
        metric_name = metricalarm['metric_name']
        dimensions = json.loads(metricalarm['dimensions'])

        # check if we have metric in database
        metric_key = self.cass.get_metric_key_or_create(
            project_id=project_id, namespace=namespace,
            metric_name=metric_name, dimensions=dimensions,
            unit=metricalarm['unit'])

        update_data = {
            'project_id': project_id,
            'metric_key': str(metric_key),
            'alarm_arn': "arn:spcs:synaps:%s:alarm:%s" % (project_id,
                                                          alarm_name),
            'alarm_configuration_updated_timestamp': utils.strtime(now)
        }
        metricalarm.update(update_data)

        # check if metric is changed
        alarm_key = self.cass.get_metric_alarm_key(project_id=project_id,
                                                   alarm_name=alarm_name)
        if alarm_key:
            # An alarm may be re-put, but never moved to another metric.
            original_alarm = self.cass.get_metric_alarm(alarm_key)
            if (str(original_alarm['metric_key']) != str(
                    metricalarm['metric_key'])):
                raise InvalidRequest("Metric cannot be changed. "
                                     "Delete alarm and retry.")
        else:
            # If alarm is newly added, check quotas
            # check alarm quota per project
            project_quota = FLAGS.get('alarm_quota_per_project')
            alarms_in_project = self.cass.get_alarm_count(project_id)
            if alarms_in_project >= project_quota:
                LOG.info("Too many alarms(%d) in the project %s",
                         alarms_in_project, project_id)
                raise ProjectAlarmQuotaExceeded()
            # check alarm quota per metric
            metric_quota = FLAGS.get('alarm_quota_per_metric')
            alarms_per_metric = self.cass.get_alarms_per_metric_count(
                project_id, namespace, metric_name, dimensions)
            if alarms_per_metric >= metric_quota:
                LOG.info("Too many alarms(%d) for this metric",
                         alarms_per_metric)
                raise MetricAlarmQuotaExceeded()

        message = {
            'project_id': project_id,
            'metric_key': str(metric_key),
            'metricalarm': metricalarm,
            'context': context.to_dict()
        }
        self.rpc.send_msg(rpc.PUT_METRIC_ALARM_MSG_ID, message)
        LOG.info("PUT_METRIC_ALARM_MSG sent")
        return {}

    def put_metric_data(self, context, project_id, namespace, metric_name,
                        dimensions, value, unit, timestamp=None,
                        is_admin=False):
        """Send a PUT_METRIC_DATA message for one datapoint.

        Raises AdminRequired when a non-admin writes into the reserved
        admin namespace. 'timestamp' defaults to the current UTC time.
        """
        admin_namespace = FLAGS.get('admin_namespace')
        if namespace.startswith(admin_namespace) and not is_admin:
            raise AdminRequired()
        timestamp = timestamp or utils.strtime(utils.utcnow())
        message = {
            'project_id': project_id,
            'namespace': namespace,
            'metric_name': metric_name,
            'dimensions': dimensions,
            'value': value,
            'unit': unit,
            'timestamp': timestamp,
            'context': context.to_dict()
        }
        self.rpc.send_msg(rpc.PUT_METRIC_DATA_MSG_ID, message)
        LOG.info("PUT_METRIC_DATA_MSG sent")
        return {}