def describe_alarm_history(self, context, alarm_name=None, end_date=None,
                           history_item_type=None, max_records=100,
                           next_token=None, start_date=None,
                           project_id=None):
    """Return one page of alarm history items.

    :param context: request context; non-admin callers are restricted to
                    their own project regardless of ``project_id``.
    :param alarm_name: optional alarm name filter.
    :param end_date: optional upper time bound (string, parsed below).
    :param history_item_type: optional item-type filter.
    :param max_records: page size; may arrive as a string from the API
                        layer, so it is coerced to int (default 100).
    :param next_token: pagination token from a previous call.
    :param start_date: optional lower time bound (string, parsed below).
    :param project_id: project scope (admin only).
    :returns: dict with ``describe_alarm_history_result`` containing
              ``alarm_history_items`` and, when more pages exist,
              ``next_token``.
    """
    def to_alarm_history(v):
        # Flatten a stored history row into the API response shape.
        return {
            'alarm_name': v['alarm_name'],
            'history_data': v['history_data'],
            'history_item_type': v['history_item_type'],
            'history_summary': v['history_summary'],
            'timestamp': utils.strtime_trunk(v['timestamp'])
        }

    # Non-admins may only read their own project's history.
    if not (project_id and context.is_admin):
        project_id = context.project_id

    self.check_alarm_name(alarm_name)
    self.check_history_item_type(history_item_type)
    self.check_next_token(next_token)

    # The API layer may pass max_records as a string; normalize it so the
    # "+ 1" below and the comparison in the loop are numeric (this matches
    # the sibling implementation of this method elsewhere in the file).
    max_records = int(max_records) if max_records else 100

    ret_dict = {}
    ret_histories = []
    end_date = utils.parse_strtime(end_date) if end_date else end_date
    start_date = utils.parse_strtime(start_date) \
        if start_date else start_date

    LOG.debug("request to database for alarm history")
    # Ask for one extra record so we can tell whether another page exists.
    histories = self.monitor_api.describe_alarm_history(
        alarm_name=alarm_name, end_date=end_date,
        history_item_type=history_item_type,
        max_records=max_records + 1, next_token=next_token,
        start_date=start_date, project_id=project_id)

    LOG.debug("convert to list")
    histories = list(histories)
    LOG.debug("to list %d", len(histories))

    LOG.debug("start to read histories")
    for i, (k, v) in enumerate(histories):
        if i >= max_records:
            # The extra record's key becomes the next page token.
            next_token = k
            LOG.debug("reached to the number of max records")
            break
        ret_histories.append(to_alarm_history(v))
    else:
        # Loop finished without hitting the page limit: this is the last
        # page.  (The log below used to fire once per row inside the
        # loop, which was misleading; it belongs here.)
        LOG.debug("not reached to the number of max records")
        next_token = None

    ret_dict['describe_alarm_history_result'] = {
        'alarm_history_items': ret_histories
    }
    if next_token:
        ret_dict['describe_alarm_history_result']['next_token'] = \
            str(next_token)
    return ret_dict
def get_metric_statistics(self, context, end_time, metric_name, namespace,
                          period, start_time, statistics, unit=None,
                          dimensions=None, project_id=None):
    """
    Gets statistics for the specified metric.

    :param context: request context; non-admin callers are restricted to
                    their own project regardless of ``project_id``.
    :param end_time: end of the query window (string, parsed below).
    :param metric_name: name of the metric to query.
    :param namespace: metric namespace.
    :param period: aggregation period; validated by _validate_period.
    :param start_time: start of the query window (string, parsed below).
    :param statistics: member-list of statistic names (e.g. Average).
    :param unit: requested unit, or None.
    :param dimensions: member-dict of dimension filters.
    :param project_id: project scope (admin only).
    :returns: dict shaped as ``{'GetMetricStatisticsResult':
              {'Datapoints': [...], 'Label': metric_name}}``.
    """
    def stat_to_datapoint(stat):
        """Convert one (timestamp, values) stat row into a response
        datapoint, converting values into the requested unit.

        NOTE(review): this closure reads ``unit`` from the enclosing
        scope, and ``unit`` is REBOUND by the monitor_api call below
        before ``map`` runs — so the unit used here is the one returned
        by monitor_api, not necessarily the caller's argument.  Do not
        reorder those statements.
        """
        timestamp, values = stat
        ret = {}
        ret['Timestamp'] = timestamp
        for statistic, value in values.iteritems():
            if statistic == "SampleCount":
                # Sample counts are dimensionless tallies; never converted.
                ret['Unit'] = "Count"
                ret[statistic] = value
            else:
                # The string 'None' means "no unit requested".
                ret['Unit'] = (unit if unit != 'None' else None)
                ret[statistic] = utils.to_unit(value, unit)
        return ret

    # Non-admins may only read their own project's metrics.
    if not (project_id and context.is_admin):
        project_id = context.project_id

    end_time = utils.parse_strtime(end_time)
    start_time = utils.parse_strtime(start_time)
    dimensions = utils.extract_member_dict(dimensions)
    statistics = utils.extract_member_list(statistics)

    self.check_dimensions(dimensions)
    self.check_metric_name(metric_name)
    self.check_namespace(namespace)
    self.check_statistics(statistics)
    self.check_unit(unit)
    self._validate_period(period)
    self.validate_get_metric_statistics(start_time, end_time, period)

    # monitor_api also returns the effective unit, which replaces the
    # caller-supplied one (see NOTE in stat_to_datapoint above).
    stats, unit = self.monitor_api.get_metric_statistics(
        project_id, end_time, metric_name, namespace, period,
        start_time, statistics, unit, dimensions)

    # Python 2 map() returns a list, which the serializer expects.
    datapoints = map(stat_to_datapoint, stats)
    label = metric_name
    return {'GetMetricStatisticsResult': {'Datapoints': datapoints,
                                          'Label': label}}
def describe_alarm_history(self, context, alarm_name=None, end_date=None,
                           history_item_type=None, max_records=None,
                           next_token=None, start_date=None,
                           project_id=None):
    """Return one page of alarm history items.

    Fetches up to ``max_records`` (default 100) history rows, plus one
    extra to detect whether a further page exists; when it does, the
    extra row's key is returned as ``next_token``.
    """
    def as_history_item(row):
        # Shape a raw history row for the API response.
        return {
            'alarm_name': row['alarm_name'],
            'history_data': row['history_data'],
            'history_item_type': row['history_item_type'],
            'history_summary': row['history_summary'],
            'timestamp': utils.strtime_trunk(row['timestamp'])
        }

    # Non-admins are always scoped to their own project.
    if not (project_id and context.is_admin):
        project_id = context.project_id

    self.check_alarm_name(alarm_name)
    self.check_history_item_type(history_item_type)
    self.check_next_token(next_token)

    # max_records may arrive as a string from the API layer.
    page_size = int(max_records) if max_records else 100
    if end_date:
        end_date = utils.parse_strtime(end_date)
    if start_date:
        start_date = utils.parse_strtime(start_date)

    rows = self.monitor_api.describe_alarm_history(
        alarm_name=alarm_name, end_date=end_date,
        history_item_type=history_item_type,
        max_records=page_size + 1, next_token=next_token,
        start_date=start_date, project_id=project_id)

    items = []
    token_out = None
    for idx, (key, row) in enumerate(rows):
        if idx >= page_size:
            # The sentinel extra row marks the start of the next page.
            token_out = key
            break
        items.append(as_history_item(row))

    result = {'alarm_history_items': items}
    if token_out:
        result['next_token'] = str(token_out)
    return {'describe_alarm_history_result': result}
def process_put_metric_data_msg(self, metric_key, message):
    """
    Put metric data into both memory and database.

    :param metric_key: key identifying the metric.
    :param message: dict carrying 'timestamp', 'value' and 'unit'.

    If the metric row is not yet visible in the database, creation of
    the in-memory monitor is retried a few times before giving up.
    """
    # Load statistics data in memory.
    if metric_key not in self.metrics:
        max_retries = 3
        # BUGFIX: the old loop ran over range(max_retries + 1) but its
        # "i + 1 < max_retries" branch returned on the 3rd iteration,
        # leaving a dead 4th iteration.  Attempt exactly max_retries
        # times; the observable behavior (3 tries, 2 warnings, 1 error)
        # is unchanged.
        for attempt in range(1, max_retries + 1):
            try:
                self.metrics[metric_key] = MetricMonitor(metric_key,
                                                         self.cass)
                break
            except ResourceNotFound:
                if attempt < max_retries:
                    LOG.warn("Metric %s is not in the database. "
                             "retry... %d", metric_key, attempt)
                    time.sleep(1)
                else:
                    LOG.error("Metric %s is not in the database.",
                              metric_key)
                    return

    timestamp = utils.parse_strtime(message['timestamp'])
    self.metrics[metric_key].put_metric_data(
        metric_key, timestamp=timestamp, value=message['value'],
        unit=message['unit'])
def parse_metric_data(metric):
    """Validate one metric dict and unpack it.

    Returns the tuple ``(metric_name, dimensions, value, unit,
    timestamp)``, raising InvalidParameterValue for malformed
    dimensions or for samples older than the statistics TTL.
    """
    # .get() itself cannot raise KeyError; only the member-dict
    # extraction can, so only that call sits in the try block.
    raw_dims = metric.get('dimensions', {})
    try:
        dims = utils.extract_member_dict(raw_dims)
    except KeyError:
        raise InvalidParameterValue(
            "Unsuitable Dimensions Value - %s" % str(raw_dims))
    self.check_dimensions(dims)

    name = metric.get('metric_name')
    unit = metric.get('unit', 'None')
    value = metric.get('value')

    # Default to "now" when the sample carries no timestamp.
    stamp = metric.get('timestamp')
    if not stamp:
        stamp = utils.strtime(utils.utcnow())

    # Reject samples older than the statistics retention window.
    oldest_allowed = (datetime.datetime.utcnow()
                      - datetime.timedelta(
                          seconds=FLAGS.get('statistics_ttl')))
    if utils.parse_strtime(stamp) < oldest_allowed:
        raise InvalidParameterValue("Stale metric data - %s" % stamp)

    self.check_metric_name(name)
    self.check_unit(unit)
    return name, dims, value, unit, stamp
def process_put_metric_data_msg(self, metric_key, message):
    """
    Put metric data into both memory and database.

    :param metric_key: key identifying the metric.
    :param message: dict carrying 'timestamp', 'value' and 'unit'.

    If the metric row is not yet visible in the database, creation of
    the in-memory monitor is retried a few times before giving up.
    """
    # Load statistics data in memory.
    if metric_key not in self.metrics:
        max_retries = 3
        # BUGFIX: the old loop iterated range(max_retries + 1) yet the
        # "i + 1 < max_retries" check returned on iteration 3, so the
        # 4th iteration was unreachable dead code.  Attempt exactly
        # max_retries times; observable behavior (3 tries, 2 warnings,
        # 1 error) is unchanged.
        for attempt in range(1, max_retries + 1):
            try:
                self.metrics[metric_key] = MetricMonitor(
                    metric_key, self.cass)
                break
            except ResourceNotFound:
                if attempt < max_retries:
                    LOG.warn("Metric %s is not in the database. "
                             "retry... %d", metric_key, attempt)
                    time.sleep(1)
                else:
                    LOG.error("Metric %s is not in the database.",
                              metric_key)
                    return

    timestamp = utils.parse_strtime(message['timestamp'])
    self.metrics[metric_key].put_metric_data(
        metric_key, timestamp=timestamp, value=message['value'],
        unit=message['unit'])
def get_state_update_value(h):
    """Extract the state transition recorded in an alarm history item.

    :param h: history item whose ``data`` dict carries ``oldState`` and
              ``newState`` entries; ``newState.stateReasonData`` holds
              the evaluation's ``queryDate`` string.
    :returns: tuple ``(oldstate, newstate, querydate)`` with
              ``querydate`` parsed into a datetime.
    """
    oldstate = h.data['oldState']['stateValue']
    newstate = h.data['newState']['stateValue']
    querydate = h.data['newState']['stateReasonData']['queryDate']
    querydate = utils.parse_strtime(querydate)
    return oldstate, newstate, querydate
def __init__(self, user_id, project_id, is_admin=None, read_deleted="no",
             roles=None, remote_address=None, timestamp=None,
             request_id=None, auth_token=None, strategy="noauth",
             overwrite=True):
    """Build a request context.

    :param read_deleted: 'no' indicates deleted records are hidden,
        'yes' indicates deleted records are visible, 'only' indicates
        that *only* deleted records are visible.
    :param overwrite: Set to False to ensure that the greenthread
        local copy of the index is not overwritten.
    """
    self.user_id = user_id
    self.project_id = project_id
    # Keep `roles or []` so a caller's empty list is never aliased and
    # later mutated by the admin-role append below.
    self.roles = roles or []

    # Derive admin status from roles when unspecified; otherwise make
    # sure an explicit admin carries the 'admin' role.
    if is_admin is None:
        is_admin = "admin" in [r.lower() for r in self.roles]
    elif is_admin and "admin" not in self.roles:
        self.roles.append("admin")
    self.is_admin = is_admin

    self.read_deleted = read_deleted
    self.remote_address = remote_address

    # Accept a datetime, a parseable string, or nothing (meaning now).
    timestamp = timestamp or utils.utcnow()
    if isinstance(timestamp, basestring):
        timestamp = utils.parse_strtime(timestamp)
    self.timestamp = timestamp

    self.request_id = request_id or generate_request_id()
    self.auth_token = auth_token
    self.strategy = strategy

    # Publish this context to greenthread-local storage unless told not
    # to clobber an existing one.
    if overwrite or not hasattr(local.store, "context"):
        local.store.context = self
def __init__(self, user_id, project_id, is_admin=None, read_deleted="no",
             roles=None, remote_address=None, timestamp=None,
             request_id=None, auth_token=None, strategy='noauth',
             overwrite=True):
    """Initialize a request context.

    :param read_deleted: 'no' indicates deleted records are hidden,
        'yes' indicates deleted records are visible, 'only' indicates
        that *only* deleted records are visible.
    :param overwrite: Set to False to ensure that the greenthread
        local copy of the index is not overwritten.
    """
    self.user_id = user_id
    self.project_id = project_id
    # `roles or []` (not `roles if roles is not None else []`) so an
    # empty caller list is never aliased and mutated by the append below.
    self.roles = roles or []
    self.is_admin = is_admin
    if self.is_admin is None:
        # Infer admin status from the (case-insensitive) role list.
        lowered = [name.lower() for name in self.roles]
        self.is_admin = 'admin' in lowered
    elif self.is_admin and 'admin' not in self.roles:
        # Explicit admins always carry the 'admin' role.
        self.roles.append('admin')
    self.read_deleted = read_deleted
    self.remote_address = remote_address

    # Normalize the timestamp: default to now, parse strings.
    if not timestamp:
        timestamp = utils.utcnow()
    elif isinstance(timestamp, basestring):
        timestamp = utils.parse_strtime(timestamp)
    self.timestamp = timestamp

    if not request_id:
        request_id = generate_request_id()
    self.request_id = request_id
    self.auth_token = auth_token
    self.strategy = strategy

    # Publish to greenthread-local storage unless an existing context
    # must be preserved.
    if overwrite or not hasattr(local.store, 'context'):
        local.store.context = self
def process_put_metric_data_msg(self, metric_key, message): """ 데이터베이스에 MetricArchive 컬럼패밀리에 입력된 값 추가. 메모리 (self.metrics)에도 입력된 값 추가. 메모리 상의 메트릭을 기반으로 데이터베이스에 StatArchive 컬럼패밀리 업데이트. """ # 메시지 값이 없는 경우 종료 if message["value"] is None: return # 메트릭 가져오기 if metric_key not in self.metrics: self.metrics[metric_key] = MetricMonitor(metric_key, self.cass) timestamp = utils.parse_strtime(message["timestamp"]) self.metrics[metric_key].put_metric_data( metric_key, timestamp=timestamp, value=message["value"], unit=message["unit"] )
def get_state_update_value(h):
    """Return (old_state, new_state, query_date) from an alarm
    history item, parsing the query date into a datetime."""
    new_state = h.data['newState']
    old_value = h.data['oldState']['stateValue']
    new_value = new_state['stateValue']
    when = utils.parse_strtime(
        new_state['stateReasonData']['queryDate'])
    return old_value, new_value, when