Example #1
    def meter_sms_actions(self, project_id, receivers):
        ctxt = get_admin_context()
        local_receivers = [r for r in receivers if r.startswith("+82")]
        international_receivers = [
            r for r in receivers if not r.startswith("+82")
        ]

        self.api.put_metric_data(ctxt,
                                 project_id,
                                 namespace="SPCS/SYNAPS",
                                 metric_name="LocalSMSActionCount",
                                 dimensions={},
                                 value=len(local_receivers),
                                 unit="Count",
                                 timestamp=utils.strtime(utils.utcnow()),
                                 is_admin=True)

        self.api.put_metric_data(ctxt,
                                 project_id,
                                 namespace="SPCS/SYNAPS",
                                 metric_name="InternationalSMSActionCount",
                                 dimensions={},
                                 value=len(international_receivers),
                                 unit="Count",
                                 timestamp=utils.strtime(utils.utcnow()),
                                 is_admin=True)

        LOG.audit("Meter SMS: %s %s %s", project_id, len(receivers), receivers)
Example #2
 def set_alarm_actions(self, context, project_id, alarm_names, enabled):
     for alarm_name in alarm_names:
         alarm_key = self.cass.get_metric_alarm_key(project_id, alarm_name)
         if not alarm_key:
             raise InvalidParameterValue("Alarm %s does not exist" % 
                                         alarm_name)
     
     for alarm_name in alarm_names:
         alarm_key = self.cass.get_metric_alarm_key(project_id, alarm_name)
         history_data = {'actions_enabled':enabled,
                         'project_id': project_id}
         self.cass.put_metric_alarm(alarm_key, history_data)
         
         if enabled:
             summary = "Alarm actions for %s are enabled" % alarm_name
         else:
             summary = "Alarm actions for %s are disabled" % alarm_name
         history_key = uuid.uuid4()
         history_column = {
             'project_id': project_id,
             'alarm_key': alarm_key,
             'alarm_name': alarm_name,
             'history_data': json.dumps(history_data),
             'history_item_type': 'ConfigurationUpdate',
             'history_summary':summary,
             'timestamp': utils.utcnow()
         }
         self.cass.insert_alarm_history(history_key, history_column)            
Example #3
    def put_metric_data(self, metric_key, timestamp, value, unit=None):
        def get_stats(tmp_stat):
            try:
                ret = dict(zip(self.COLUMNS,
                               map(lambda x: x.values()[0], tmp_stat)))
                # replace missing values with NaN
                ret = dict((k, float("nan") if v is None else v)
                           for k, v in ret.items())
            except IndexError:
                storm.log("index %s is not in DB." % time_idx)
                ret = {
                    "SampleCount": float("nan"),
                    "Sum": float("nan"),
                    "Average": float("nan"),
                    "Minimum": float("nan"),
                    "Maximum": float("nan"),
                }
            return ret

        time_idx = timestamp.replace(second=0, microsecond=0)
        time_diff = utils.utcnow() - time_idx

        if timedelta(seconds=self.STATISTICS_TTL) < time_diff:
            msg = "index %s is older than TTL. It doesn't need to insert DB"
            storm.log(msg % time_idx)
            return

        if time_idx not in self.df.index:
            self._reindex()

        value = utils.to_default_unit(value, unit)

        try:
            stat = self.df.ix[time_idx]

            # replace missing values with NaN
            stat = stat.map(lambda v: float("nan") if v is None else v)

        except KeyError:
            stat = self.cass.get_metric_statistics_for_key(metric_key, time_idx)
            stat = get_stats(stat)

        stat["SampleCount"] = 1.0 if isnull(stat["SampleCount"]) else stat["SampleCount"] + 1.0
        stat["Sum"] = value if isnull(stat["Sum"]) else stat["Sum"] + value
        stat["Average"] = stat["Sum"] / stat["SampleCount"]
        stat["Minimum"] = value if (isnull(stat["Minimum"]) or stat["Minimum"] > value) else stat["Minimum"]
        stat["Maximum"] = value if (isnull(stat["Maximum"]) or stat["Maximum"] < value) else stat["Maximum"]

        # insert into DB
        stat_dict = {
            "SampleCount": {time_idx: stat["SampleCount"]},
            "Sum": {time_idx: stat["Sum"]},
            "Average": {time_idx: stat["Average"]},
            "Minimum": {time_idx: stat["Minimum"]},
            "Maximum": {time_idx: stat["Maximum"]},
        }

        ttl = self.STATISTICS_TTL - time_diff.total_seconds()
        self.cass.insert_stat(self.metric_key, stat_dict, ttl)
        storm.log("metric data inserted %s" % (self.metric_key))
Example #4
 def parse_metric_data(metric):
     try:
         dimensions_ = metric.get('dimensions', {})
         dimensions = utils.extract_member_dict(dimensions_)
     except KeyError:
         err = "Unsuitable Dimensions Value - %s" % str(dimensions_)
         raise InvalidParameterValue(err)
     
     self.check_dimensions(dimensions)
 
     metric_name = metric.get('metric_name')
     unit = metric.get('unit', 'None')
     value = metric.get('value')
     req_timestamp = metric.get('timestamp')
     timestamp = req_timestamp if req_timestamp \
                 else utils.strtime(utils.utcnow())
     timebound = (datetime.datetime.utcnow() - 
                  datetime.timedelta(
                                 seconds=FLAGS.get('statistics_ttl')))
     
     if utils.parse_strtime(timestamp) < timebound:
         err = "Stale metric data - %s" % timestamp
         raise InvalidParameterValue(err)
     
     self.check_metric_name(metric_name)
     self.check_unit(unit)
     
     return metric_name, dimensions, value, unit, timestamp 
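This parser round-trips timestamps through utils.strtime and utils.parse_strtime, which are not shown in this listing. A minimal stand-in, assuming an ISO-8601-style format purely for illustration (the real format string in synaps.utils may differ):

import datetime

# assumed format; the actual one lives in synaps.utils
TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ"


def strtime(at):
    # datetime -> string, as utils.strtime is used above
    return at.strftime(TIME_FORMAT)


def parse_strtime(timestr):
    # string -> datetime, as utils.parse_strtime is used above
    return datetime.datetime.strptime(timestr, TIME_FORMAT)


now = datetime.datetime.utcnow()
assert parse_strtime(strtime(now)) == now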
Example #5
 def log_request_completion(self, response, request, start):
     apireq = request.environ.get('cloudwatch.request', None)
     if apireq:
         controller = apireq.controller
         action = apireq.action
     else:
         controller = None
         action = None
     ctxt = request.environ.get('synaps.context', None)
     delta = utils.utcnow() - start
     seconds = delta.seconds
     microseconds = delta.microseconds
     LOG.info(
         "%s.%ss %s %s %s %s:%s %s [%s] %s %s",
         seconds,
         microseconds,
         request.remote_addr,
         request.method,
         "%s%s" % (request.script_name, request.path_info),
         controller,
         action,
         response.status_int,
         request.user_agent,
         request.content_type,
         response.content_type,
         context=ctxt)        
Example #6
    def put_metric_data(self,
                        context,
                        project_id,
                        namespace,
                        metric_name,
                        dimensions,
                        value,
                        unit,
                        timestamp=None,
                        is_admin=False):
        admin_namespace = FLAGS.get('admin_namespace')
        if namespace.startswith(admin_namespace) and not is_admin:
            raise AdminRequired()

        timestamp = timestamp or utils.strtime(utils.utcnow())

        message = {
            'project_id': project_id,
            'namespace': namespace,
            'metric_name': metric_name,
            'dimensions': dimensions,
            'value': value,
            'unit': unit,
            'timestamp': timestamp,
            'context': context.to_dict()
        }

        self.rpc.send_msg(rpc.PUT_METRIC_DATA_MSG_ID, message)
        LOG.info("PUT_METRIC_DATA_MSG sent")

        return {}
Example #7
    def set_alarm_actions(self, context, project_id, alarm_names, enabled):
        for alarm_name in alarm_names:
            alarm_key = self.cass.get_metric_alarm_key(project_id, alarm_name)
            if not alarm_key:
                raise InvalidParameterValue("Alarm %s does not exist" %
                                            alarm_name)

        for alarm_name in alarm_names:
            alarm_key = self.cass.get_metric_alarm_key(project_id, alarm_name)
            history_data = {
                'actions_enabled': enabled,
                'project_id': project_id
            }
            self.cass.put_metric_alarm(alarm_key, history_data)

            if enabled:
                summary = "Alarm actions for %s are enabled" % alarm_name
            else:
                summary = "Alarm actions for %s are disabled" % alarm_name
            history_key = uuid.uuid4()
            history_column = {
                'project_id': project_id,
                'alarm_key': alarm_key,
                'alarm_name': alarm_name,
                'history_data': json.dumps(history_data),
                'history_item_type': 'ConfigurationUpdate',
                'history_summary': summary,
                'timestamp': utils.utcnow()
            }
            self.cass.insert_alarm_history(history_key, history_column)
Example #8
    def check_alarms(self, query_time=None):
        query_time = query_time if query_time else utils.utcnow()
        for alarmkey, alarm in self.alarms.iteritems():
            self._check_alarm(alarmkey, alarm, query_time)

        self.lastchecked = self.lastchecked if self.lastchecked else query_time
        if self.lastchecked < query_time:
            self.lastchecked = query_time
Example #9
 def meter_instance_actions(self, project_id, receivers):
     ctxt = get_admin_context()
     self.api.put_metric_data(ctxt, project_id, namespace="SPCS/SYNAPS",
                              metric_name="InstanceActionCount",
                              dimensions={}, value=len(receivers),
                              unit="Count",
                              timestamp=utils.strtime(utils.utcnow()),
                              is_admin=True)
     LOG.audit("Meter InstanceAction: %s %s %s", project_id, len(receivers),
               receivers)
Example #10
 def meter_instance_actions(self, project_id, receivers):
     ctxt = get_admin_context()
     self.api.put_metric_data(ctxt,
                              project_id,
                              namespace="SPCS/SYNAPS",
                              metric_name="InstanceActionCount",
                              dimensions={},
                              value=len(receivers),
                              unit="Count",
                              timestamp=utils.strtime(utils.utcnow()),
                              is_admin=True)
     LOG.audit("Meter InstanceAction: %s %s %s", project_id, len(receivers),
               receivers)
Example #11
    def meter_sms_actions(self, project_id, receivers):
        ctxt = get_admin_context()
        local_receivers = [r for r in receivers if r.startswith("+82")]
        international_receivers = [r for r in receivers if not 
                                   r.startswith("+82")]
        
        self.api.put_metric_data(ctxt, project_id, namespace="SPCS/SYNAPS",
                                 metric_name="LocalSMSActionCount",
                                 dimensions={}, value=len(local_receivers),
                                 unit="Count",
                                 timestamp=utils.strtime(utils.utcnow()),
                                 is_admin=True)

        self.api.put_metric_data(ctxt, project_id, namespace="SPCS/SYNAPS",
                                 metric_name="InternationalSMSActionCount",
                                 dimensions={},
                                 value=len(international_receivers),
                                 unit="Count",
                                 timestamp=utils.strtime(utils.utcnow()),
                                 is_admin=True)
        
        LOG.audit("Meter SMS: %s %s %s", project_id, len(receivers), receivers)
Example #12
 def alarm_history_delete(self, alarm_key, alarm):
     item_type = 'ConfigurationUpdate'
     summary = "Alarm %s deleted" % alarm['alarm_name']
     
     history_key = uuid.uuid4()
     history_column = {
         'project_id': alarm['project_id'],
         'alarm_key': alarm_key,
         'alarm_name': alarm['alarm_name'],
         'history_data': json.dumps({'type': 'Delete', 'version': '1.0'}),
         'history_item_type': item_type,
         'history_summary': summary,
         'timestamp': utils.utcnow()
     }
     
     self.cass.insert_alarm_history(history_key, history_column)
Example #13
    def alarm_history_delete(self, alarm_key, alarm):
        item_type = "ConfigurationUpdate"
        summary = "Alarm %s deleted" % alarm["alarm_name"]

        history_key = uuid.uuid4()
        history_column = {
            "project_id": alarm["project_id"],
            "alarm_key": alarm_key,
            "alarm_name": alarm["alarm_name"],
            "history_data": json.dumps({"type": "Delete", "version": "1.0"}),
            "history_item_type": item_type,
            "history_summary": summary,
            "timestamp": utils.utcnow(),
        }

        self.cass.insert_alarm_history(history_key, history_column)
Example #14
    def alarm_history_state_update(self, alarmkey, alarm,
                                   notification_message):
        """
        update alarm history based on notification message
        
        notification_message = {
            'method': "email",
            'receivers': email_receivers,
            'subject': message['subject'],
            'body': message['body'],
            'state': "ok" | "failed"
        }
        """
        item_type = 'Action'
        project_id = alarm['project_id']
        if notification_message.get("method") in ("email", "SMS"):
            if notification_message.get('state', 'ok') == 'ok':
                history_summary = "Message '%(subject)s' is sent via"\
                                  " %(method)s" % notification_message
            else:
                history_summary = "Failed to send a message '%(subject)s' via"\
                                  " %(method)s" % notification_message
        elif notification_message.get("method") in ("InstanceAction",):
            if notification_message.get('state', 'ok') == 'ok':
                history_summary = "%(method)s %(receivers)s is invoked." % \
                                  notification_message
            else:
                history_summary = "Failed to invoke %(method)s %(receivers)s."\
                                  % notification_message

        timestamp = utils.utcnow()

        history_key = uuid4()
        column = {
            'project_id': project_id,
            'alarm_key': UUID(alarmkey),
            'alarm_name': alarm['alarm_name'],
            'history_data': json.dumps(notification_message),
            'history_item_type': item_type,
            'history_summary': history_summary,
            'timestamp': timestamp
        }

        self.cass.insert_alarm_history(history_key,
                                       column,
                                       ttl=self.statistics_ttl)
        LOG.info("History updated. %s", history_summary)
Example #15
 def alarm_history_state_update(self, alarmkey, alarm,
                                notification_message):
     """
     update alarm history based on notification message
     
     notification_message = {
         'method': "email",
         'receivers': email_receivers,
         'subject': message['subject'],
         'body': message['body'],
         'state': "ok" | "failed"
     }
     """        
     item_type = 'Action'
     project_id = alarm['project_id']
     if notification_message.get("method") in ("email", "SMS"):
         if notification_message.get('state', 'ok') == 'ok':
             history_summary = "Message '%(subject)s' is sent via"\
                               " %(method)s" % notification_message
         else:
             history_summary = "Failed to send a message '%(subject)s' via"\
                               " %(method)s" % notification_message
     elif notification_message.get("method") in ("InstanceAction",):
         if notification_message.get('state', 'ok') == 'ok':
             history_summary = "%(method)s %(receivers)s is invoked." % \
                               notification_message
         else:
             history_summary = "Failed to invoke %(method)s %(receivers)s."\
                               % notification_message 
         
     
     timestamp = utils.utcnow()
     
     history_key = uuid4()
     column = {'project_id':project_id,
               'alarm_key':UUID(alarmkey),
               'alarm_name':alarm['alarm_name'],
               'history_data': json.dumps(notification_message),
               'history_item_type':item_type,
               'history_summary':history_summary,
               'timestamp':timestamp}
     
     self.cass.insert_alarm_history(history_key, column,
                                    ttl=self.statistics_ttl)
     LOG.info("History updated. %s", history_summary)
Example #16
    def put_metric_data(self, context, project_id, namespace, metric_name,
                        dimensions, value, unit, timestamp=None,
                        is_admin=False):
        admin_namespace = FLAGS.get('admin_namespace')
        if namespace.startswith(admin_namespace) and not is_admin:
            raise AdminRequired()
        
        timestamp = timestamp or utils.strtime(utils.utcnow())
        
        message = {'project_id': project_id, 'namespace':namespace,
                   'metric_name': metric_name, 'dimensions': dimensions,
                   'value':value, 'unit':unit, 'timestamp':timestamp,
                   'context': context.to_dict()}
        
        self.rpc.send_msg(rpc.PUT_METRIC_DATA_MSG_ID, message)
        LOG.info("PUT_METRIC_DATA_MSG sent")

        return {}
Example #17
    def __init__(
        self,
        user_id,
        project_id,
        is_admin=None,
        read_deleted="no",
        roles=None,
        remote_address=None,
        timestamp=None,
        request_id=None,
        auth_token=None,
        strategy="noauth",
        overwrite=True,
    ):
        """
        :param read_deleted: 'no' indicates deleted records are hidden, 'yes'
            indicates deleted records are visible, 'only' indicates that
            *only* deleted records are visible.

        :param overwrite: Set to False to ensure that the greenthread local
            copy of the index is not overwritten.
        """
        self.user_id = user_id
        self.project_id = project_id
        self.roles = roles or []
        self.is_admin = is_admin
        if self.is_admin is None:
            self.is_admin = "admin" in [x.lower() for x in self.roles]
        elif self.is_admin and "admin" not in self.roles:
            self.roles.append("admin")
        self.read_deleted = read_deleted
        self.remote_address = remote_address
        if not timestamp:
            timestamp = utils.utcnow()
        if isinstance(timestamp, basestring):
            timestamp = utils.parse_strtime(timestamp)
        self.timestamp = timestamp
        if not request_id:
            request_id = generate_request_id()
        self.request_id = request_id
        self.auth_token = auth_token
        self.strategy = strategy
        if overwrite or not hasattr(local.store, "context"):
            local.store.context = self
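The metering examples earlier call get_admin_context(), which is not shown in this listing. A plausible sketch built on the constructor above, assuming the class is named RequestContext (both the name and the defaults are assumptions):

def get_admin_context(read_deleted="no"):
    # assumed helper: an anonymous, admin-privileged context that does not
    # overwrite the greenthread-local context
    return RequestContext(user_id=None,
                          project_id=None,
                          is_admin=True,
                          read_deleted=read_deleted,
                          overwrite=False)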
Example #18
    def alarm_history_delete(self, alarm_key, alarm):
        item_type = 'ConfigurationUpdate'
        summary = "Alarm %s deleted" % alarm['alarm_name']

        history_key = uuid.uuid4()
        history_column = {
            'project_id': alarm['project_id'],
            'alarm_key': alarm_key,
            'alarm_name': alarm['alarm_name'],
            'history_data': json.dumps({
                'type': 'Delete',
                'version': '1.0'
            }),
            'history_item_type': item_type,
            'history_summary': summary,
            'timestamp': utils.utcnow()
        }

        self.cass.insert_alarm_history(history_key, history_column)
Example #19
        def parse_metric_data(metric):
            try:
                dimensions_ = metric.get('dimensions', {})
                dimensions = utils.extract_member_dict(dimensions_)
            except KeyError:
                err = "Unsuitable Dimensions Value - %s" % str(dimensions_)
                raise InvalidParameterValue(err)
        
            metric_name = metric.get('metric_name')
            unit = metric.get('unit', 'None')
            value = metric.get('value')
            req_timestamp = metric.get('timestamp')
            timestamp = req_timestamp if req_timestamp \
                        else utils.strtime(utils.utcnow())

            self.check_metric_name(metric_name)
            self.check_unit(unit)
            
            return metric_name, dimensions, value, unit, timestamp 
Example #20
    def __init__(self,
                 user_id,
                 project_id,
                 is_admin=None,
                 read_deleted="no",
                 roles=None,
                 remote_address=None,
                 timestamp=None,
                 request_id=None,
                 auth_token=None,
                 strategy='noauth',
                 overwrite=True):
        """
        :param read_deleted: 'no' indicates deleted records are hidden, 'yes'
            indicates deleted records are visible, 'only' indicates that
            *only* deleted records are visible.

        :param overwrite: Set to False to ensure that the greenthread local
            copy of the index is not overwritten.
        """
        self.user_id = user_id
        self.project_id = project_id
        self.roles = roles or []
        self.is_admin = is_admin
        if self.is_admin is None:
            self.is_admin = 'admin' in [x.lower() for x in self.roles]
        elif self.is_admin and 'admin' not in self.roles:
            self.roles.append('admin')
        self.read_deleted = read_deleted
        self.remote_address = remote_address
        if not timestamp:
            timestamp = utils.utcnow()
        if isinstance(timestamp, basestring):
            timestamp = utils.parse_strtime(timestamp)
        self.timestamp = timestamp
        if not request_id:
            request_id = generate_request_id()
        self.request_id = request_id
        self.auth_token = auth_token
        self.strategy = strategy
        if overwrite or not hasattr(local.store, 'context'):
            local.store.context = self
Example #21
    def alarm_history_state_update(self, alarmkey, alarm, new_state,
                                          old_state):
        item_type = 'StateUpdate'
        project_id = alarm['project_id']
        summary_tpl = "Alarm updated from %s to %s" 
        summary = summary_tpl % (old_state.get('stateValue',
                                               'INSUFFICIENT_DATA'),
                                 new_state.get('stateValue',
                                               'INSUFFICIENT_DATA'))
        timestamp = utils.utcnow()
        data = {'newState':new_state, 'oldState':old_state, 'version':'1.0'}

        history_key = uuid.uuid4()
        column = {'project_id':project_id, 'alarm_key':alarmkey,
                  'alarm_name':alarm['alarm_name'],
                  'history_data':json.dumps(data),
                  'history_item_type':item_type, 'history_summary':summary,
                  'timestamp':timestamp}
        
        self.cass.insert_alarm_history(history_key, column)
        LOG.audit("Alarm history added \n %s", summary)
Example #22
    def alarm_history_state_update(self, alarmkey, alarm, new_state,
                                   old_state):
        item_type = 'StateUpdate'
        project_id = alarm['project_id']
        summary_tpl = "Alarm updated from %s to %s"
        summary = summary_tpl % (
            old_state.get('stateValue', 'INSUFFICIENT_DATA'),
            new_state.get('stateValue', 'INSUFFICIENT_DATA'))
        timestamp = utils.utcnow()
        data = {'newState': new_state, 'oldState': old_state, 'version': '1.0'}

        history_key = uuid.uuid4()
        column = {
            'project_id': project_id,
            'alarm_key': alarmkey,
            'alarm_name': alarm['alarm_name'],
            'history_data': json.dumps(data),
            'history_item_type': item_type,
            'history_summary': summary,
            'timestamp': timestamp
        }

        self.cass.insert_alarm_history(history_key, column)
        LOG.audit("Alarm history added \n %s", summary)
Example #23
    def alarm_history_state_update(self, alarmkey, alarm, notification_message):
#                notification_message = {
#                    'method': "email",
#                    'receivers': email_receivers,
#                    'subject': message['subject'],
#                    'body': message['body']
#                }        
        item_type = 'Action'
        project_id = alarm['project_id']
        history_summary = ("Message '%(subject)s' is sent via %(method)s" % 
                           notification_message)
        timestamp = utils.utcnow()
        
        history_key = uuid4()
        column = {'project_id':project_id,
                  'alarm_key':UUID(alarmkey),
                  'alarm_name':alarm['alarm_name'],
                  'history_data': json.dumps(notification_message),
                  'history_item_type':item_type,
                  'history_summary':history_summary,
                  'timestamp':timestamp}
        
        self.cass.insert_alarm_history(history_key, column)
        storm.log("alarm history \n %s" % history_summary)
Example #24
    def alarm_history_state_update(self, alarmkey, alarm, new_state, old_state):
        item_type = "StateUpdate"
        project_id = alarm["project_id"]
        summary_tpl = "Alarm updated from %s to %s"
        summary = summary_tpl % (
            old_state.get("stateValue", "INSUFFICIENT_DATA"),
            new_state.get("stateValue", "INSUFFICIENT_DATA"),
        )
        timestamp = utils.utcnow()
        data = {"newState": new_state, "oldState": old_state, "version": "1.0"}

        history_key = uuid.uuid4()
        column = {
            "project_id": project_id,
            "alarm_key": alarmkey,
            "alarm_name": alarm["alarm_name"],
            "history_data": json.dumps(data),
            "history_item_type": item_type,
            "history_summary": summary,
            "timestamp": timestamp,
        }

        self.cass.insert_alarm_history(history_key, column)
        storm.log("alarm history \n %s" % summary)
Example #25
 def check_alarms(self):
     for alarmkey, alarm in self.alarms.iteritems():
         self._check_alarm(alarmkey, alarm)
     self.lastchecked = utils.utcnow()
Example #26
    def put_metric_data(self, metric_key, timestamp, value, unit=None):

        time_idx = timestamp.replace(second=0, microsecond=0)

        if timedelta(seconds=self.cass.STATISTICS_TTL) < (utils.utcnow() - time_idx):
            msg = "index %s is older than TTL. It doesn't need to insert DB"
            storm.log(msg % time_idx)
            return

        if time_idx not in self.df.index:
            self._reindex()

        value = utils.to_default_unit(value, unit)

        try:
            stat = self.df.ix[time_idx]

            # replace missing values with NaN
            stat = stat.map(lambda v: float("nan") if v is None else v)

        except KeyError:
            stat = self.cass.get_metric_statistics_for_key(metric_key, time_idx)
            if [{}, {}, {}, {}, {}] == stat:
                storm.log("index %s is not in DB." % time_idx)
                stat = {
                    "SampleCount": float("nan"),
                    "Sum": float("nan"),
                    "Average": float("nan"),
                    "Minimum": float("nan"),
                    "Maximum": float("nan"),
                }

            else:
                stat = dict(zip(self.cass.STATISTICS,
                                map(lambda x: x.values()[0], stat)))
                # replace missing values with NaN
                stat = dict((k, float("nan") if v is None else v)
                            for k, v in stat.items())

        stat["SampleCount"] = 1.0 if isnan(stat["SampleCount"]) else stat["SampleCount"] + 1.0
        stat["Sum"] = value if isnan(stat["Sum"]) else stat["Sum"] + value
        stat["Average"] = stat["Sum"] / stat["SampleCount"]
        stat["Minimum"] = value if isnan(stat["Minimum"]) or stat["Minimum"] > value else stat["Minimum"]
        stat["Maximum"] = value if isnan(stat["Maximum"]) or stat["Maximum"] < value else stat["Maximum"]

        # insert into DB
        stat_dict = {
            "SampleCount": {time_idx: stat["SampleCount"]},
            "Sum": {time_idx: stat["Sum"]},
            "Average": {time_idx: stat["Average"]},
            "Minimum": {time_idx: stat["Minimum"]},
            "Maximum": {time_idx: stat["Maximum"]},
        }

        self.cass.insert_stat(self.metric_key, stat_dict)
        storm.log("metric data inserted %s" % (self.metric_key))

        # self.df.ix[time_idx] = stat

        now = utils.utcnow().replace(second=0, microsecond=0)
        timedelta_buf = now - time_idx

        if timedelta_buf <= timedelta(seconds=self.MAX_START_PERIOD):
            # check alarms
            self.check_alarms()
Example #27
    def process_put_metric_alarm_msg(self, metric_key, message):
        def get_alarm_key(project_id, alarm_name):
            key = self.cass.get_metric_alarm_key(project_id, alarm_name)
            return key

        def metricalarm_for_json(metricalarm):
            cut = metricalarm.get('alarm_configuration_updated_timestamp')
            
            alarm_for_json = {
                'actionEnabled': metricalarm.get('actions_enabled', False),
                'alarmActions': metricalarm.get('alarm_actions', []),
                'alarmArn': metricalarm.get('alarm_arn'),
                'alarmConfigurationUpdatedTimestamp': utils.strtime(cut),
                'alarmDescription': metricalarm.get('alarm_description'),
                'alarmName': metricalarm.get('alarm_name'),
                'comparisonOperator': metricalarm.get('comparison_operator'),
                'dimensions': metricalarm.get('dimensions'),
                'evaluationPeriods': metricalarm.get('evaluation_periods'),
                'insufficientDataActions': 
                    metricalarm.get('insufficient_data_actions', []),
                'metricName':metricalarm.get('metric_name'),
                'namespace':metricalarm.get('namespace'),
                'okactions':metricalarm.get('ok_actions', []),
                'statistic':metricalarm.get('statistic'),
                'threshold':metricalarm.get('threshold'),
                'unit':metricalarm.get('unit'),
            }
            return alarm_for_json
                
        if metric_key not in self.metrics:
            self.metrics[metric_key] = MetricMonitor(metric_key, self.cass)
        project_id = message['project_id']
        metricalarm = message['metricalarm']
        
        # build metricalarm column, alarmhistory column 
        alarm_key = get_alarm_key(project_id, metricalarm['alarm_name'])
        history_type = 'Update' if alarm_key else 'Create'
        now = utils.utcnow()
        if history_type == 'Update':
            original_alarm = self.cass.get_metric_alarm(alarm_key)
            for dict_key in ['state_updated_timestamp', 'state_reason',
                             'state_reason_data', 'state_value', 'project_id']:
                metricalarm[dict_key] = original_alarm[dict_key]
            metricalarm['alarm_configuration_updated_timestamp'] = now
            history_data = json.dumps({
                'updatedAlarm':metricalarm_for_json(metricalarm),
                'type':history_type,
                'version': '1.0'
            })
            summary = "Alarm %s updated" % metricalarm['alarm_name']                
        else:
            alarm_key = uuid.uuid4()
            state_reason = "Unchecked: Initial alarm creation"
            metricalarm.update({'state_updated_timestamp': now,
                                'alarm_configuration_updated_timestamp': now,
                                'state_reason': state_reason,
                                'state_reason_data': json.dumps({}),
                                'state_value': "INSUFFICIENT_DATA",
                                'project_id': project_id})
            history_data = json.dumps({
                'createdAlarm': metricalarm_for_json(metricalarm),
                'type':history_type, 'version': '1.0'
            })
            summary = "Alarm %s created" % metricalarm['alarm_name']

        metricalarm['metric_key'] = metric_key
        
        history_key = uuid.uuid4()
        history_column = {
            'project_id': project_id,
            'alarm_key': alarm_key,
            'alarm_name': metricalarm['alarm_name'],
            'history_data': history_data,
            'history_item_type': 'ConfigurationUpdate',
            'history_summary':summary,
            'timestamp': utils.utcnow()
        }
            
        self.cass.put_metric_alarm(alarm_key, metricalarm)
        self.cass.insert_alarm_history(history_key, history_column)
        LOG.info("metric alarm inserted: %s %s", alarm_key, metricalarm)       
                
        # load metric in memory     
        self.metrics[metric_key].put_alarm(alarm_key, metricalarm)
Example #28
    def put_metric_alarm(self, project_id, metricalarm):
        """
        알람을 DB에 넣고 값이 빈 dictionary 를 반환한다.
        메트릭 유무 확인
        알람 히스토리 발생.
        """
        def metricalarm_for_json(metricalarm):
            alarm_for_json = {
                'actionEnabled': metricalarm.get('actions_enabled', False),
                'alarmActions': metricalarm.get('alarm_actions', []),
                'alarmArn': metricalarm.get('alarm_arn'),
                'alarmConfigurationUpdatedTimestamp': 
                      metricalarm.get('alarm_configuration_updated_timestamp'),
                'alarmDescription': metricalarm.get('alarm_description'),
                'alarmName': metricalarm.get('alarm_name'),
                'comparisonOperator': metricalarm.get('comparison_operator'),
                'dimensions': metricalarm.get('dimensions'),
                'evaluationPeriods': metricalarm.get('evaluation_periods'),
                'insufficientDataActions': 
                    metricalarm.get('insufficient_data_actions', []),
                'metricName':metricalarm.get('metric_name'),
                'namespace':metricalarm.get('namespace'),
                'okactions':metricalarm.get('ok_actions', []),
                'statistic':metricalarm.get('statistic'),
                'threshold':metricalarm.get('threshold'),
                'unit':metricalarm.get('unit'),
            }
            return alarm_for_json

        now = utils.utcnow()
        metricalarm = metricalarm.to_columns()
        
        # check whether the metric exists
        metric_key = self.cass.get_metric_key_or_create(
            project_id=project_id,
            namespace=metricalarm['namespace'],
            metric_name=metricalarm['metric_name'],
            dimensions=json.loads(metricalarm['dimensions']),
            unit=metricalarm['unit'],
        )
        
        metricalarm['project_id'] = project_id
        metricalarm['metric_key'] = metric_key
        metricalarm['alarm_arn'] = "arn:spcs:synaps:%s:alarm:%s" % (
            project_id, metricalarm['alarm_name']
        )
        metricalarm['alarm_configuration_updated_timestamp'] = now
        
        # check whether the alarm exists
        alarm_key = self.cass.get_metric_alarm_key(
            project_id=project_id, alarm_name=metricalarm['alarm_name']
        )
        
        
        if alarm_key:            
            history_type = 'Update'
            before_alarm = self.cass.get_metric_alarm(alarm_key)
            if before_alarm['metric_key'] != metricalarm['metric_key']:
                raise InvalidRequest("Metric cannot be changed.")
            
            metricalarm['state_updated_timestamp'] = \
                before_alarm['state_updated_timestamp']
            metricalarm['state_reason'] = before_alarm['state_reason']
            metricalarm['state_reason_data'] = \
                before_alarm['state_reason_data']
            metricalarm['state_value'] = before_alarm['state_value']
            
        else:            
            history_type = "Create"
            alarm_key = uuid.uuid4()
            metricalarm['state_updated_timestamp'] = utils.utcnow()
            metricalarm['state_reason'] = "Unchecked: Initial alarm creation"
            metricalarm['state_reason_data'] = json.dumps({})
            metricalarm['state_value'] = "INSUFFICIENT_DATA"
            
        
        # insert alarm into database
        self.cass.put_metric_alarm(alarm_key, metricalarm)
        LOG.debug("metric alarm inserted alarm key: %s" % (alarm_key))

        # to make json, convert datetime type into str        
        metricalarm['state_updated_timestamp'] = utils.strtime(
            metricalarm['state_updated_timestamp']
        )
        metricalarm['alarm_configuration_updated_timestamp'] = utils.strtime(
            metricalarm['alarm_configuration_updated_timestamp']
        )
        metricalarm['metric_key'] = str(metric_key)
        
        if history_type == "Update":
            history_data = json.dumps({
                'updatedAlarm':metricalarm_for_json(metricalarm),
                'type':history_type,
                'version': '1.0'
            })
            summary = "Alarm %s updated" % metricalarm['alarm_name']
        else:
            history_data = json.dumps({
                'createdAlarm': metricalarm_for_json(metricalarm),
                'type':history_type, 'version': '1.0'
            })
            summary = "Alarm %s created" % metricalarm['alarm_name']
        
        history_key = uuid.uuid4()
        history_column = {
            'project_id': project_id,
            'alarm_key': alarm_key,
            'alarm_name': metricalarm['alarm_name'],
            'history_data': history_data,
            'history_item_type': 'ConfigurationUpdate',
            'history_summary':summary,
            'timestamp': utils.utcnow()
        }
            
        self.cass.insert_alarm_history(history_key, history_column)
        
        message = {'project_id': project_id, 'metric_key': str(metric_key),
                   'metricalarm': metricalarm}
        self.rpc.send_msg(rpc.PUT_METRIC_ALARM_MSG_ID, message)
        LOG.info("PUT_METRIC_ALARM_MSG sent")

        return {}
Example #29
    def put_metric_alarm(self, context, project_id, metricalarm):
        """
        Send put metric alarm message to Storm 
        """
        def _validate_actions(alarm):
            for actions in (alarm.ok_actions, alarm.insufficient_data_actions,
                            alarm.alarm_actions):
                for action in actions:
                    if utils.validate_groupnotification_action(action):
                        group = utils.parse_groupnotification_action(action)
                        if not self.cass.get_notification_group(group):
                            raise InvalidNotificationGroup()

        now = utils.utcnow()
        _validate_actions(metricalarm)
        metricalarm = metricalarm.to_columns()
        alarm_name = metricalarm['alarm_name']
        namespace = metricalarm['namespace']
        metric_name = metricalarm['metric_name']
        dimensions = json.loads(metricalarm['dimensions'])

        # check if we have metric in database
        metric_key = self.cass.get_metric_key_or_create(
            project_id=project_id,
            namespace=namespace,
            metric_name=metric_name,
            dimensions=dimensions,
            unit=metricalarm['unit'])

        update_data = {
            'project_id': project_id,
            'metric_key': str(metric_key),
            'alarm_arn':
            "arn:spcs:synaps:%s:alarm:%s" % (project_id, alarm_name),
            'alarm_configuration_updated_timestamp': utils.strtime(now)
        }
        metricalarm.update(update_data)

        # check if metric is changed
        alarm_key = self.cass.get_metric_alarm_key(project_id=project_id,
                                                   alarm_name=alarm_name)

        if alarm_key:
            original_alarm = self.cass.get_metric_alarm(alarm_key)
            if (str(original_alarm['metric_key']) != str(
                    metricalarm['metric_key'])):
                raise InvalidRequest("Metric cannot be changed. "
                                     "Delete alarm and retry.")
        else:
            # If alarm is newly added, check quotas
            # check alarm quota per project
            project_quota = FLAGS.get('alarm_quota_per_project')
            alarms_in_project = self.cass.get_alarm_count(project_id)
            if alarms_in_project >= project_quota:
                LOG.info("Too many alarms(%d) in the project %s",
                         alarms_in_project, project_id)
                raise ProjectAlarmQuotaExceeded()

            # check alarm quota per metric
            metric_quota = FLAGS.get('alarm_quota_per_metric')
            alarms_per_metric = self.cass.get_alarms_per_metric_count(
                project_id, namespace, metric_name, dimensions)
            if alarms_per_metric >= metric_quota:
                LOG.info("Too many alarms(%d) for this metric",
                         alarms_per_metric)
                raise MetricAlarmQuotaExceeded()

        message = {
            'project_id': project_id,
            'metric_key': str(metric_key),
            'metricalarm': metricalarm,
            'context': context.to_dict()
        }
        self.rpc.send_msg(rpc.PUT_METRIC_ALARM_MSG_ID, message)
        LOG.info("PUT_METRIC_ALARM_MSG sent")

        return {}
Example #30
    def _check_alarm(self, alarmkey, alarm, query_time=None):
        period = int(alarm['period'] / 60)
        evaluation_periods = alarm['evaluation_periods']
        statistic = alarm['statistic']
        threshold = alarm['threshold']
        alarm_name = alarm['alarm_name']
        cmp_op = self.CMP_MAP[alarm['comparison_operator']]
        unit = alarm['unit']
        state_value = alarm['state_value']
        
        query_time = query_time if query_time else utils.utcnow()
        
        for i in range(self.insufficient_buffer):
            end_idx = (query_time.replace(second=0, microsecond=0) - 
                       (i + 1) * datetools.Minute())
            try:
                end_datapoint = self.df[statistic].ix[end_idx]
            except KeyError:
                end_datapoint = None
            if not isnull(end_datapoint):
                break
            
        start_idx = (end_idx - (period * evaluation_periods) * 
                     datetools.Minute())
        start_ana_idx = start_idx - datetools.Minute() * period
        
        func = self.ROLLING_FUNC_MAP[statistic]
        data = func(self.df[statistic].ix[start_ana_idx:end_idx], period,
                    min_periods=0).ix[start_idx:end_idx:period][1:]
        recent_datapoints = list(data)

        if unit and statistic != 'SampleCount':
            data = data / utils.UNIT_CONV_MAP[unit]
            threshold = threshold / utils.UNIT_CONV_MAP[unit]
                
        data = data.dropna()

        query_date = utils.strtime(query_time)
        reason_data = {
            "period":alarm['period'],
            "queryDate":query_date,
            "recentDatapoints": recent_datapoints,
            "startDate": utils.strtime(start_idx),
            "statistic":statistic,
            "threshold": threshold,
            "version":"1.0",
        }
        old_state = {'stateReason':alarm.get('reason', ""),
                     'stateValue':alarm.get('state_value',
                                            "INSUFFICIENT_DATA"),
                     'stateReasonData':
                        json.loads(alarm.get('reason_data', "{}"))}
        json_reason_data = json.dumps(reason_data)

        if len(data) == 0:
            if state_value != 'INSUFFICIENT_DATA':
                template = _("Insufficient Data: %d datapoints were unknown.")
                reason = template % (evaluation_periods - len(data))
                new_state = {'stateReason':reason,
                             'stateReasonData':reason_data,
                             'stateValue':'INSUFFICIENT_DATA'}
                self.update_alarm_state(alarmkey, 'INSUFFICIENT_DATA', reason,
                                        json_reason_data, query_time)
                self.cass.update_alarm_state(alarmkey, 'INSUFFICIENT_DATA',
                                             reason, json_reason_data,
                                             query_time)
                self.alarm_history_state_update(alarmkey, alarm,
                                                new_state, old_state)
                self.do_alarm_action(alarmkey, alarm, new_state, old_state,
                                     query_date)
                LOG.audit("Alarm %s status changed to INSUFFICIENT_DATA",
                          alarm_name)
        else:
            sufficient = len(data) >= evaluation_periods
            crossed = (sufficient and 
                       reduce(operator.and_, cmp_op(data, threshold)))
            com_op = alarm['comparison_operator']
            
            if crossed:
                template = _("Threshold Crossed: %d datapoints were %s " + 
                             "the threshold(%f). " + 
                             "The most recent datapoints: %s.")
                reason = template % (evaluation_periods,
                                     self.CMP_STR_MAP[com_op], threshold,
                                     recent_datapoints)
                if state_value != 'ALARM':
                    new_state = {'stateReason':reason,
                                 'stateReasonData':reason_data,
                                 'stateValue':'ALARM'}
                    
                    self.update_alarm_state(alarmkey, 'ALARM', reason,
                                            json_reason_data, query_time)
                    self.cass.update_alarm_state(alarmkey, 'ALARM', reason,
                                                 json_reason_data, query_time)
                    self.alarm_history_state_update(alarmkey, alarm,
                                                    new_state, old_state)
                    self.do_alarm_action(alarmkey, alarm, new_state, old_state,
                                         query_date)                    
                    LOG.audit("Alarm %s status changed to ALARM", alarm_name)
            else:
                template = _("Threshold Crossed: %d datapoints were not %s " + 
                             "the threshold(%f). " + 
                             "The most recent datapoints: %s.")
                reason = template % (evaluation_periods,
                                     self.CMP_STR_MAP[com_op], threshold,
                                     recent_datapoints)
                if state_value != 'OK':
                    new_state = {'stateReason':reason,
                                 'stateReasonData':reason_data,
                                 'stateValue':'OK'}                    
                    self.update_alarm_state(alarmkey, 'OK', reason,
                                            json_reason_data, query_time)
                    self.cass.update_alarm_state(alarmkey, 'OK', reason,
                                                 json_reason_data, query_time)
                    self.alarm_history_state_update(alarmkey, alarm,
                                                    new_state, old_state)
                    self.do_alarm_action(alarmkey, alarm, new_state, old_state,
                                         query_date)                            
                    LOG.audit("Alarm %s status changed to OK", alarm_name)
Example #31
    def process_put_metric_alarm_msg(self, metric_key, message):
        def get_alarm_key(project_id, alarm_name):
            key = self.cass.get_metric_alarm_key(project_id, alarm_name)
            return key

        def metricalarm_for_json(metricalarm):
            cut = metricalarm.get('alarm_configuration_updated_timestamp')

            alarm_for_json = {
                'actionEnabled':
                metricalarm.get('actions_enabled', False),
                'alarmActions':
                metricalarm.get('alarm_actions', []),
                'alarmArn':
                metricalarm.get('alarm_arn'),
                'alarmConfigurationUpdatedTimestamp':
                utils.strtime(cut),
                'alarmDescription':
                metricalarm.get('alarm_description'),
                'alarmName':
                metricalarm.get('alarm_name'),
                'comparisonOperator':
                metricalarm.get('comparison_operator'),
                'dimensions':
                metricalarm.get('dimensions'),
                'evaluationPeriods':
                metricalarm.get('evaluation_periods'),
                'insufficientDataActions':
                metricalarm.get('insufficient_data_actions', []),
                'metricName':
                metricalarm.get('metric_name'),
                'namespace':
                metricalarm.get('namespace'),
                'okactions':
                metricalarm.get('ok_actions', []),
                'statistic':
                metricalarm.get('statistic'),
                'threshold':
                metricalarm.get('threshold'),
                'unit':
                metricalarm.get('unit'),
            }
            return alarm_for_json

        if metric_key not in self.metrics:
            self.metrics[metric_key] = MetricMonitor(metric_key, self.cass)
        project_id = message['project_id']
        metricalarm = message['metricalarm']

        # build metricalarm column, alarmhistory column
        alarm_key = get_alarm_key(project_id, metricalarm['alarm_name'])
        history_type = 'Update' if alarm_key else 'Create'
        now = utils.utcnow()
        if history_type == 'Update':
            original_alarm = self.cass.get_metric_alarm(alarm_key)
            for dict_key in [
                    'state_updated_timestamp', 'state_reason',
                    'state_reason_data', 'state_value', 'project_id'
            ]:
                metricalarm[dict_key] = original_alarm[dict_key]
            metricalarm['alarm_configuration_updated_timestamp'] = now
            history_data = json.dumps({
                'updatedAlarm':
                metricalarm_for_json(metricalarm),
                'type':
                history_type,
                'version':
                '1.0'
            })
            summary = "Alarm %s updated" % metricalarm['alarm_name']
        else:
            alarm_key = uuid.uuid4()
            state_reason = "Unchecked: Initial alarm creation"
            metricalarm.update({
                'state_updated_timestamp': now,
                'alarm_configuration_updated_timestamp': now,
                'state_reason': state_reason,
                'state_reason_data': json.dumps({}),
                'state_value': "INSUFFICIENT_DATA",
                'project_id': project_id
            })
            history_data = json.dumps({
                'createdAlarm':
                metricalarm_for_json(metricalarm),
                'type':
                history_type,
                'version':
                '1.0'
            })
            summary = "Alarm %s created" % metricalarm['alarm_name']

        metricalarm['metric_key'] = metric_key

        history_key = uuid.uuid4()
        history_column = {
            'project_id': project_id,
            'alarm_key': alarm_key,
            'alarm_name': metricalarm['alarm_name'],
            'history_data': history_data,
            'history_item_type': 'ConfigurationUpdate',
            'history_summary': summary,
            'timestamp': utils.utcnow()
        }

        self.cass.put_metric_alarm(alarm_key, metricalarm)
        self.cass.insert_alarm_history(history_key, history_column)
        LOG.info("metric alarm inserted: %s %s", alarm_key, metricalarm)

        # load metric in memory
        self.metrics[metric_key].put_alarm(alarm_key, metricalarm)
Example #32
    def _check_alarm(self, alarmkey, alarm):
        period = int(alarm["period"] / 60)
        evaluation_periods = alarm["evaluation_periods"]
        statistic = alarm["statistic"]
        threshold = alarm["threshold"]
        cmp_op = self.CMP_MAP[alarm["comparison_operator"]]
        unit = alarm["unit"]
        state_value = alarm["state_value"]

        now = utils.utcnow()
        end_idx = now.replace(second=0, microsecond=0) - datetools.Minute()
        start_idx = end_idx - (evaluation_periods - 1) * datetools.Minute()
        start_ana_idx = start_idx - datetools.Minute() * period

        func = self.ROLLING_FUNC_MAP[statistic]
        data = func(self.df[statistic].ix[start_ana_idx:end_idx], period, min_periods=0).ix[start_idx:end_idx]

        if statistic == "SampleCount":
            data = data.fillna(0)
        else:
            if unit:
                data = data / utils.UNIT_CONV_MAP[unit]
                threshold = threshold / utils.UNIT_CONV_MAP[unit]

            data = data.dropna()

        query_date = utils.strtime(now)
        reason_data = {
            "period": alarm["period"],
            "queryDate": query_date,
            "recentDatapoints": list(data),
            "startDate": utils.strtime(start_idx),
            "statistic": statistic,
            "threshold": threshold,
            "version": "1.0",
        }
        old_state = {
            "stateReason": alarm.get("reason", ""),
            "stateValue": alarm.get("state_value", "INSUFFICIENT_DATA"),
            "stateReasonData": json.loads(alarm.get("reason_data", "{}")),
        }
        json_reason_data = json.dumps(reason_data)

        if len(data) < evaluation_periods:
            if state_value != "INSUFFICIENT_DATA":
                template = _("Insufficient Data: %d datapoints were unknown.")
                reason = template % (evaluation_periods - len(data))
                new_state = {"stateReason": reason, "stateReasonData": reason_data, "stateValue": "INSUFFICIENT_DATA"}
                self.update_alarm_state(alarmkey, "INSUFFICIENT_DATA", reason, json_reason_data, now)
                self.cass.update_alarm_state(alarmkey, "INSUFFICIENT_DATA", reason, json_reason_data, now)
                self.alarm_history_state_update(alarmkey, alarm, new_state, old_state)
                self.do_alarm_action(alarmkey, alarm, new_state, old_state, query_date)
                storm.log("INSUFFICIENT_DATA alarm")
        else:
            crossed = reduce(operator.and_, cmp_op(data, threshold))
            com_op = alarm["comparison_operator"]

            if crossed:
                template = _(
                    "Threshold Crossed: %d datapoints were %s "
                    + "the threshold(%f). "
                    + "The most recent datapoints: %s."
                )
                reason = template % (len(data), self.CMP_STR_MAP[com_op], threshold, str(list(data)))
                if state_value != "ALARM":
                    new_state = {"stateReason": reason, "stateReasonData": reason_data, "stateValue": "ALARM"}

                    self.update_alarm_state(alarmkey, "ALARM", reason, json_reason_data, now)
                    self.cass.update_alarm_state(alarmkey, "ALARM", reason, json_reason_data, now)
                    self.alarm_history_state_update(alarmkey, alarm, new_state, old_state)
                    self.do_alarm_action(alarmkey, alarm, new_state, old_state, query_date)
                    storm.log("ALARM alarm")
            else:
                template = _(
                    "Threshold Crossed: %d datapoints were not %s "
                    + "the threshold(%f). "
                    + "The most recent datapoints: %s."
                )
                reason = template % (len(data), self.CMP_STR_MAP[com_op], threshold, str(list(data)))
                if state_value != "OK":
                    new_state = {"stateReason": reason, "stateReasonData": reason_data, "stateValue": "OK"}
                    self.update_alarm_state(alarmkey, "OK", reason, json_reason_data, now)
                    self.cass.update_alarm_state(alarmkey, "OK", reason, json_reason_data, now)
                    self.alarm_history_state_update(alarmkey, alarm, new_state, old_state)
                    self.do_alarm_action(alarmkey, alarm, new_state, old_state, query_date)
                    storm.log("OK alarm")
Ejemplo n.º 35
0
    def _check_alarm(self, alarmkey, alarm, query_time=None):
        period = int(alarm['period'] / 60)
        evaluation_periods = alarm['evaluation_periods']
        statistic = alarm['statistic']
        threshold = alarm['threshold']
        alarm_name = alarm['alarm_name']
        cmp_op = self.CMP_MAP[alarm['comparison_operator']]
        unit = alarm['unit']
        state_value = alarm['state_value']

        query_time = query_time if query_time else utils.utcnow()

        for i in range(self.insufficient_buffer):
            end_idx = (query_time.replace(second=0, microsecond=0) -
                       (i + 1) * datetools.Minute())
            try:
                end_datapoint = self.df[statistic].ix[end_idx]
            except KeyError:
                end_datapoint = None
            if not isnull(end_datapoint):
                break

        start_idx = (end_idx -
                     (period * evaluation_periods) * datetools.Minute())
        start_ana_idx = start_idx - datetools.Minute() * period

        func = self.ROLLING_FUNC_MAP[statistic]
        data = func(self.df[statistic].ix[start_ana_idx:end_idx],
                    period,
                    min_periods=0).ix[start_idx:end_idx:period][1:]
        recent_datapoints = list(data)

        if unit and statistic != 'SampleCount':
            data = data / utils.UNIT_CONV_MAP[unit]
            threshold = threshold / utils.UNIT_CONV_MAP[unit]

        data = data.dropna()

        query_date = utils.strtime(query_time)
        reason_data = {
            "period": alarm['period'],
            "queryDate": query_date,
            "recentDatapoints": recent_datapoints,
            "startDate": utils.strtime(start_idx),
            "statistic": statistic,
            "threshold": threshold,
            "version": "1.0",
        }
        old_state = {
            'stateReason': alarm.get('reason', ""),
            'stateValue': alarm.get('state_value', "INSUFFICIENT_DATA"),
            'stateReasonData': json.loads(alarm.get('reason_data', "{}"))
        }
        json_reason_data = json.dumps(reason_data)

        if len(data) == 0:
            if state_value != 'INSUFFICIENT_DATA':
                template = _("Insufficient Data: %d datapoints were unknown.")
                reason = template % (evaluation_periods - len(data))
                new_state = {
                    'stateReason': reason,
                    'stateReasonData': reason_data,
                    'stateValue': 'INSUFFICIENT_DATA'
                }
                self.update_alarm_state(alarmkey, 'INSUFFICIENT_DATA', reason,
                                        json_reason_data, query_time)
                self.cass.update_alarm_state(alarmkey, 'INSUFFICIENT_DATA',
                                             reason, json_reason_data,
                                             query_time)
                self.alarm_history_state_update(alarmkey, alarm, new_state,
                                                old_state)
                self.do_alarm_action(alarmkey, alarm, new_state, old_state,
                                     query_date)
                LOG.audit("Alarm %s status changed to INSUFFICIENT_DATA",
                          alarm_name)
        else:
            sufficient = len(data) >= evaluation_periods
            crossed = (sufficient
                       and reduce(operator.and_, cmp_op(data, threshold)))
            com_op = alarm['comparison_operator']

            if crossed:
                template = _("Threshold Crossed: %d datapoints were %s " +
                             "the threshold(%f). " +
                             "The most recent datapoints: %s.")
                reason = template % (evaluation_periods,
                                     self.CMP_STR_MAP[com_op], threshold,
                                     recent_datapoints)
                if state_value != 'ALARM':
                    new_state = {
                        'stateReason': reason,
                        'stateReasonData': reason_data,
                        'stateValue': 'ALARM'
                    }

                    self.update_alarm_state(alarmkey, 'ALARM', reason,
                                            json_reason_data, query_time)
                    self.cass.update_alarm_state(alarmkey, 'ALARM', reason,
                                                 json_reason_data, query_time)
                    self.alarm_history_state_update(alarmkey, alarm, new_state,
                                                    old_state)
                    self.do_alarm_action(alarmkey, alarm, new_state, old_state,
                                         query_date)
                    LOG.audit("Alarm %s status changed to ALARM", alarm_name)
            else:
                template = _("Threshold Crossed: %d datapoints were not %s " +
                             "the threshold(%f). " +
                             "The most recent datapoints: %s.")
                reason = template % (evaluation_periods,
                                     self.CMP_STR_MAP[com_op], threshold,
                                     recent_datapoints)
                if state_value != 'OK':
                    new_state = {
                        'stateReason': reason,
                        'stateReasonData': reason_data,
                        'stateValue': 'OK'
                    }
                    self.update_alarm_state(alarmkey, 'OK', reason,
                                            json_reason_data, query_time)
                    self.cass.update_alarm_state(alarmkey, 'OK', reason,
                                                 json_reason_data, query_time)
                    self.alarm_history_state_update(alarmkey, alarm, new_state,
                                                    old_state)
                    self.do_alarm_action(alarmkey, alarm, new_state, old_state,
                                         query_date)
                    LOG.audit("Alarm %s status changed to OK", alarm_name)
Ejemplo n.º 36
0
    def __call__(self, req):
        start = utils.utcnow()
        rv = req.get_response(self.application)
        self.log_request_completion(rv, req, start)
        return rv
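This is the usual WSGI middleware shape: wrap the downstream application, time the round trip, and log it once the response comes back. A minimal sketch of the same pattern with webob; the logger, class name, and log format are assumptions rather than the project's actual middleware:

import logging
import time

import webob.dec

LOG = logging.getLogger(__name__)


class RequestLogger(object):
    """Times each request and logs method, path, status and duration."""

    def __init__(self, application):
        self.application = application

    @webob.dec.wsgify
    def __call__(self, req):
        start = time.time()
        resp = req.get_response(self.application)
        LOG.info("%s %s -> %s (%.3fs)", req.method, req.path,
                 resp.status_int, time.time() - start)
        return resp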
Ejemplo n.º 38
0
    def put_metric_data(self, metric_key, timestamp, value, unit=None):
        
        def get_stats(tmp_stat):
            try:
                ret = dict(zip(self.cass.STATISTICS,
                               map(lambda x: x.values()[0], tmp_stat)))
                for k, v in ret.items():
                    if v is None:
                        ret[k] = float('nan')
            except IndexError:
                storm.log("index %s is not in DB." % time_idx)
                ret = {'SampleCount': float('nan'),
                       'Sum': float('nan'),
                       'Average': float('nan'),
                       'Minimum': float('nan'),
                       'Maximum': float('nan')}
            return ret
        
                 
        time_idx = timestamp.replace(second=0, microsecond=0)
        
        if timedelta(seconds=self.cass.STATISTICS_TTL) < (utils.utcnow() - 
                                                            time_idx):
            msg = "index %s is older than TTL. It doesn't need to insert DB"
            storm.log(msg % time_idx)
            return
        
        
        if time_idx not in self.df.index:
            self._reindex()
        
        value = utils.to_default_unit(value, unit)
        
        try:
            stat = self.df.ix[time_idx]
            for k, v in stat.iteritems():
                if v is None:
                    stat[k] = float('nan')
        except KeyError:
            stat = self.cass.get_metric_statistics_for_key(metric_key,
                                                           time_idx)
            stat = get_stats(stat)

        
        stat['SampleCount'] = 1.0 if isnan(stat['SampleCount']) \
                              else stat['SampleCount'] + 1.0
        stat['Sum'] = value if isnan(stat['Sum'])  \
                      else stat['Sum'] + value
        stat['Average'] = stat['Sum'] / stat['SampleCount']
        stat['Minimum'] = value \
                          if isnan(stat['Minimum']) or stat['Minimum'] > value \
                          else stat['Minimum']
        stat['Maximum'] = value \
                          if isnan(stat['Maximum']) or stat['Maximum'] < value \
                          else stat['Maximum']

        # insert into DB
        stat_dict = {
            'SampleCount':{time_idx: stat['SampleCount']},
            'Sum':{time_idx: stat['Sum']},
            'Average':{time_idx: stat['Average']},
            'Minimum':{time_idx: stat['Minimum']},
            'Maximum':{time_idx: stat['Maximum']}
        }        
        
        self.cass.insert_stat(self.metric_key, stat_dict)
        storm.log("metric data inserted %s" % (self.metric_key))
        
        #self.df.ix[time_idx] = stat
        
        now = utils.utcnow().replace(second=0, microsecond=0)
        timedelta_buf = now - time_idx
        
        if timedelta_buf <= timedelta(seconds=self.MAX_START_PERIOD):
            # check alarms
            self.check_alarms()
Ejemplo n.º 39
0
    def put_metric_data(self, metric_key, timestamp, value, unit=None):
        
        def get_stats(tmp_stat):
            try:
                ret = dict(zip(self.cass.STATISTICS,
                               map(lambda x: x.values()[0], tmp_stat)))
                for k, v in ret.items():
                    if v is None:
                        ret[k] = float('nan')
            except IndexError:
                LOG.debug("index %s is not in DB.", time_idx)
                ret = {'SampleCount': float('nan'),
                       'Sum': float('nan'),
                       'Average': float('nan'),
                       'Minimum': float('nan'),
                       'Maximum': float('nan')}
            return ret
        
        time_idx = timestamp.replace(second=0, microsecond=0)
        time_diff = utils.utcnow() - time_idx
        
        if timedelta(seconds=self.cass.statistics_ttl) < time_diff:
            msg = "index %s is older than TTL. It doesn't need to insert DB"
            LOG.debug(msg, time_idx)
            return
        
        if time_idx not in self.df.index:
            self._reindex()
       
        if value is None:
            LOG.info("metric inputted without value")
            return 
        else:
            value = utils.to_default_unit(value, unit)
        
        try:
            stat = self.df.ix[time_idx]
            for k, v in stat.iteritems():
                if v is None:
                    stat[k] = float('nan')
        except KeyError:
            stat = self.cass.get_metric_statistics_for_key(metric_key,
                                                           time_idx)
            stat = get_stats(stat)

        
        stat['SampleCount'] = 1.0 if isnull(stat['SampleCount']) \
                              else stat['SampleCount'] + 1.0
        stat['Sum'] = value if isnull(stat['Sum'])  \
                      else stat['Sum'] + value
        stat['Average'] = stat['Sum'] / stat['SampleCount']
        stat['Minimum'] = value \
                          if (isnull(stat['Minimum']) or 
                              stat['Minimum'] > value) \
                          else stat['Minimum']
        stat['Maximum'] = value \
                          if (isnull(stat['Maximum']) or 
                              stat['Maximum'] < value) \
                          else stat['Maximum']

        # insert into DB
        stat_dict = {
            'SampleCount':{time_idx: stat['SampleCount']},
            'Sum':{time_idx: stat['Sum']},
            'Average':{time_idx: stat['Average']},
            'Minimum':{time_idx: stat['Minimum']},
            'Maximum':{time_idx: stat['Maximum']}
        }        
        
        ttl = self.cass.statistics_ttl - time_diff.total_seconds()
        self.updated_timestamp = utils.utcnow()
        if ttl > 0.1:
            self.cass.insert_stat(self.metric_key, stat_dict, ttl)
        else:
            LOG.debug("ttl must be positive, ttl %s", ttl)
        self.cass.update_metric(self.metric_key, {'updated_timestamp': 
                                                  self.updated_timestamp})
        LOG.info("metric data inserted %s, time_idx %s", str(self), time_idx)
Ejemplo n.º 40
0
    def put_metric_data(self, metric_key, timestamp, value, unit=None):
        def get_stats(tmp_stat):
            try:
                ret = dict(
                    zip(self.cass.STATISTICS,
                        map(lambda x: x.values()[0], tmp_stat)))
                for k, v in ret.items():
                    if v is None:
                        ret[k] = float('nan')
            except IndexError:
                LOG.debug("index %s is not in DB.", time_idx)
                ret = {
                    'SampleCount': float('nan'),
                    'Sum': float('nan'),
                    'Average': float('nan'),
                    'Minimum': float('nan'),
                    'Maximum': float('nan')
                }
            return ret

        time_idx = timestamp.replace(second=0, microsecond=0)
        time_diff = utils.utcnow() - time_idx

        if timedelta(seconds=self.cass.statistics_ttl) < time_diff:
            msg = "index %s is older than TTL. It doesn't need to insert DB"
            LOG.debug(msg, time_idx)
            return

        if time_idx not in self.df.index:
            self._reindex()

        if value is None:
            LOG.info("metric inputted without value")
            return
        else:
            value = utils.to_default_unit(value, unit)

        try:
            stat = self.df.ix[time_idx]
            for k, v in stat.iteritems():
                if v is None:
                    stat[k] = float('nan')
        except KeyError:
            stat = self.cass.get_metric_statistics_for_key(
                metric_key, time_idx)
            stat = get_stats(stat)


        stat['SampleCount'] = 1.0 if isnull(stat['SampleCount']) \
                              else stat['SampleCount'] + 1.0
        stat['Sum'] = value if isnull(stat['Sum'])  \
                      else stat['Sum'] + value
        stat['Average'] = stat['Sum'] / stat['SampleCount']
        stat['Minimum'] = value \
                          if (isnull(stat['Minimum']) or
                              stat['Minimum'] > value) \
                          else stat['Minimum']
        stat['Maximum'] = value \
                          if (isnull(stat['Maximum']) or
                              stat['Maximum'] < value) \
                          else stat['Maximum']

        # insert into DB
        stat_dict = {
            'SampleCount': {
                time_idx: stat['SampleCount']
            },
            'Sum': {
                time_idx: stat['Sum']
            },
            'Average': {
                time_idx: stat['Average']
            },
            'Minimum': {
                time_idx: stat['Minimum']
            },
            'Maximum': {
                time_idx: stat['Maximum']
            }
        }

        ttl = self.cass.statistics_ttl - time_diff.total_seconds()
        self.updated_timestamp = utils.utcnow()
        if ttl > 0.1:
            self.cass.insert_stat(self.metric_key, stat_dict, ttl)
        else:
            LOG.debug("ttl must be positive, ttl %s", ttl)
        self.cass.update_metric(self.metric_key,
                                {'updated_timestamp': self.updated_timestamp})
        LOG.info("metric data inserted %s, time_idx %s", str(self), time_idx)
Ejemplo n.º 41
0
    def put_metric_alarm(self, context, project_id, metricalarm):
        """
        Send a put metric alarm message to Storm.
        """
        def _validate_actions(alarm):
            for actions in (alarm.ok_actions, alarm.insufficient_data_actions, 
                            alarm.alarm_actions):
                for action in actions:
                    if utils.validate_groupnotification_action(action):
                        group = utils.parse_groupnotification_action(action)
                        if not self.cass.get_notification_group(group):
                            raise InvalidNotificationGroup()

        now = utils.utcnow()
        _validate_actions(metricalarm)
        metricalarm = metricalarm.to_columns()
        alarm_name = metricalarm['alarm_name']
        namespace = metricalarm['namespace']
        metric_name = metricalarm['metric_name']
        dimensions = json.loads(metricalarm['dimensions'])
        
        # check if we have metric in database
        metric_key = self.cass.get_metric_key_or_create(project_id=project_id,
            namespace=namespace, metric_name=metric_name,
            dimensions=dimensions, unit=metricalarm['unit'])
        
        update_data = {
            'project_id': project_id,
            'metric_key': str(metric_key),
            'alarm_arn': "arn:spcs:synaps:%s:alarm:%s" % (project_id,
                                                          alarm_name),
            'alarm_configuration_updated_timestamp': utils.strtime(now)
        }
        metricalarm.update(update_data)
        
        # check if metric is changed 
        alarm_key = self.cass.get_metric_alarm_key(project_id=project_id,
                                                   alarm_name=alarm_name)
        
        if alarm_key:            
            original_alarm = self.cass.get_metric_alarm(alarm_key)
            if (str(original_alarm['metric_key']) != 
                str(metricalarm['metric_key'])):
                raise InvalidRequest("Metric cannot be changed. "
                                     "Delete alarm and retry.")
        else:
            # If alarm is newly added, check quotas
            # check alarm quota per project
            project_quota = FLAGS.get('alarm_quota_per_project')
            alarms_in_project = self.cass.get_alarm_count(project_id)
            if alarms_in_project >= project_quota:
                LOG.info("Too many alarms(%d) in the project %s",
                         alarms_in_project, project_id)
                raise ProjectAlarmQuotaExceeded()
            
            # check alarm quota per metric  
            metric_quota = FLAGS.get('alarm_quota_per_metric')
            alarms_per_metric = self.cass.get_alarms_per_metric_count(
                            project_id, namespace, metric_name, dimensions)
            if alarms_per_metric >= metric_quota:
                LOG.info("Too many alarms(%d) for this metric",
                         alarms_per_metric)
                raise MetricAlarmQuotaExceeded()
        
        message = {'project_id': project_id, 'metric_key': str(metric_key),
                   'metricalarm': metricalarm, 'context': context.to_dict()}
        self.rpc.send_msg(rpc.PUT_METRIC_ALARM_MSG_ID, message)
        LOG.info("PUT_METRIC_ALARM_MSG sent")

        return {}
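The quota gate above boils down to two counter comparisons made before the RPC message is sent. A self-contained sketch of that check; the default quota values stand in for FLAGS.get('alarm_quota_per_project') and FLAGS.get('alarm_quota_per_metric'), and the exception classes are placeholders for the project's own types:

class ProjectAlarmQuotaExceeded(Exception):
    """Placeholder for the project's ProjectAlarmQuotaExceeded."""


class MetricAlarmQuotaExceeded(Exception):
    """Placeholder for the project's MetricAlarmQuotaExceeded."""


def enforce_alarm_quota(alarms_in_project, alarms_per_metric,
                        project_quota=10, metric_quota=5):
    """Raise if adding one more alarm would exceed either quota."""
    if alarms_in_project >= project_quota:
        raise ProjectAlarmQuotaExceeded()
    if alarms_per_metric >= metric_quota:
        raise MetricAlarmQuotaExceeded()


enforce_alarm_quota(alarms_in_project=3, alarms_per_metric=1)   # passes
try:
    enforce_alarm_quota(alarms_in_project=10, alarms_per_metric=1)
except ProjectAlarmQuotaExceeded:
    print("project quota reached")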