def on_message(self, topic, mauka_message):
    """Scan a Vrms payload for SEMI F47 violations and store each one as an incident."""
    event_id = mauka_message.payload.event_id
    box_id = mauka_message.payload.box_id
    data = protobuf.util.repeated_as_ndarray(mauka_message.payload.data)
    start_time_ms = mauka_message.payload.start_timestamp_ms

    # Duration of one datum: 200 samples expressed in milliseconds.
    # NOTE(review): presumably one measurement window — confirm against viol_check.
    time_datum = 200.0 * 1.0 / constants.SAMPLES_PER_MILLISECOND

    # Check each severity level for violations.
    for level in (5, 7, 8):
        found_violations = viol_check(data, level)
        if not found_violations:
            continue
        for violation in found_violations:
            # Worst-case deviation from nominal 120 V within the flagged region.
            deviation = max(
                abs(120.0 - numpy.max(data[violation[0], violation[1]])),
                abs(120.0 - numpy.min(data[violation[0], violation[1]])))
            mongo.store_incident(
                event_id,
                box_id,
                start_time_ms + violation[0] * time_datum,
                start_time_ms + (violation[1] + 1) * time_datum,
                mongo.IncidentMeasurementType.VOLTAGE,
                deviation,
                [mongo.IncidentClassification.SEMI_F47_VIOLATION],
                [],
                {},
                self.mongo_client)
def ieee1159_voltage(mauka_message: protobuf.mauka_pb2.MaukaMessage,
                     rms_features: np.ndarray,
                     opq_mongo_client: mongo.OpqMongoClient = None):
    """
    Classify IEEE 1159 voltage incidents from RMS features and persist each
    one to the mongo database.
    """
    incidents, cycle_offsets = classify_ieee1159_voltage(rms_features)
    for idx, incident in enumerate(incidents):
        first = cycle_offsets[idx][0]
        last = cycle_offsets[idx][1]
        # Deviation of each RMS value from nominal. (The abs() here and
        # elsewhere is unnecessary provided the RMS has been applied.)
        deviations = np.abs(rms_features[first:last]) - constants.NOMINAL_VRMS
        # Keep the signed deviation with the greatest magnitude; ties favor
        # the positive peak.
        largest = np.amax(deviations)
        smallest = np.amin(deviations)
        signed_deviation = smallest if largest < -smallest else largest
        mongo.store_incident(
            mauka_message.payload.event_id,
            mauka_message.payload.box_id,
            mauka_message.payload.start_timestamp_ms + analysis.c_to_ms(first),
            mauka_message.payload.start_timestamp_ms + analysis.c_to_ms(last),
            mongo.IncidentMeasurementType.VOLTAGE,
            signed_deviation,
            [incident],
            [],
            {},
            opq_mongo_client)
def itic(mauka_message: protobuf.mauka_pb2.MaukaMessage,
         segment_threshold: float,
         logger=None,
         opq_mongo_client: mongo.OpqMongoClient = None):
    """
    Computes the ITIC region for each segment of a given waveform and stores
    any PROHIBITED / NO_DAMAGE regions as incidents.

    :param mauka_message: Message whose payload carries the Vrms data.
    :param segment_threshold: Threshold for segmentation
    :param logger: Optional logger to use to print information
    :param opq_mongo_client: Optional DB client to re-use (otherwise new one will be created)
    """
    mongo_client = mongo.get_default_client(opq_mongo_client)

    # IDIOM FIX: the original tested `len(...) < 0.01`, a confusing float
    # comparison on an integer length that is only ever true for length 0.
    if not mauka_message.payload.data:
        return

    segments = analysis.segment(mauka_message.payload.data, segment_threshold)

    if logger is not None:
        logger.debug("Calculating ITIC with {} segments.".format(len(segments)))

    for segment in segments:
        start_idx = segment[0]
        end_idx = segment[1] + 1  # +1: segment end bound appears inclusive
        subarray = mauka_message.payload.data[start_idx:end_idx]
        mean_rms = numpy.mean(subarray)

        itic_enum = itic_region(mean_rms, analysis.c_to_ms(len(subarray)))
        if itic_enum == IticRegion.NO_INTERRUPTION:
            continue

        # Translate cycle offsets into absolute timestamps.
        incident_start_timestamp_ms = mauka_message.payload.start_timestamp_ms + analysis.c_to_ms(start_idx)
        incident_end_timestamp_ms = mauka_message.payload.start_timestamp_ms + analysis.c_to_ms(end_idx)
        if itic_enum is IticRegion.PROHIBITED:
            incident_classification = mongo.IncidentClassification.ITIC_PROHIBITED
        else:
            incident_classification = mongo.IncidentClassification.ITIC_NO_DAMAGE

        # NOTE(review): event_id/box_id are read from the top-level message
        # here, while sibling handlers use mauka_message.payload.* — confirm
        # these top-level fields exist on MaukaMessage.
        mongo.store_incident(mauka_message.event_id,
                             mauka_message.box_id,
                             incident_start_timestamp_ms,
                             incident_end_timestamp_ms,
                             mongo.IncidentMeasurementType.VOLTAGE,
                             mean_rms - 120.0,  # deviation from nominal 120 Vrms
                             [incident_classification],
                             [],
                             {},
                             mongo_client)

        if logger is not None:
            logger.debug("Found ITIC incident [{}] from event {} and box {}".format(
                itic_enum, mauka_message.event_id, mauka_message.box_id))
def sliding_thd(self, event_id: int, box_id: str,
                box_event_start_timestamp: int, waveform: numpy.ndarray):
    """
    Calculates sliding THD over a waveform. Contiguous runs of windows whose
    THD exceeds the configured threshold are stored as incidents.
    :param event_id: Event that this waveform came form.
    :param box_id: Box that this waveform came from.
    :param box_event_start_timestamp: Start timestamp of the provided waveform
    :param waveform: The waveform to calculate THD over.
    """
    window_size = int(
        constants.SAMPLE_RATE_HZ *
        (self.sliding_window_ms / constants.MILLISECONDS_PER_SECOND))
    windows = rolling_window(waveform, window_size)
    thds = [thd(window, constants.CYCLES_PER_SECOND) for window in windows]

    prev_beyond_threshold = False
    prev_idx = -1
    # BUG FIX: max_thd was previously a running maximum over the ENTIRE
    # waveform (including windows outside any anomaly) and was never reset
    # between incidents, so a later incident could report an earlier
    # incident's peak. Track the maximum within the current anomaly only.
    max_thd = -1
    for i, thd_i in enumerate(thds):
        if thd_i > self.threshold_percent:
            if not prev_beyond_threshold:
                # Start of a new anomaly: reset the per-incident maximum.
                prev_idx = i
                max_thd = thd_i
                prev_beyond_threshold = True
            elif thd_i > max_thd:
                max_thd = thd_i
        else:
            # We only care if this is the end of an anomaly
            if prev_beyond_threshold:
                prev_beyond_threshold = False
                # Every thd value is a sample over a 200 ms window
                incident_start_timestamp = int(box_event_start_timestamp +
                                               (prev_idx * self.sliding_window_ms))
                incident_end_timestamp = int(box_event_start_timestamp +
                                             (i * self.sliding_window_ms) +
                                             self.sliding_window_ms)
                mongo.store_incident(
                    event_id,
                    box_id,
                    incident_start_timestamp,
                    incident_end_timestamp,
                    mongo.IncidentMeasurementType.THD,
                    max_thd,
                    [mongo.IncidentClassification.EXCESSIVE_THD],
                    [],
                    {},
                    self.mongo_client)
    # TODO(review): an anomaly still open when the waveform ends is never
    # stored (matches the original behavior) — confirm whether a trailing
    # incident should be flushed here.
def on_message(self, topic, mauka_message):
    """
    Called async when a topic this plugin subscribes to produces a message
    :param topic: The topic that is producing the message
    :param mauka_message: The message that was produced
    """
    self.debug("{} on_message".format(topic))

    # Guard clause: only FREQUENCY_WINDOWED payloads are handled here.
    if not protobuf.util.is_payload(mauka_message,
                                    protobuf.mauka_pb2.FREQUENCY_WINDOWED):
        self.logger.error(
            "Received incorrect mauka message [%s] at FrequencyVariationPlugin",
            protobuf.util.which_message_oneof(mauka_message))
        return

    payload = mauka_message.payload
    self.debug("on_message {}:{} len:{}".format(payload.event_id,
                                                payload.box_id,
                                                len(payload.data)))

    incidents = frequency_incident_classifier(
        payload.event_id,
        payload.box_id,
        protobuf.util.repeated_as_ndarray(payload.data),
        payload.start_timestamp_ms,
        self.freq_ref,
        self.freq_var_high,
        self.freq_var_low,
        self.freq_interruption,
        self.frequency_window_cycles * constants.SAMPLES_PER_CYCLE,
        self.max_lull,
        logger=self.logger)

    # Persist every classified incident.
    for found in incidents:
        mongo.store_incident(found["event_id"],
                             found["box_id"],
                             found["incident_start_ts"],
                             found["incident_end_ts"],
                             found["incident_type"],
                             found["avg_deviation"],
                             found["incident_classifications"],
                             found["annotations"],
                             found["metadata"],
                             found["mongo_client"])
def ieee1159_voltage(
        mauka_message: protobuf.mauka_pb2.MaukaMessage,
        opq_mongo_client: mongo.OpqMongoClient,
        ieee1159_voltage_plugin: typing.Optional['Ieee1159VoltagePlugin'] = None
) -> typing.List[int]:
    """
    Calculate the ieee1159 voltage incidents and add them to the mongo database.

    :param mauka_message: Message whose payload carries the Vrms values.
    :param opq_mongo_client: Mongo client used to store incidents.
    :param ieee1159_voltage_plugin: Optional plugin instance, used for debug
                                    logging and incident id allocation.
    :return: The ids of all stored incidents.
    """
    data: typing.List[float] = list(mauka_message.payload.data)
    log.maybe_debug("Found %d Vrms values." % len(data), ieee1159_voltage_plugin)

    try:
        incidents = mauka_native_py.classify_rms(
            mauka_message.payload.start_timestamp_ms, data)
        log.maybe_debug("Found %d Incidents." % len(incidents),
                        ieee1159_voltage_plugin)
    except Exception as exception:
        # BUG FIX: the plugin parameter is Optional; dereferencing it unguarded
        # raised AttributeError here, masking the original classification error.
        if ieee1159_voltage_plugin is not None:
            ieee1159_voltage_plugin.logger.error("Error getting V incidents: %s",
                                                 str(exception))
        incidents = []

    incident_ids: typing.List[int] = []
    # Deviations from the nominal 120 Vrms, over the WHOLE payload.
    # NOTE(review): the same whole-payload peak deviation is stored for every
    # incident, rather than the peak within each incident's span — confirm
    # this is intended.
    array_data: np.ndarray = np.array(data)
    array_data = array_data - 120.0
    for incident in incidents:
        try:
            # NOTE(review): request_next_available_incident_id still requires a
            # non-None plugin; a None plugin is caught and logged below.
            incident_id = mongo.store_incident(
                ieee1159_voltage_plugin.request_next_available_incident_id(),
                mauka_message.payload.event_id,
                mauka_message.payload.box_id,
                incident.start_time_ms,
                incident.end_time_ms,
                mongo.IncidentMeasurementType.VOLTAGE,
                max(np.abs(array_data.min()), np.abs(array_data.max())),
                [INCIDENT_MAP[incident.incident_classification]],
                [],
                {},
                opq_mongo_client,
                ieee_duration=DURATION_MAP[incident.incident_classification])
            log.maybe_debug("Stored incident with id=%s" % incident_id,
                            ieee1159_voltage_plugin)
            incident_ids.append(incident_id)
        except Exception as exception:
            if ieee1159_voltage_plugin is not None:
                ieee1159_voltage_plugin.logger.error(
                    "Error storing V incident: %s", str(exception))
    return incident_ids
def thd(mauka_message: protobuf.mauka_pb2.MaukaMessage,
        thd_threshold_percent: float,
        opq_mongo_client: mongo.OpqMongoClient,
        thd_plugin: typing.Optional['ThdPlugin'] = None) -> typing.List[int]:
    """
    Calculates THD per cycle over a range of data.
    :param mauka_message: The Mauka message.
    :param thd_threshold_percent: The THD threshold percent.
    :param opq_mongo_client: An instance of an OPQ mongo client.
    :param thd_plugin: An instance of the THD plugin.
    :return: A list of incident ids (if any)
    """
    try:
        data: typing.List[float] = list(mauka_message.payload.data)
        log.maybe_debug("Found %d samples." % len(data), thd_plugin)
        incidents = mauka_native_py.classify_thd(
            mauka_message.payload.start_timestamp_ms, thd_threshold_percent,
            data)
        log.maybe_debug("Found %d THD Incidents." % len(incidents), thd_plugin)
    except Exception as exception:
        incidents = []
        # BUG FIX: thd_plugin is Optional; guard before using its logger so a
        # classification failure is not masked by an AttributeError.
        if thd_plugin is not None:
            thd_plugin.logger.error("Error finding THD incidents: %s",
                                    str(exception))

    incident_ids: typing.List[int] = []
    for incident in incidents:
        try:
            incident_id = mongo.store_incident(
                thd_plugin.request_next_available_incident_id(),
                mauka_message.payload.event_id,
                mauka_message.payload.box_id,
                incident.start_time_ms,
                incident.end_time_ms,
                # CONSISTENCY FIX: these are THD incidents (cf. sliding_thd,
                # which stores IncidentMeasurementType.THD for the same
                # EXCESSIVE_THD classification); previously stored as VOLTAGE.
                mongo.IncidentMeasurementType.THD,
                -1,  # no deviation value is computed for THD incidents
                [mongo.IncidentClassification.EXCESSIVE_THD],
                [],
                {},
                opq_mongo_client)
            log.maybe_debug("Stored incident with id=%s" % incident_id,
                            thd_plugin)
            incident_ids.append(incident_id)
        except Exception as exception:
            if thd_plugin is not None:
                thd_plugin.logger.error("Error storing THD incident %s",
                                        str(exception))
    return incident_ids
def on_message(self, topic, mauka_message):
    """
    Called async when a topic this plugin subscribes to produces a message
    :param topic: The topic that is producing the message
    :param mauka_message: The message that was produced
    """
    self.debug("{} on_message".format(topic))

    # Guard clause: only raw-voltage payloads are handled here.
    if not protobuf.pb_util.is_payload(mauka_message,
                                       protobuf.mauka_pb2.VOLTAGE_RAW):
        self.logger.error(
            "Received incorrect mauka message [%s] at TransientPlugin",
            protobuf.pb_util.which_message_oneof(mauka_message))
        return

    payload = mauka_message.payload
    self.debug("on_message {}:{} len:{}".format(payload.event_id,
                                                payload.box_id,
                                                len(payload.data)))

    incidents = transient_incident_classifier(
        payload.event_id,
        payload.box_id,
        protobuf.pb_util.repeated_as_ndarray(payload.data),
        payload.start_timestamp_ms,
        self.configs)

    for incident in incidents:
        stored_id = mongo.store_incident(
            self.request_next_available_incident_id(),
            incident["event_id"],
            incident["box_id"],
            incident["incident_start_ts"],
            incident["incident_end_ts"],
            incident["incident_type"],
            incident["max_deviation"],
            incident["incident_classifications"],
            incident["annotations"],
            incident["metadata"],
            incident["mongo_client"])
        # Produce a message to the GC
        self.produce(Routes.laha_gc,
                     protobuf.pb_util.build_gc_update(
                         self.name, protobuf.mauka_pb2.INCIDENTS, stored_id))
def on_message(self, topic: str, mauka_message: protobuf.mauka_pb2.MaukaMessage):
    # Outage tracker: on each of this plugin's own heartbeats, scan recent
    # measurements, update per-box last-seen timestamps, and open/extend/close
    # OUTAGE incidents for boxes that have gone silent.
    if protobuf.pb_util.is_heartbeat_message(mauka_message):
        if OutagePlugin.NAME == mauka_message.source:
            # NOTE(review): utcnow() is naive; unix_time_millis presumably
            # treats it as UTC — confirm.
            now = unix_time_millis(datetime.datetime.utcnow())
            self.debug("Recv outage heartbeat last_update=%s now=%s" %
                       (str(self.last_update), str(now)))
            box_to_max_timestamp: typing.Dict[str, int] = {}
            box_to_min_timestamp: typing.Dict[str, int] = {}
            # Only measurements newer than the previous heartbeat pass.
            measurements = self.mongo_client.measurements_collection.find(
                {"timestamp_ms": {
                    "$gte": self.last_update
                }},
                projection={
                    "_id": False,
                    "box_id": True,
                    "timestamp_ms": True
                })
            # Find the min and max timestamps for each box
            total_measurements = 0
            for measurement in measurements:
                box_id = measurement["box_id"]
                timestamp = measurement["timestamp_ms"]
                if box_id not in box_to_max_timestamp:
                    # Sentinels: 0 so any real timestamp exceeds the max,
                    # and a huge value so any real timestamp undercuts the min.
                    box_to_max_timestamp[box_id] = 0
                    box_to_min_timestamp[box_id] = 9999999999999999999999
                if timestamp > box_to_max_timestamp[box_id]:
                    box_to_max_timestamp[box_id] = timestamp
                if timestamp < box_to_min_timestamp[box_id]:
                    box_to_min_timestamp[box_id] = timestamp
                total_measurements += 1
            self.debug("Processed %d measurements" % total_measurements)
            self.debug(str(box_to_max_timestamp))
            self.debug(str(box_to_min_timestamp))
            # Merge the max timestamps for each box into the last seen dictionary
            self.box_to_last_seen = {
                **self.box_to_last_seen,
                **box_to_max_timestamp
            }
            self.debug(str(self.box_to_last_seen))
            # Check for outages
            for box_id, last_seen in self.box_to_last_seen.items():
                # Outage: no measurement from this box for over a minute.
                if now - last_seen > 60_000:
                    self.debug("Outage box_id=%s last_seen=%d" %
                               (box_id, last_seen))
                    # Ignore if box is marked as unplugged; drop any open
                    # incident state for it.
                    if is_unplugged(self.mongo_client, box_id):
                        self.debug(
                            "Ignoring outage because box_id=%s is unplugged" %
                            box_id)
                        if box_id in self.prev_incident_ids:
                            del self.prev_incident_ids[box_id]
                        continue
                    # Fresh outage: open a new HEALTH/OUTAGE incident
                    # (-1 sentinels for event id, end timestamp, deviation).
                    if box_id not in self.prev_incident_ids:
                        incident_id = mongo.store_incident(
                            self.request_next_available_incident_id(), -1,
                            box_id, int(now), -1,
                            mongo.IncidentMeasurementType.HEALTH, -1.0,
                            [mongo.IncidentClassification.OUTAGE],
                            opq_mongo_client=self.mongo_client,
                            copy_data=False)
                        self.debug(
                            "Fresh outage incident_id=%d box_id=%s" %
                            (incident_id, box_id))
                        self.prev_incident_ids[box_id] = incident_id
                    # Ongoing outage: push the open incident's end time forward.
                    else:
                        prev_incident_id = self.prev_incident_ids[box_id]
                        self.debug(
                            "Ongoing outage incident_id=%d box_id=%s" %
                            (prev_incident_id, box_id))
                        # Update previous incident
                        self.mongo_client.incidents_collection.update_one(
                            {"incident_id": prev_incident_id},
                            {"$set": {
                                "end_timestamp_ms": int(now)
                            }})
                # No outage
                else:
                    self.debug("No outage for box_id=%s" % box_id)
                    # Outage over: close the open incident at the first
                    # measurement seen in this batch.
                    # NOTE(review): box_to_min_timestamp[box_id] raises
                    # KeyError if last_seen came from a prior heartbeat and
                    # this batch had no measurement for the box — confirm
                    # whether that state is reachable.
                    if box_id in self.prev_incident_ids:
                        prev_incident_id = self.prev_incident_ids[box_id]
                        self.debug("Outage over incident_id=%d box_id=%s" %
                                   (prev_incident_id, box_id))
                        # Update previous incident
                        self.mongo_client.incidents_collection.update_one(
                            {"incident_id": prev_incident_id}, {
                                "$set": {
                                    "end_timestamp_ms":
                                    int(box_to_min_timestamp[box_id])
                                }
                            })
                        # Produce a message to the GC
                        self.produce(
                            Routes.laha_gc,
                            protobuf.pb_util.build_gc_update(
                                self.name, protobuf.mauka_pb2.INCIDENTS,
                                prev_incident_id))
                        del self.prev_incident_ids[box_id]
            self.last_update = now
def itic(mauka_message: protobuf.mauka_pb2.MaukaMessage,
         segment_threshold: float,
         itic_plugin: typing.Optional['IticPlugin'] = None,
         opq_mongo_client: typing.Optional[mongo.OpqMongoClient] = None) -> typing.List[int]:
    """
    Computes the ITIC region for a given waveform.
    :param itic_plugin: An instance of this plugin.
    :param mauka_message: A mauka message.
    :param segment_threshold: Threshold for segmentation
    :param opq_mongo_client: Optional DB client to re-use (otherwise new one will be created)
    :return: A list of stored incident ids.
    """
    mongo_client = mongo.get_default_client(opq_mongo_client)

    if len(mauka_message.payload.data) < 0.01:
        maybe_debug(itic_plugin, "Bad payload data length: %d" % len(mauka_message.payload.data))
        # BUG FIX: previously this branch only logged and fell through,
        # continuing to process the empty payload; bail out instead (matching
        # the older itic implementation, which returned here).
        return []

    maybe_debug(itic_plugin,
                "Preparing to get segments for %d Vrms values" % len(mauka_message.payload.data))
    try:
        segments = analysis.segment_array(numpy.array(list(mauka_message.payload.data)))
    except Exception as exception:
        # itic_plugin is Optional: guard before using its logger so the
        # segmentation error is not masked by an AttributeError.
        if itic_plugin is not None:
            itic_plugin.logger.error("Error segmenting data for ITIC plugin: %s", str(exception))
        segments = []

    if len(segments) == 0:
        maybe_debug(itic_plugin, "No segments found. Ignoring")
        return []

    maybe_debug(itic_plugin, "Calculating ITIC with {} segments.".format(len(segments)))
    incident_ids = []
    for i, segment in enumerate(segments):
        try:
            segment_len = analysis.c_to_ms(len(segment))
            # Offset of this segment = total length of all preceding segments.
            start_t = analysis.c_to_ms(sum([len(segments[x]) for x in range(0, i)]))
            end_t = start_t + segment_len
            mean_rms = segment.mean()
            maybe_debug(itic_plugin, "start=%f end=%f mean=%f" % (start_t, end_t, mean_rms))

            itic_enum = itic_region(mean_rms, segment_len)
            if itic_enum == IticRegion.NO_INTERRUPTION:
                maybe_debug(itic_plugin, "NO_INTERRUPTION")
                continue

            incident_start_timestamp_ms = mauka_message.payload.start_timestamp_ms + start_t
            incident_end_timestamp_ms = mauka_message.payload.start_timestamp_ms + end_t
            if itic_enum is IticRegion.PROHIBITED:
                maybe_debug(itic_plugin, "PROHIBITED")
                incident_classification = mongo.IncidentClassification.ITIC_PROHIBITED
            else:
                maybe_debug(itic_plugin, "NO_DAMAGE")
                incident_classification = mongo.IncidentClassification.ITIC_NO_DAMAGE

            incident_id = mongo.store_incident(
                itic_plugin.request_next_available_incident_id(),
                mauka_message.payload.event_id,
                mauka_message.payload.box_id,
                incident_start_timestamp_ms,
                incident_end_timestamp_ms,
                mongo.IncidentMeasurementType.VOLTAGE,
                mean_rms - 120.0,  # deviation from nominal 120 Vrms
                [incident_classification],
                [],
                {},
                mongo_client)
            maybe_debug(itic_plugin, "Stored incident")
            # NOTE(review): this debug reads event_id/box_id from the
            # top-level message while store_incident uses payload.* —
            # confirm both exist on MaukaMessage.
            maybe_debug(itic_plugin,
                        "Found ITIC incident [{}] from event {} and box {}".format(
                            itic_enum, mauka_message.event_id, mauka_message.box_id))
            incident_ids.append(incident_id)
        except Exception as exception:
            if itic_plugin is not None:
                itic_plugin.logger.error("Error storing ITIC incident: %s", str(exception))
    return incident_ids
def find_frequency_variation_incidents(
        mauka_message: mauka_pb2.MaukaMessage,
        frequency_threshold_low: float,
        frequency_threshold_high: float,
        min_incident_len_c: float,
        opq_mongo_client: mongo.OpqMongoClient,
        plugin: typing.Optional['FrequencyVariationPlugin'] = None
) -> typing.List[int]:
    """
    Finds frequency variation incidents.
    :param mauka_message: The mauka message containing the waveform payload.
    :param frequency_threshold_low: The low f threshold.
    :param frequency_threshold_high: The high f thresholds.
    :param min_incident_len_c: The minimum incident length (in cycles).
    :param opq_mongo_client: An instance of a mongo client.
    :param plugin: An instance of this plugin.
    :return: A list of incident ids.
    """
    frequencies_per_cycle: typing.List[float] = list(
        mauka_message.payload.data)
    log.maybe_debug("Found %d frequencies" % len(frequencies_per_cycle),
                    plugin)
    # Anything below the low threshold is a sag; anything above the high
    # threshold (up to a large sentinel) is a swell.
    bounds = [[0.0, frequency_threshold_low],
              [frequency_threshold_high, 1_000_000]]
    log.maybe_debug("Using bounds=%s" % str(bounds), plugin)

    try:
        ranges = native.bounded_ranges(
            mauka_message.payload.start_timestamp_ms, frequencies_per_cycle,
            bounds)
        log.maybe_debug("Found %d F ranges" % (len(ranges)), plugin)
    except Exception as exception:
        # plugin is Optional: guard so the original error is not masked.
        if plugin is not None:
            plugin.logger.error("Error getting F ranges: %s", str(exception))
        ranges = []

    incident_ids: typing.List[int] = []
    for incident_range in ranges:
        try:
            print_range(incident_range, plugin)
            # Skip ranges shorter than the configured minimum (in cycles).
            if incident_range.end_idx - incident_range.start_idx < min_incident_len_c:
                log.maybe_debug(
                    "Ignoring incident with len_c = %f" %
                    (incident_range.end_idx - incident_range.start_idx),
                    plugin)
                continue

            log.maybe_debug("Before finding max_deviation", plugin)
            # NOTE(review): max(min(xs), max(xs)) always equals max(xs), so
            # this reduces to 60.0 - max(frequencies in range). Confirm
            # whether the intent was the largest ABSOLUTE deviation from
            # 60 Hz instead; preserved as-is pending that confirmation.
            max_deviation = 60.0 - max(
                min(frequencies_per_cycle[incident_range.start_idx:incident_range.end_idx]),
                max(frequencies_per_cycle[incident_range.start_idx:incident_range.end_idx]))
            log.maybe_debug("After finding max_deviation", plugin)
            log.maybe_debug("max_deviation=%f" % max_deviation, plugin)

            if incident_range.bound_min == bounds[0][0] and incident_range.bound_max == bounds[0][1]:
                # Frequency sag
                log.maybe_debug("frequency_sag", plugin)
                incident_id = mongo.store_incident(
                    plugin.request_next_available_incident_id(),
                    mauka_message.payload.event_id,
                    mauka_message.payload.box_id,
                    incident_range.start_ts_ms,
                    incident_range.end_ts_ms,
                    mongo.IncidentMeasurementType.FREQUENCY,
                    max_deviation,
                    [mongo.IncidentClassification.FREQUENCY_SAG],
                    [],
                    {},
                    opq_mongo_client)
                incident_ids.append(incident_id)
                log.maybe_debug("Stored incident with id=%s" % incident_id,
                                plugin)
            elif incident_range.bound_min == bounds[1][0] and incident_range.bound_max == bounds[1][1]:
                # Frequency swell
                log.maybe_debug("frequency_swell", plugin)
                incident_id = mongo.store_incident(
                    plugin.request_next_available_incident_id(),
                    mauka_message.payload.event_id,
                    mauka_message.payload.box_id,
                    incident_range.start_ts_ms,
                    incident_range.end_ts_ms,
                    mongo.IncidentMeasurementType.FREQUENCY,
                    max_deviation,
                    [mongo.IncidentClassification.FREQUENCY_SWELL],
                    [],
                    {},
                    opq_mongo_client)
                incident_ids.append(incident_id)
                log.maybe_debug("Stored incident with id=%s" % incident_id,
                                plugin)
            else:
                # Unknown
                log.maybe_debug(
                    "Unknown range bounds = %d, %d" %
                    (incident_range.bound_min, incident_range.bound_max),
                    plugin)
        except Exception as exception:
            # BUG FIX: previously called plugin.logger(...) — a Logger object
            # is not callable, so any storage failure raised TypeError here.
            # Use Logger.error, and guard the Optional plugin.
            if plugin is not None:
                plugin.logger.error(
                    "Error storing frequency variation incident: %s",
                    str(exception))
    return incident_ids