def receiveHandler(self, item):
    """Consume one observation coming off the internal queue.

    item -- 4-tuple (parsedData, sensordescription, clock, quality).

    Pipeline: persist to SQL (if configured), annotate as an RDF graph and
    publish to the message bus / triplestore (if enabled via the CLI args),
    then hand the raw observation to the aggregation queue (if enabled).
    """
    parsedData, sensordescription, clock, quality = item
    L.d2(parsedData)
    # print parsedData
    if self.sql:
        # deep-copy quality so later mutation of it cannot affect what is stored
        self.sql.insert_observation(sensordescription, parsedData, dictdeepcopy(quality))
    if ResourceManagement.args.messagebus or ResourceManagement.args.triplestore:
        # if len(parsedData.fields)>0:
        g = self.annotator.annotateObservation(parsedData, sensordescription, clock, quality)
        del quality  # folded into the graph; release early
        # publish only when the bus is on, the sensor has not opted out,
        # and the observation actually carries at least one field
        if (
            ResourceManagement.args.messagebus
            and not sensordescription.no_publish_messagebus
            and "fields" in parsedData
            and len(parsedData.fields) > 0
        ):
            message = g.serialize(format="n3")
            # print message
            key = sensordescription.messagebus.routingKey
            # self.messageBusQueue.add((parsedData.dumps(), self.rabbitmqchannel, RabbitMQ.exchange_data, key))
            self.messageBusQueue.add((message, RabbitMQ.exchange_annotated_data, key))
            if self.ui.api:
                # keep the UI's last-observation cache in sync with what was published
                self.ui.api.update_observation_cache(str(sensordescription.uuid), message)
        if ResourceManagement.args.triplestore:
            # TODO: The following line is commented out, since the Virtuoso makes so much trouble
            # ThreadedTriplestoreAdapter.getOrMake(sensordescription.graphName).addGraph(g)
            pass
        if ResourceManagement.args.messagebus or ResourceManagement.args.triplestore:
            del g  # graph no longer needed; free the (potentially large) object
    if ResourceManagement.args.aggregate:
        self.aggregationQueue.add((parsedData, sensordescription))
    else:
        del parsedData  # not aggregated, release immediately
def initialise(self, description, clock):
    """Wire up the reputation system: one sink plus all QoI metrics.

    description -- sensor description of the stream being scored
    clock       -- the wrapper's clock, used for time-based metrics
    """
    self.reputationSystem = ReputationSystem(description, clock)
    # Alternative sinks, kept for reference:
    # self.reputationSystem.addSink(CSVSink())
    # self.reputationSystem.addSink(VirtuosoSink(self.messageBusQueue))
    # self.reputationSystem.addSink(TerminalSink())
    self.reputationSystem.addSink(BlackHoleSink())
    # register every quality-of-information metric in one pass
    for metric in (Frequency(), Completeness(), Age(), Latency(), Correctness()):
        self.reputationSystem.addQoIMetric(metric)
    self.initialised = True
    L.d2("CpQoiSystem initialised")
def update(self, data):
    """Feed one observation into every registered QoI metric.

    data -- the parsed observation; may be None, in which case no metric
            is evaluated (only the clock job and averages are refreshed).

    Returns a dict mapping metric name -> computed quality value.
    """
    L.d2("ReputationSystem: update called for Stream", self.description.fullSensorID)
    self.setTimestamp(data)
    returnValues = {}
    if data is not None:
        for m in self.metrics:
            value = m.update(data)
            # metrics return a (name, value) pair, or a falsy result when
            # they have nothing to report for this observation
            if value:
                returnValues[value[0]] = value[1]
            # NOTE(review): every sink is updated once per metric, inside the
            # metric loop — confirm this is intended rather than once per call
            for sink in self.sinkList:
                sink.update(m)
    self.addClockJob(True)  # re-arm the time-based validation job
    self.avgQoIManager.calculateAvgQualities(returnValues)
    return returnValues
def insert_observation(self, sd, ob, q):
    """Store one observation and its quality document in the SQL backend.

    sd -- sensor description of the stream
    ob -- parsed observation; per-field entries carry observationID and
          observationSamplingTime
    q  -- quality annotation, serialised as JSON next to the observation

    Builds a single multi-statement query: one INSERT into
    cp_observations keyed by the first usable field's observation UUID,
    plus one INSERT into p_s_observation_uuid mapping that primary UUID
    to each field's observation UUID.

    Returns True on success; on any error, rolls back and returns False.
    """
    # L.d("Inserting observation", ob)
    query = None
    try:
        _ob = ob.deepcopy()
        # the stored copy must not contain timestamp/latency bookkeeping fields
        _ob.fields = filter(lambda x: not self.is_timestamp_field(x, sd), ob.fields)
        if sd.isTimestampedStream() and sd.timestamp.inField in _ob:
            _ob.remove_item(sd.timestamp.inField)
        if 'latency' in _ob:
            _ob.remove_item('latency')
        primary_ob_uuid = None
        p_s_values = []
        for _f in ob.fields:
            # skip fields without data and the timestamp field itself
            if _f not in ob or (sd.isTimestampedStream() and sd.timestamp.inField == _f):
                continue
            if not query:
                # first usable field becomes the "primary" observation row
                field = ob[_f]
                primary_ob_uuid = self._escape_string(None, str(field.observationID))
                v = [
                    "TIMESTAMP " + self._escape_string(None, field.observationSamplingTime),
                    self._escape_string(None, str(sd.uuid)),
                    primary_ob_uuid,
                    self._escape_string(None, _ob.dumps(), singleqoute_to_double=True),
                    self._escape_string(None, JOb(q).dumps())
                ]
                query = "INSERT INTO %s.cp_observations (%s) VALUES (%s);\n" % (SQL.SCHEMA, ','.join(SQL.cp_observation_fields), ','.join(v))
                del v
            # NOTE(review): `field` is only (re)bound inside the `if not query:`
            # branch above, so every secondary row reuses the FIRST field's
            # observationID — verify whether this should be ob[_f] per iteration
            p_s_values.append("(%s, %s)" % (primary_ob_uuid, self._escape_string(None, str(field.observationID))))
        if query:
            query += "INSERT INTO %s.p_s_observation_uuid (main, secondary) VALUES %s;\n" % (SQL.SCHEMA, ','.join(p_s_values))
            L.d2("Using query:", query)
        if query:
            self.curs.execute(query)
            self.conn.commit()
            del query
        del _ob
        return True
    except Exception as e:
        self.conn.rollback()
        L.e(e)
        # query may still be None here if the failure happened before it was built
        L.e("SQL query used:", query)
        return False
def aggregate(self, data, sensordescription):
    """Run every SAX aggregation object of the sensor over one observation.

    data              -- parsed observation; per-field entries are fed to
                         the matching aggregation object
    sensordescription -- description providing uuid, sensorID, field
                         metadata and sensorType

    Returns a list of JSONObjects (graph, sensorID, propertyType,
    category), one per field whose aggregator produced output, or None
    when there was nothing to aggregate or an error occurred.
    """
    if not data.fields:
        # Log.e("There was no data available so it could not be aggregated")
        return None
    result = []
    try:
        saxobjs = self.aggregation_objs[sensordescription.uuid]
        for f in saxobjs:
            g = saxobjs[f].control(data[f])
            if g:
                r = JSONObject()
                r.graph = g
                r.sensorID = sensordescription.sensorID
                r.propertyType = sensordescription.field[f].propertyName
                r.category = sensordescription.sensorType
                result.append(r)
        del data  # observation no longer needed; free it
        # FIX: was `result is not None and len(result) is not 0` — `result` is
        # always a list here, and `is not 0` compares identity with an int
        # literal (wrong idiom, SyntaxWarning on CPython >= 3.8). Truthiness
        # expresses the same intent correctly.
        if result:
            Log.d2('SaxAggregator: Aggregation successful %s' % str(result))
            return result
    except Exception as e:
        Log.e("aggregation failed due to Exception", e)
    return None
def sendMessageHandler(self, item):
    """Publish one queued message tuple via RabbitMQ.

    item -- 3-tuple (message, exchange, routing key) as produced by
            the message-bus queue.
    """
    message, exchange, routing_key = item
    L.d2(message)
    RabbitMQ.sendMessage(message, exchange, routing_key)
    # release the (possibly large) serialized payload right away
    del message
def __init__(self):
    """Construct the QoI system in a dormant state.

    Collaborators are wired up later by initialise(); until then the
    system is flagged as not initialised.
    """
    self.reputationSystem = None   # created in initialise()
    self.messageBusQueue = None    # presumably set externally before use — TODO confirm
    self.initialised = False       # flipped to True by initialise()
    L.d2("CpQoiSystem started")
def addClockJob(self, deleteOldJob=False):
    """Schedule the next one-shot validation job on the clock.

    deleteOldJob -- when True, remove the previously registered job first
                    so only a single validation job is ever pending.
    """
    L.d2("ReputationSystem: addClockJob for Stream", self.description.fullSensorID, "with", self.validationInterval)
    if deleteOldJob and self.jobID:
        self.clock.removeJob(self.jobID)
    # NOTE(review): `(self.timestamp)` is NOT a tuple — just a parenthesised
    # expression. If clock.addJob expects an argument tuple (threading.Timer
    # convention) this is missing a trailing comma; confirm addJob's contract
    # before changing it.
    self.jobID = self.clock.addJob(self.validationInterval, self.checkTimeRelevantMetrics, args=(self.timestamp), reoccurring=False)
def add(self, item):
    """Put *item* on the internal queue, blocking while it is full."""
    q = self.queue
    if q.full():
        # capacity reached: the put below will block until space frees up
        L.d2("trying to add something into a full queue:", item)
    q.put(item, True)  # blocking put — wait for a free slot instead of raising
    L.d2("QueueThread size:", q.qsize())
def update(self):
    """Poll the wrapper's data source once and push the result through
    parsing, quality annotation, fault recovery and all receivers.

    Two top-level modes:
      * replay mode -- data comes from self.historyreader / self.historyparser
      * live mode   -- data comes from self.connection / self.parser
    In both modes, when no (parseable) data is available, a substitute
    observation is assembled from the per-field fault-recovery estimators.
    Every phase is bracketed with self.stats measurements.
    """
    from virtualisation.resourcemanagement.resourcemanagement import ResourceManagement
    # print "time", self.clock.now()
    latStart = datetime.now()  # processing start; used to compute observation latency
    L.d("processing:", self.getSensorDescription().sensorID)
    # L.d(self.clock.now())
    if self.replaymode:
        self.stats.startMeasurement("Update_replay")
        # self.clock.pause()
        if self.historyreader:
            L.d2("abstractwrapper get data")
            self.stats.startMeasurement("Update_replay.Historyreader")
            data_raw = self.historyreader.tick(self.clock)
            self.stats.stopMeasurement("Update_replay.Historyreader")
            L.d2("abstractwrapper received data:", str(data_raw))
            if data_raw:
                # normalise to a list: some readers yield several observations per tick
                data_list = [data_raw] if not self.historyreader.multiple_observations else data_raw
                for data in data_list:
                    try:
                        L.d2("abstractwrapper parse data")
                        # print "data to parse", data
                        self.stats.startMeasurement("Update_replay.Historyparser")
                        parsed = self.historyparser.parse(data, self.clock)
                        self.stats.stopMeasurement("Update_replay.Historyparser")
                        L.d2("abstractwrapper parsed data:", str(parsed))
                        del data
                        if parsed:
                            self.stats.startMeasurement("Update_replay.Preparation")
                            ObservationIDGenerator.addObservationIDToFields(parsed)
                            parsed.producedInReplayMode = True
                            parsed.recovered = False
                            parsed.latency = (datetime.now() - latStart).total_seconds()
                            self.stats.stopMeasurement("Update_replay.Preparation")
                            # QoI Start
                            quality = None
                            if self.qoiSystem:
                                L.d2("abstractwrapper get quality")
                                self.stats.startMeasurement("Update_replay.Quality")
                                quality = self.qoiSystem.addData(self.getSensorDescription(), parsed, self.clock)
                                self.stats.stopMeasurement("Update_replay.Quality")
                                L.d2("abstractwrapper quality:", quality)
                            if self.faultRecoveryActive:
                                # keep the estimators trained with every good observation
                                L.d2("abstractwrapper update fault recovery")
                                self.stats.startMeasurement("Update_replay.FaultRecoveryUpdate")
                                self.updateFaultRecoveries(parsed, quality)
                                self.stats.stopMeasurement("Update_replay.FaultRecoveryUpdate")
                                L.d2("abstractwrapper fault recovery updated")
                            self.stats.startMeasurement("Update_replay.Receiver")
                            for r in self.receiver:
                                L.d2("abstractwrapper start receiver", r)
                                r.receive(parsed, self.getSensorDescription(), self.clock, quality)
                                L.d2("abstractwrapper receiver", r, "finished")
                            self.stats.stopMeasurement("Update_replay.Receiver")
                    except Exception as e:
                        L.e("Error while updating sensor", self.getSensorDescription().fullSensorID, e)
                    finally:
                        if ResourceManagement.args.gentle:
                            # throttle replay to spare CPU when requested
                            self.clock.sleep()
            else:
                # no raw data this tick -> synthesise one from fault recovery
                L.d("there is no data, ask fault recovery1")
                # L.i(self.getSensorDescription().sensorID)
                # L.i(self.clock.now())
                try:
                    self.stats.startMeasurement("Update_replay.Recovery")
                    data = JSONObject()
                    data.latency = 0
                    data.producedInReplayMode = True
                    data.recovered = True
                    data.fields = []
                    # include only fields whose estimator has seen enough data
                    for n in self.getSensorDescription().fields:
                        if n in self.faultRecoveries and self.faultRecoveries[n].isReady():
                            data.fields.append(n)
                            data[n] = JSONObject()
                            # at this point the dataType is in FAULT_RECOVERY_SUPPORTED_DATATYPES and we can safely use cast
                            data[n].value = self.faultRecoveryCast(
                                self.faultRecoveries[n].getEstimation(),
                                self.getSensorDescription().field[n].dataType,
                            )
                            data[n].propertyName = self.getSensorDescription().field[n].propertyName
                            data[n].propertyURI = self.getSensorDescription().field[n].propertyURI
                            if "unit" in self.getSensorDescription().field[n]:
                                data[n].unit = self.getSensorDescription().field[n].unit
                            data[n].sensorID = self.getSensorDescription().fullSensorID
                            data[n].observationSamplingTime = self.clock.timeAsString()
                            data[n].observationResultTime = data[n].observationSamplingTime
                    self.stats.stopMeasurement("Update_replay.Recovery")
                    self.stats.startMeasurement("Update_replay.ObservationIDGenerator")
                    ObservationIDGenerator.addObservationIDToFields(data)
                    self.stats.stopMeasurement("Update_replay.ObservationIDGenerator")
                    quality = None
                    if self.qoiSystem:
                        self.stats.startMeasurement("Update_replay.Quality")
                        quality = self.qoiSystem.addData(self.getSensorDescription(), data, self.clock)
                        self.stats.stopMeasurement("Update_replay.Quality")
                    self.stats.startMeasurement("Update_replay.Receiver")
                    for r in self.receiver:
                        r.receive(data, self.getSensorDescription(), self.clock, quality)
                    self.stats.stopMeasurement("Update_replay.Receiver")
                except Exception as e:
                    L.e("Error while updating sensor", self.getSensorDescription().fullSensorID, e)
                finally:
                    pass
                    # if ResourceManagement.args.gentle:
                    #     self.clock.sleep()
        else:
            pass  # no history reader - nothing to do
        self.stats.stopMeasurement("Update_replay")
    else:
        # no replay mode
        self.stats.startMeasurement("Update_live")
        if self.connection:
            try:
                self.stats.startMeasurement("Update_live.Connection")
                data_raw = self.connection.next()
                self.stats.stopMeasurement("Update_live.Connection")
                if data_raw:
                    data_list = [data_raw] if not self.connection.multiple_observations else data_raw
                    for data in data_list:
                        self.stats.startMeasurement("Update_live.Parser")
                        parsed = self.parser.parse(data, self.clock)
                        self.stats.stopMeasurement("Update_live.Parser")
                        if parsed:
                            self.stats.startMeasurement("Update_live.Preparation")
                            ObservationIDGenerator.addObservationIDToFields(parsed)
                            parsed.producedInReplayMode = False
                            parsed.recovered = False
                            parsed.latency = (datetime.now() - latStart).total_seconds()
                            self.stats.stopMeasurement("Update_live.Preparation")
                            # QoI Start
                            quality = None
                            if self.qoiSystem:
                                # TODO update the timestamp
                                self.stats.startMeasurement("Update_live.Quality")
                                quality = self.qoiSystem.addData(self.getSensorDescription(), parsed, self.clock)
                                self.stats.stopMeasurement("Update_live.Quality")
                            if self.faultRecoveryActive:
                                L.d2("abstractwrapper update fault recovery")
                                self.stats.startMeasurement("Update_live.FaultRecoveryUpdate")
                                self.updateFaultRecoveries(parsed, quality)
                                self.stats.stopMeasurement("Update_live.FaultRecoveryUpdate")
                                L.d2("abstractwrapper fault recovery updated")
                            self.stats.startMeasurement("Update_live.Receiver")
                            for r in self.receiver:
                                r.receive(parsed, self.getSensorDescription(), self.clock, quality)
                            self.stats.stopMeasurement("Update_live.Receiver")
                else:
                    # fault recovery
                    # NOTE(review): mirrors the replay-mode recovery branch, but
                    # producedInReplayMode is NOT set on the synthesised data here
                    # — confirm downstream receivers tolerate the missing attribute
                    L.i("there is no data, ask fault recovery2")
                    try:
                        self.stats.startMeasurement("Update_live.Recovery")
                        data = JSONObject()
                        data.latency = 0
                        data.recovered = True
                        data.fields = []
                        for n in self.getSensorDescription().fields:
                            if n in self.faultRecoveries and self.faultRecoveries[n].isReady():
                                data.fields.append(n)
                                data[n] = JSONObject()
                                data[n].value = self.faultRecoveryCast(
                                    self.faultRecoveries[n].getEstimation(),
                                    self.getSensorDescription().field[n].dataType,
                                )
                                data[n].propertyName = self.getSensorDescription().field[n].propertyName
                                data[n].propertyURI = self.getSensorDescription().field[n].propertyURI
                                if "unit" in self.getSensorDescription().field[n]:
                                    data[n].unit = self.getSensorDescription().field[n].unit
                                data[n].sensorID = self.getSensorDescription().fullSensorID
                                data[n].observationSamplingTime = self.clock.timeAsString()
                                data[n].observationResultTime = data[n].observationSamplingTime
                        self.stats.stopMeasurement("Update_live.Recovery")
                        ObservationIDGenerator.addObservationIDToFields(data)
                        quality = None
                        if self.qoiSystem:
                            self.stats.startMeasurement("Update_live.Quality")
                            quality = self.qoiSystem.addData(self.getSensorDescription(), data, self.clock)
                            self.stats.stopMeasurement("Update_live.Quality")
                        self.stats.startMeasurement("Update_live.Receiver")
                        for r in self.receiver:
                            r.receive(data, self.getSensorDescription(), self.clock, quality)
                        self.stats.stopMeasurement("Update_live.Receiver")
                    except Exception as e:
                        L.e(
                            "Error while updating sensor (fault recovery)",
                            self.getSensorDescription().fullSensorID,
                            str(e),
                        )
                    finally:
                        pass
                        # if ResourceManagement.args.gentle:
                        #     self.clock.sleep()
            except Exception as e:
                L.e(
                    "Error while updating sensor (not fault recovery)",
                    self.getSensorDescription().fullSensorID,
                    str(e),
                )
        else:
            pass  # no live mode supported
        self.stats.stopMeasurement("Update_live")