def registerExchanges(cls):
    for ex in RabbitMQ.exchanges:
        try:
            RabbitMQ.declareExchange(ex, _type="topic")
        except Exception as e:
            # str(e) is more robust than the deprecated e.message attribute
            L.e('Exchange %s could not be declared: %s' % (ex, str(e)))
def aggregate(self, data, sensordescription): """ this method is called when stream data has been annotated. :param data: :param sensordescription: :return: The aggregated data """ if not data.fields: return None result = [] try: aggregation_objs = self.aggregation_objs[sensordescription.uuid] for key, agg in aggregation_objs.items(): agg_result = agg.control(data[key]) if agg_result: g, start, end, size = agg_result r = JSONObject() r.graph = g r.field = sensordescription.field[key] r.sensorID = sensordescription.sensorID r.propertyName = sensordescription.field[key].propertyName r.category = sensordescription.sensorType r.aggregationMethod = sensordescription.field[key].aggregationMethod r.aggregationConfiguration = sensordescription.field[key].aggregationConfiguration r.start = start r.end = end r.size = size result.append(r) return result except Exception as e: Log.e("aggregation failed due to Exception", e) return None
def setReplayMode(self, mode):
    super(BrasovIncidentWrapper, self).setReplayMode(mode)
    # field names as delivered by the service
    fieldnames = ["id", "comments", "createdon", "description", "guid", "i",
                  "incidentState", "incidentid", "indsoft_publiclyvisible",
                  "statecode", "ticketnumber", "timestamp", "title", "x", "y"]
    try:
        fobj = AbstractWrapper.getFileObject(__file__, "incidents%d.csv" % self.number, "rU")
        self.historyreader = CSVHistoryReader(self, fobj, delimiter=",")
        self.historyreader.multiple_observations = False
        self.historyparser = CSVParser(self, fieldnames)
    except Exception as e:
        Log.e("setReplayMode in Brasov Incident Wrapper", self.number, e)
        self.historyreader = None
def __init__(self, gdi_config, rm):
    self.rm = rm
    self.PAGINATION_LIMIT = 100
    connect_str = "host='%s' dbname='%s' user='%s' password='%s' port=%d" % (
        gdi_config.host, gdi_config.database, gdi_config.username,
        gdi_config.password, gdi_config.port)
    self.conn = psycopg2.connect(connect_str)
    self.curs = self.conn.cursor()
    try:
        self.curs.execute("CREATE SCHEMA IF NOT EXISTS %s;" % SQL.SCHEMA)
        # note: 'sercvice_category' is kept as spelled in the existing schema
        self.curs.execute(
            "CREATE TABLE IF NOT EXISTS %s.cp_sensors (sensor_uuid UUID CONSTRAINT uuid_key PRIMARY KEY, "
            "sensor_annotation_id VARCHAR, sercvice_category VARCHAR, traffic INTEGER, "
            "geom GEOMETRY(GEOMETRY, 4326));" % ("public",))
        cols = ["sampling_time TIMESTAMP", "sensor_uuid UUID", "observation_uuid UUID",
                "data JSON", "quality JSON"]
        query = ("CREATE TABLE IF NOT EXISTS %s.cp_observations (%s, PRIMARY KEY (observation_uuid), "
                 "FOREIGN KEY (sensor_uuid) REFERENCES %s.cp_sensors(sensor_uuid));\n"
                 % (SQL.SCHEMA, ", ".join(cols), "public"))
        self.curs.execute(query)
        self.conn.commit()  # commit the tables before attempting the indexes
        # indexes over sampling_time and sensor_uuid;
        # 'CREATE INDEX IF NOT EXISTS' is not available to us (PostgreSQL < 9.5),
        # so an 'index already exists' error is caught and rolled back separately
        try:
            self.curs.execute('CREATE INDEX "timeindex" ON %s.cp_observations USING btree (sampling_time);' % (SQL.SCHEMA,))
            self.curs.execute('CREATE INDEX uuidindex ON %s.cp_observations USING btree (sensor_uuid);' % (SQL.SCHEMA,))
            self.conn.commit()
        except Exception:
            self.conn.rollback()
        # maps a primary observation uuid to its secondary observation uuids
        query = 'CREATE TABLE IF NOT EXISTS %s.p_s_observation_uuid (main UUID, secondary UUID);' % (SQL.SCHEMA,)
        self.curs.execute(query)
        self.conn.commit()
        L.i("SQL: schema/tables created")
    except Exception as e:
        L.e("SQL: Could not create schema/tables", e)
        self.conn.rollback()
def next(self):
    url = self.source or self.wrapper.getSensorDescription().source
    try:
        return self.load(url)
    except Exception:
        Log.e("HttpPullConnection: failed to load", url)
        return None
def setReplayMode(self, mode): if mode: try: self.historyreader = CSVHistoryReader(self, AbstractWrapper.getFileObject(__file__, os.path.join("historicdata", "pollution-%s.csv" % self.sensorDescription.sensorID), "rU"), delimiter=';') self.historyparser = CSVParser(self, self.historyreader.headers) except Exception as e: Log.e(e) self.historyreader = None super(InternalBrasovWrapper, self).setReplayMode(mode)
def load(self, url):
    try:
        f = urllib2.urlopen(url, timeout=10)
        r = f.read()
        f.close()
        return r
    except Exception:
        Log.e(self.__class__.__name__, "error in load")
        return None
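# A transient network failure currently makes load() return None immediately.
# The helper below is a minimal retry sketch, not part of the original code:
# the name load_with_retries and its parameters are hypothetical, and it
# assumes the same Python 2 / urllib2 environment as the method above.
import time
import urllib2

def load_with_retries(url, attempts=3, delay=2, timeout=10):
    for i in range(attempts):
        try:
            f = urllib2.urlopen(url, timeout=timeout)
            try:
                return f.read()
            finally:
                f.close()
        except (urllib2.URLError, IOError):
            if i < attempts - 1:
                time.sleep(delay * (i + 1))  # linear backoff before the next attempt
    return None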
def start(self):
    if self.replaymode:
        try:
            fobj = AbstractWrapper.getFileObject(
                __file__,
                os.path.join("historicdata", "weatherAW-%s.csv" % self.sensorDescription.sensorID),
                "rU")
            self.historyreader = CSVHistoryReader(self, fobj, delimiter=';')
            self.historyparser = CSVParser(self, self.historyreader.headers)
        except Exception as e:
            Log.e(e)
            self.historyreader = None
    super(InternalWeatherAWWrapper, self).start()
def saveGraph(self, graph, graphName):
    serialisation = graph.serialize(destination=None, format='nt', encoding=None)
    queryString = "".join(["INSERT DATA INTO GRAPH <", self.getGraphURI(graphName), "> {", serialisation, "}"])
    sparql = self.getSparqlObject(graphName, queryString)
    try:
        sparql.query()
    except HTTPError as e:
        L.e("Sparql Endpoint HTTPError in saveGraph:", str(e.code), e.reason)
    except Exception as e:
        L.e("Error in saveGraph:", str(e))
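# getSparqlObject is referenced throughout these methods but not shown in this
# section. A minimal sketch of what it might look like, built on the
# SPARQLWrapper library; the self.endpoint attribute and the POST/JSON defaults
# are assumptions, not the project's actual implementation.
from SPARQLWrapper import SPARQLWrapper, JSON, POST

def getSparqlObject(self, graphName, queryString=None):
    sparql = SPARQLWrapper(self.endpoint)  # assumed endpoint URL attribute
    sparql.setReturnFormat(JSON)           # results converted to JSON by default
    sparql.setMethod(POST)                 # updates have to be sent via POST
    if queryString:
        sparql.setQuery(queryString)
    return sparql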
def deleteGraph(self, graphName):
    queryString = "DEFINE sql:log-enable 3 DROP SILENT GRAPH <" + self.getGraphURI(graphName) + ">"
    L.d("deleteGraph using query:", queryString)
    sparql = self.getSparqlObject(graphName, queryString)
    sparql.setTimeout(300)
    try:
        sparql.query()
        return True
    except Exception as e:
        L.e("Error in deleteGraph:", str(e))
        return False
def removeAllSensorStreams(self):
    sql = "DELETE FROM cp_sensors"
    try:
        self.curs.execute(sql)
        self.conn.commit()
        return True
    except Exception:
        self.conn.rollback()
        L.e("Cannot delete all Sensors")
        L.e("SQL query used:", sql)
        return False
def removeSensorStream(self, sensor_uuid):
    sql = "DELETE FROM cp_sensors WHERE sensor_uuid='%(sensor_uuid)s'" % {'sensor_uuid': sensor_uuid}
    try:
        self.curs.execute(sql)
        self.conn.commit()
        return True
    except Exception:
        self.conn.rollback()
        L.e("Cannot delete Sensor: " + str({'sensor_uuid': sensor_uuid}))
        L.e("SQL query used:", sql)
        return False
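# The string-formatted SQL above (and in the other cp_sensors methods) is open
# to SQL injection. Below is a sketch of the same delete using psycopg2's
# standard parameter binding instead; behaviour is otherwise assumed identical.
def removeSensorStream(self, sensor_uuid):
    sql = "DELETE FROM cp_sensors WHERE sensor_uuid = %s"
    try:
        self.curs.execute(sql, (str(sensor_uuid),))  # psycopg2 quotes the value safely
        self.conn.commit()
        return True
    except Exception:
        self.conn.rollback()
        L.e("Cannot delete Sensor:", str(sensor_uuid))
        return False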
def createGraph(self, graphName):
    queryString = "CREATE GRAPH <" + self.getGraphURI(graphName) + ">"
    sparql = self.getSparqlObject(graphName, queryString)
    try:
        sparql.query().convert()
        return True
    except HTTPError as e:
        L.e("Sparql Endpoint HTTPError in createGraph:", str(e.code), e.reason)
    except Exception as e:
        L.e("Error in createGraph:", str(e))
    return False
def saveTriple(self, graphName, subject, predicate, object):
    # 'INSERT DATA' does not work with a set default graph; the target graph
    # must be named explicitly: "... INSERT DATA INTO <graph> ..."
    sparql = self.getSparqlObject(graphName)
    queryString = ("INSERT DATA INTO <" + self.getGraphURI(graphName) + "> { <"
                   + subject + "> <" + predicate + "> <" + object + "> }")
    sparql.setQuery(queryString)
    try:
        sparql.query()
    except HTTPError as e:
        L.e("Sparql Endpoint HTTPError in saveTriple:", str(e.code), e.reason)
    except Exception as e:
        L.e("Error in saveTriple:", str(e))
def saveMultipleGraphs(self, serialisedGraph, graphName=None):
    queryString = "".join(["INSERT DATA { GRAPH <", self.getGraphURI(graphName), "> {", serialisedGraph, "}}"])
    sparql = self.getSparqlObject(graphName, queryString)
    sparql.queryType = INSERT
    try:
        sparql.query()
    except EndPointInternalError:
        # transaction deadlock case
        raise SPARQL_Exception()
    except EndPointNotFound:
        # temporary 404 error
        raise SPARQL_Exception()
    except Exception as e:
        L.e("Error in saveMultipleGraphs:", str(e))
        raise StoreOffline_Exception()
def graphExists(self, graphName):
    queryString = "ASK { GRAPH <" + self.getGraphURI(graphName) + "> { ?s ?p ?o . } }"
    sparql = self.getSparqlObject(graphName, queryString)
    try:
        retList = sparql.query().convert()
        return retList["boolean"]
    except HTTPError as e:
        L.e("Sparql Endpoint HTTPError in graphExists:", str(e.code), e.reason)
    except Exception as e:
        L.e("Error in graphExists:", str(e))
def setReplayMode(self, mode): if mode: try: Log.i("loading history for", self.sensorDescription.sensorID, "...") self.historyreader = CSVHistoryReader(self, AbstractWrapper.getFileObject(__file__, os.path.join("historicdata", "trafficData%d.csv" % self.sensorDescription.sensorID), "rU")) self.historyreader.multiple_observations = False Log.i("done") self.historyparser = CSVParser(self, self.historyreader.headers) # connection will be set automatically by the AbstractComposedWrapper to SplitterConnection except Exception as e: Log.e(e) self.historyreader = None super(InternalWrapper, self).setReplayMode(mode)
def setReplayMode(self, mode): if mode: try: Log.i("loading history data for", self.sensorDescription.sensorID, "...") self.historyreader = CSVHistoryReader(self, AbstractWrapper.getFileObject(__file__, os.path.join("historicdata", "aarhus_parking-%s.csv" % self.sensorDescription.sensorID), "rU"), timestampfield="updatetime") # Must preserve the order as in the CSV but use the names as in senordescription.fields # vehiclecount,updatetime,_id,totalspaces,garagecode,streamtime self.historyparser = CSVParser(self, ["vehicleCount", "updatetime", "_id", "totalSpaces", "garageCode", "st"], timestampfield="updatetime") Log.i("done") except Exception as e: Log.e(e) self.historyreader = None super(InternalWrapper, self).setReplayMode(mode)
def write(self, graphString):
    if ThreadedTriplestoreAdapter.triplestore:
        attempts = self.maxAttempts
        while attempts > 0:
            try:
                ThreadedTriplestoreAdapter.triplestore.saveMultipleGraphs(graphString, self.graphName)
                return
            except SPARQL_Exception:
                attempts -= 1
                L.e("Error inserting into Virtuoso, reattempting")
            except StoreOffline_Exception:
                L.e("ThreadedTriplestoreAdapter was unable to save data, store might be offline!")
                break
def getStreamMinMaxDate(self, graphName, sensorName):
    queryString = """prefix ssn: <http://purl.oclc.org/NET/ssnx/ssn#>
prefix tl: <http://purl.org/NET/c4dm/timeline.owl#>
prefix sao: <http://purl.oclc.org/NET/UNIS/sao/sao#>
prefix ces: <http://www.insight-centre.org/ces#>
prefix so: <http://www.daml.org/services/owl-s/1.2/Service.owl#>
prefix qoi: <http://purl.oclc.org/NET/UASO/qoi#>
prefix prov: <http://www.w3.org/ns/prov#>
SELECT MAX(str(?timeValue)) as ?maxDateTime MIN(str(?timeValue)) as ?minDateTime
WHERE {
    ?observation ssn:observationResultTime ?time .
    ?observation ssn:observedBy <""" + sensorName + """> .
    ?time tl:at ?timeValue .
}"""
    sparql = self.getSparqlObject(graphName, queryString)
    try:
        return sparql.query().convert()
    except Exception as e:
        L.e("Error in getStreamMinMaxDate:", str(e))
def _run(self):
    self.abort.clear()
    if not self.clock:
        raise Exception("no clock set?")
    while not self.abort.is_set():
        if self.event.wait(1.0):
            if not self.abort.is_set():
                try:
                    self.update()
                except Exception as e:
                    L.e("in _run", str(e))
                finally:
                    self.clock.continue_running()
            self.event.clear()
def __registerSensorStream(self, sensor_uuid, sensor_id, sercvice_category, geom, epsg):
    params = {'sensor_uuid': sensor_uuid, 'sensor_id': sensor_id,
              'sercvice_category': sercvice_category, 'geom': geom.wkb_hex, 'epsg': epsg}
    sql = ("INSERT INTO cp_sensors(sensor_uuid, sensor_annotation_id, sercvice_category, geom) "
           "VALUES('%(sensor_uuid)s', '%(sensor_id)s', '%(sercvice_category)s', "
           "ST_Transform(st_setsrid('%(geom)s'::geometry, %(epsg)s), 4326))" % params)
    try:
        self.curs.execute(sql)
        self.conn.commit()
        return True
    except Exception:
        self.conn.rollback()
        L.e("Cannot insert Sensor:", str(params))
        L.e("SQL query used:", sql)
        return False
def aggregate(self, data, sensordescription):
    result = []
    try:
        dftobjs = self.dftobjects[sensordescription.uuid]
        for f in dftobjs:
            g = dftobjs[f].control(data[f])
            if g:
                r = JSONObject()
                r.graph = g
                r.sensorID = sensordescription.sensorID
                r.propertyType = sensordescription.field[f].propertyName
                r.category = sensordescription.sensorType
                result.append(r)
        return result
    except KeyError:
        Log.e("Dft aggregation failed")
        return None
def _runReplay(self):
    self.abort.clear()
    if not self.clock:
        raise Exception("no clock set?")
    while not self.abort.is_set():
        if self.event.wait(1.0):
            if not self.abort.is_set():
                try:
                    self.__forEachWrapper("update")
                except Exception as e:
                    L.e("in _runReplay", str(e))
                finally:
                    self.clock.continue_running()
            self.event.clear()
def insert_observation(self, sd, ob, q):
    query = None
    try:
        # copy the observation and strip fields that are not part of the payload
        _ob = ob.deepcopy()
        _ob.fields = filter(lambda x: not self.is_timestamp_field(x, sd), ob.fields)
        if sd.isTimestampedStream() and sd.timestamp.inField in _ob:
            _ob.remove_item(sd.timestamp.inField)
        if 'latency' in _ob:
            _ob.remove_item('latency')
        primary_ob_uuid = None
        p_s_values = []
        for _f in ob.fields:
            if _f not in ob or (sd.isTimestampedStream() and sd.timestamp.inField == _f):
                continue
            if not query:
                # the first field provides the primary observation row
                field = ob[_f]
                primary_ob_uuid = self._escape_string(None, str(field.observationID))
                v = ["TIMESTAMP " + self._escape_string(None, field.observationSamplingTime),
                     self._escape_string(None, str(sd.uuid)),
                     primary_ob_uuid,
                     self._escape_string(None, _ob.dumps(), singleqoute_to_double=True),
                     self._escape_string(None, JOb(q).dumps())]
                query = "INSERT INTO %s.cp_observations (%s) VALUES (%s);\n" % (
                    SQL.SCHEMA, ','.join(SQL.cp_observation_fields), ','.join(v))
            # map the primary observation to this field's own observation uuid
            p_s_values.append("(%s, %s)" % (
                primary_ob_uuid, self._escape_string(None, str(ob[_f].observationID))))
        if query:
            query += "INSERT INTO %s.p_s_observation_uuid (main, secondary) VALUES %s;\n" % (
                SQL.SCHEMA, ','.join(p_s_values))
            L.d2("Using query:", query)
            self.curs.execute(query)
            self.conn.commit()
        return True
    except Exception as e:
        self.conn.rollback()
        L.e(e)
        L.e("SQL query used:", query)
        return False
def wrapper_added(self, sensordescription):
    saxobjs = {}
    for f in sensordescription.fields:
        field = sensordescription.field[f]
        if field.dataType in ("int", "float", "long"):
            if self.config.unit_of_window == 'hours':
                min_window = timedelta(hours=self.config.minimum_window_length)
                max_window = timedelta(hours=self.config.maximum_window_length)
            elif self.config.unit_of_window == 'minutes':
                min_window = timedelta(minutes=self.config.minimum_window_length)
                max_window = timedelta(minutes=self.config.maximum_window_length)
            elif self.config.unit_of_window == 'days':
                min_window = timedelta(days=self.config.minimum_window_length)
                max_window = timedelta(days=self.config.maximum_window_length)
            else:
                Log.e('Unit of time window is not supported: %s' % self.config.unit_of_window)
                continue  # skip this field rather than fail with unbound windows
            saxobjs[f] = SensorSaxControl(min_window, max_window,
                                          self.config.sensitvity_level,
                                          self.config.alphabet_size,
                                          self.config.word_length)
    self.sensorsaxobjects[sensordescription.uuid] = saxobjs
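# An illustrative configuration for the SAX aggregator, inferred from the
# attribute accesses in wrapper_added above; the values are made up, and
# 'sensitvity_level' is spelled exactly as the code expects it.
sax_config = JSONObject()
sax_config.unit_of_window = 'hours'    # one of 'minutes', 'hours', 'days'
sax_config.minimum_window_length = 1   # shortest aggregation window
sax_config.maximum_window_length = 24  # longest aggregation window
sax_config.sensitvity_level = 3        # (sic) attribute name used by the code
sax_config.alphabet_size = 5           # number of symbols in the SAX alphabet
sax_config.word_length = 8             # symbols per SAX word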
def aggregate(self, data, sensordescription):
    if not data.fields:
        return None
    result = []
    try:
        saxobjs = self.aggregation_objs[sensordescription.uuid]
        for f in saxobjs:
            g = saxobjs[f].control(data[f])
            if g:
                r = JSONObject()
                r.graph = g
                r.sensorID = sensordescription.sensorID
                r.propertyType = sensordescription.field[f].propertyName
                r.category = sensordescription.sensorType
                result.append(r)
        if result:
            Log.d2('SaxAggregator: Aggregation successful %s' % str(result))
        return result
    except Exception as e:
        Log.e("aggregation failed due to Exception", e)
        return None
def getObservationGraph(self, graphName, sensor, start=None, end=None, asGraph=True):
    dateFilter = ""
    if start and end:
        dateFilter = ('FILTER ( (xsd:dateTime(?resultTimeValue) >= xsd:dateTime("%s")) '
                      '&& (xsd:dateTime(?resultTimeValue) <= xsd:dateTime("%s")) ) ' % (start, end))
    elif start:
        dateFilter = 'FILTER ( xsd:dateTime(?resultTimeValue) >= xsd:dateTime("%s") ) ' % start
    queryString = """DEFINE sql:log-enable 2
prefix : <http://stefan.com/>
prefix sao: <http://purl.oclc.org/NET/UNIS/sao/sao#>
prefix ssn: <http://purl.oclc.org/NET/ssnx/ssn#>
prefix tl: <http://purl.org/NET/c4dm/timeline.owl#>
CONSTRUCT { ?s ?p ?o }
WHERE {
    { ?observation (!:)* ?s . ?s ?p ?o . }
    {
        ?observation a sao:Point .
        ?observation ssn:observationResultTime ?resultTime .
        ?resultTime tl:at ?resultTimeValue .
        ?observation ssn:observedBy <""" + sensor + """> . """ + dateFilter + """
    }
}"""
    sparql = self.getSparqlObject(graphName, queryString)
    sparql.setReturnFormat("n3")
    try:
        ret = sparql.query().convert()
        if not asGraph:
            return ret
        g = ConjunctiveGraph()
        return g.parse(data=ret, format="n3")
    except Exception as e:
        L.e("Error in getObservationGraph:", str(e))
        return None
def addWrapper(self, wrapper):
    # TODO: this should not be here
    if ResourceManagement.args.cleartriplestore:
        self.deleteGraphs(wrapper)
    sd = wrapper.getSensorDescription()
    try:
        # a wrapper may carry one or several sensor descriptions;
        # treat both cases uniformly
        for _sd in (sd if isinstance(sd, list) else [sd]):
            try:
                _sd.test()
                if ResourceManagement.args.aggregate:
                    self.aggregator.wrapper_added(_sd)
                if self.gdiInterface:
                    self.gdiInterface.registerSensorStreamFromWKT(
                        _sd.uuid, _sd.sensorID, _sd.sensorType,
                        _sd.location, _sd.location_epsg or 4326)
                L.i("added wrapper with ID", _sd.sensorID)
            except Exception as ex:
                L.e("Error deploying wrapper:", str(ex))
        if ResourceManagement.args.triplestore or ResourceManagement.args.messagebus:
            StaticAnnotator.threadedStaticAnnotationSensor(wrapper, self.config,
                                                           self.messageBusQueue, self.ui.api)
        if ResourceManagement.args.messagebus:
            wrapper.setMessageBusQueue(self.messageBusQueue)
        self.wrappers.append(wrapper)
    except Exception as ex:
        L.e(self.__class__.__name__, "Error in addWrapper:", str(ex))
def get_observations(self, uuid, start=None, end=None, format='json', onlyLast=False, fields=None, offset=0):
    from virtualisation.resourcemanagement.resourcemanagement import ResourceManagement
    w = self.rm.getWrapperByUUID(uuid)
    if not w:
        return None
    sd = w.getSensorDescription()
    # build the query
    _filter = ["sensor_uuid = '%s'" % uuid]
    order = "ORDER BY sampling_time"
    limitation = ""
    if onlyLast:
        order += " DESC"
    else:
        if start:
            _filter.append("sampling_time >= TIMESTAMP '%s'" % start)
        if end:
            _filter.append("sampling_time <= TIMESTAMP '%s'" % end)
    _filter = "WHERE " + " and ".join(_filter)
    if fields:
        fields = fields.split(',')
        fields_ = ["data->'%s' AS %s" % (ft, ft) for ft in fields]
        fields_.append("quality")
    else:
        fields_ = SQL.cp_observation_fields
        limitation = "LIMIT %d" % (1 if onlyLast else self.PAGINATION_LIMIT)
    query = "SELECT %s FROM %s.cp_observations %s %s %s OFFSET %d;" % (
        ",".join(fields_), SQL.SCHEMA, _filter, order, limitation, offset)
    L.d("SQL: executing query", query)
    try:
        # a new cursor object so as not to interfere with the state of the
        # class's inserting cursor
        cursor = self.conn.cursor()
        cursor.execute(query)
        data2 = [list(x) for x in cursor.fetchall()]
        if format in ('n3', 'nt', 'xml', 'turtle', 'pretty-xml', 'trix'):
            if not (ResourceManagement.args.messagebus or ResourceManagement.args.triplestore):
                return "Error: requires messagebus or triplestore to be enabled"
            if fields:
                observations = []
                qualities = []
                for x in data2:
                    tmp = JOb()
                    for i in range(len(fields)):
                        tmp[fields[i]] = JOb(x[i])
                    tmp.fields = fields
                    observations.append(tmp)
                    qualities.append(JOb(x[-1]))
            else:
                observations = [JOb(x[3]) for x in data2]
                qualities = [JOb(x[4]) for x in data2]
            g = self.rm.annotator.annotateObservation(observations, sd, None, qualities)
            return g.serialize(format=format)
        # JSON output: convert any datetime.datetime column to a string first
        for i in range(len(data2)):
            data2[i] = map(lambda x: x.strftime("%Y-%m-%d %H:%M:%S")
                           if isinstance(x, datetime.datetime) else x, data2[i])
        json_list = []
        for x in data2:
            if fields:
                y = {}
                for i in range(len(fields)):
                    y[fields[i]] = x[i]
                y["fields"] = fields
                y["quality"] = x[-1]
            else:
                y = x[3]
                y["quality"] = x[4]
            json_list.append(y)
        return json_list
    except Exception as e:
        L.e("SQL:", e)
        L.e("SQL query used:", query)
        return "Error: " + str(e)
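# Example call, assuming `sql` is an instance of this class and the UUID
# belongs to a registered wrapper; the UUID, timestamps and field names are
# made-up values. Results are paged: fetch the next page by increasing offset
# by PAGINATION_LIMIT (100).
rows = sql.get_observations("123e4567-e89b-12d3-a456-426655440000",
                            start="2015-06-01 00:00:00",
                            end="2015-06-02 00:00:00",
                            format='json',
                            fields="vehicleCount,totalSpaces",
                            offset=0)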