def __init__(self, gdi_config, rm):
        """Open a PostgreSQL/PostGIS connection and create the schema and tables
        used for sensor and observation storage.

        :param gdi_config: config object providing host, database, username,
                           password and port
        :param rm: resource-management instance, kept for later use
        """
        self.rm = rm
        self.PAGINATION_LIMIT = 100
        # BUG FIX: the format string previously contained only three placeholders
        # (host, dbname, port) but received five arguments, raising
        # "not all arguments converted during string formatting" at runtime.
        connect_str = "host='%s' dbname='%s' user='%s' password='%s' port=%d" % (
            gdi_config.host, gdi_config.database, gdi_config.username, gdi_config.password, gdi_config.port)
        self.conn = psycopg2.connect(connect_str)
        self.curs = self.conn.cursor()
        try:
            self.curs.execute("CREATE SCHEMA IF NOT EXISTS %s;" % SQL.SCHEMA)
            # NOTE(review): the sensors table lives in "public" although the schema
            # created above is SQL.SCHEMA; the FK below also references "public",
            # so this looks intentional -- confirm. The "sercvice_category" typo is
            # kept for compatibility with existing databases.
            self.curs.execute("CREATE TABLE IF NOT EXISTS %s.cp_sensors (sensor_uuid UUID CONSTRAINT uuid_key PRIMARY KEY, sensor_annotation_id VARCHAR, sercvice_category VARCHAR, traffic INTEGER, geom GEOMETRY(GEOMETRY, 4326) );" % ("public",))
            cols = ["sampling_time TIMESTAMP", "sensor_uuid UUID", "observation_uuid UUID", "data JSON", "quality JSON"]
            query = 'CREATE TABLE IF NOT EXISTS %s.cp_observations ( %s, PRIMARY KEY (%s), FOREIGN KEY (sensor_uuid) REFERENCES %s.cp_sensors(sensor_uuid));\n' % (SQL.SCHEMA, ', '.join(cols),  ", ".join(["observation_uuid"]), "public")
            self.curs.execute(query)

            # index over sampling_time and sensor_uuid.
            # 'CREATE INDEX IF NOT EXISTS' is unavailable (PostgreSQL < 9.5), so
            # "index already exists" errors are swallowed in a separate try block.
            try:
                query = 'CREATE INDEX "timeindex" ON %s.cp_observations USING btree (sampling_time);' % (SQL.SCHEMA,)
                self.curs.execute(query)
                query = 'CREATE INDEX uuidindex ON %s.cp_observations USING btree (sensor_uuid);' % (SQL.SCHEMA,)
                self.curs.execute(query)
            except Exception:
                # most likely the indexes already exist; safe to ignore
                pass

            # primary/secondary observation_uuid map
            query = 'CREATE TABLE IF NOT EXISTS %s.p_s_observation_uuid (main UUID, secondary UUID);' % (SQL.SCHEMA,)
            self.curs.execute(query)

            self.conn.commit()
            L.i("SQL: schema/tables created")
        except Exception as e:
            L.e("SQL: Could not create schema/tables", e)
            self.conn.rollback()
    def start(self, restart=False):
        """Start live mode: launch the queues, a real-time clock and every
        registered wrapper, then block until the operator presses Enter.

        :param restart: forwarded to startWrapper for each wrapper
        """
        self._startQueues()
        if self.clock:
            self.clock.stop()
        self.clock = RealClock(self.end)

        meter = None
        if ResourceManagement.args.pt:
            # performance measuring is optional; import lazily
            from virtualisation.resourcemanagement.performancetestreceiver import PerformanceMeterMinutes
            meter = PerformanceMeterMinutes()

        for wrapper in self.wrappers:
            self.startWrapper(wrapper, restart)
            if meter is not None:
                wrapper.addReceiver(meter)
        L.i(datetime.datetime.now())

        if not self.args.noQuality:
            if self.averageStreamQuality:
                self.averageStreamQuality.setClock(self.clock)
            else:
                self.averageStreamQuality = AverageStreamQuality(self, self.clock)

        self.clock.runAsync()

        # keep the main thread alive until the operator ends the run
        raw_input("press Enter to end.\n")
        self.clock.stop()
    def _loadWrapper(self, module_name, class_name, autostart, sensordescriptions):
        # Import a wrapper module, instantiate its wrapper class, register it,
        # record its sensor description and optionally start it.
        module = __import__(module_name)
        wrapper = getattr(module, class_name)()
        self.addWrapper(wrapper)
        sensordescriptions.append(wrapper.getSensorDescription())
        if autostart:
            self.startWrapper(wrapper)

    def deploy(self, f, autostart=False):
        """Deploy one or more wrappers from a zip archive.

        The archive must contain a "deploy.json" describing either a single
        wrapper (module/class) or a list of them.

        :param f: path to the wrapper zip file
        :param autostart: start each wrapper right after registration
        :return: a tuple with 3 elements. 1. status as string, 2. error message as string, 3. list of uuids of added wrapper
        """
        L.i("Deploying", f)
        sensordescriptions = []
        try:
            # FIX: close the archive handle after reading deploy.json (was leaked);
            # module imports go through sys.path/zipimport, not this handle
            with zipfile.ZipFile(f) as zFile:
                if "deploy.json" not in zFile.namelist():
                    return "OK", "", sensordescriptions
                deployDescription = JOb(zFile.open("deploy.json", "r"))
            sys.path.insert(0, f)
            if deployDescription.isList():
                for dd in deployDescription:
                    self._loadWrapper(dd.module, dd["class"], autostart, sensordescriptions)
            else:
                self._loadWrapper(deployDescription.module, deployDescription["class"], autostart, sensordescriptions)
            return "OK", "", sensordescriptions
        except Exception as e:
            # FIX: e.message is deprecated and absent on many exception types
            # (an AttributeError here would mask the real failure); fall back to str(e)
            msg = getattr(e, "message", None) or str(e)
            L.w("Deployment of wrapper", f, "failed.", msg)
            return "Fail", msg, []
    def __run(self):
        """Worker loop: every `delay` seconds log the message counter and the
        triple-store buffer size, then reset the counter."""
        while not self.stop:
            sleep(self.delay)
            Log.i(self.counter, self.txt)
            Log.i("ThreadedTriplestoreAdapter Buffer Size:", ThreadedTriplestoreAdapter.getTotalBufferSize())
            self.counter = 0
 def setTimeframe(self, startdate, enddate):
     """Restrict replay to [startdate, enddate]; for streamed (non-list) data,
     fast-forward the underlying reader to the start date first."""
     if not isinstance(self.data, list):
         Log.i("Searching start date in historic data for", self.wrapper.getSensorDescription().sensorID, "...")
         found = self.data.scrollTo(startdate)
         if found:
             Log.i("done")
         else:
             Log.w("no historic data beginning at", startdate, "found")
     super(CSVHistoryReader, self).setTimeframe(startdate, enddate)
 def setReplayMode(self, mode):
     """Enable/disable replay mode; when enabling, open the per-sensor traffic
     CSV history and build a parser from its header row."""
     if mode:
         try:
             sensor_id = self.sensorDescription.sensorID
             Log.i("loading history for", sensor_id, "...")
             csv_path = os.path.join("historicdata", "trafficData%d.csv" % sensor_id)
             self.historyreader = CSVHistoryReader(self, AbstractWrapper.getFileObject(__file__, csv_path, "rU"))
             self.historyreader.multiple_observations = False
             Log.i("done")
             self.historyparser = CSVParser(self, self.historyreader.headers)
             # connection will be set automatically by the AbstractComposedWrapper to SplitterConnection
         except Exception as err:
             Log.e(err)
             self.historyreader = None
     super(InternalWrapper, self).setReplayMode(mode)
 def end(self):
     """Shut down all wrappers, queues and interfaces at the end of a replay."""
     for wrapper in self.wrappers:
         wrapper.stop()
     flags = ResourceManagement.args
     if flags.messagebus:
         self.messageBusQueue.stop()
     if flags.aggregate:
         self.aggregationQueue.stop()
     if self.eventWrapper:
         self.eventWrapper.stop()
     self.receiverQueue.stop()
     if flags.triplestore:
         ThreadedTriplestoreAdapter.stop()
     self.stopInterface()
     L.i("REPLAY ENDED")
    def setReplayMode(self, mode):
        """Enable/disable replay mode; when enabling, open the per-garage parking
        CSV history and its column parser keyed on "updatetime"."""
        if mode:
            try:
                sensor_id = self.sensorDescription.sensorID
                Log.i("loading history data for", sensor_id, "...")
                csv_path = os.path.join("historicdata", "aarhus_parking-%s.csv" % sensor_id)
                history_file = AbstractWrapper.getFileObject(__file__, csv_path, "rU")
                self.historyreader = CSVHistoryReader(self, history_file, timestampfield="updatetime")
                # Must preserve the order as in the CSV but use the names as in senordescription.fields
                # vehiclecount,updatetime,_id,totalspaces,garagecode,streamtime
                self.historyparser = CSVParser(self, ["vehicleCount", "updatetime", "_id", "totalSpaces", "garageCode", "st"], timestampfield="updatetime")
                Log.i("done")
            except Exception as err:
                Log.e(err)
                self.historyreader = None
        super(InternalWrapper, self).setReplayMode(mode)
    def _registerSensorDescription(self, sd):
        # Validate one sensor description and announce it to the optional
        # aggregator and GDI subsystems.
        sd.test()
        if ResourceManagement.args.aggregate:
            self.aggregator.wrapper_added(sd)
        if self.gdiInterface:
            self.gdiInterface.registerSensorStreamFromWKT(
                sd.uuid, sd.sensorID, sd.sensorType, sd.location, sd.location_epsg or 4326
            )
        L.i("added wrapper with ID", sd.sensorID)

    def addWrapper(self, wrapper):
        """Register a wrapper: validate its sensor description(s), hook it into
        aggregation/GDI/annotation/message-bus as configured, and track it.

        :param wrapper: wrapper instance providing getSensorDescription()
        """
        # TODO: this should not be here
        if ResourceManagement.args.cleartriplestore:
            self.deleteGraphs(wrapper)
        sd = wrapper.getSensorDescription()
        try:
            # a wrapper may expose one description or a list of them; handle both
            # uniformly instead of duplicating the whole registration branch
            descriptions = sd if isinstance(sd, list) else [sd]
            for _sd in descriptions:
                try:
                    self._registerSensorDescription(_sd)
                except Exception as ex:
                    L.e("Error deploying wrapper:", str(ex))

            if ResourceManagement.args.triplestore or ResourceManagement.args.messagebus:
                # StaticAnnotator.staticAnnotationSensor(wrapper, self.config, self.messageBusQueue, self.rabbitmqchannel)
                StaticAnnotator.threadedStaticAnnotationSensor(wrapper, self.config, self.messageBusQueue, self.ui.api)
            if ResourceManagement.args.messagebus:
                wrapper.setMessageBusQueue(self.messageBusQueue)
            self.wrappers.append(wrapper)
        except Exception as ex:
            L.e(self.__class__.__name__, "Error in addWrapper:", str(ex))
    def tick(self):
        # One clock step: run all due notifications and jobs, then purge the
        # expired entries under the lock.
        # NOTE: Python 2 code -- relies on dict.iteritems(), the print statement
        # and filter() returning a list.
        self.dayPrintCounter -= 1
        if self.dayPrintCounter == 0:
            # once per simulated day, log the current clock time
            self.dayPrintCounter = AbstractClock.secondsInADay
            Log.i("It is", self.now())
            print "It is", self.now()

        # run all notifications that are due now; iterate over a copy so that
        # callbacks may add/remove entries without breaking iteration
        notifications = filter(self.filtermethod, self.notifications.copy().iteritems())
        self.pause(len(notifications))
        for k, v in notifications:
            v.do()
        del notifications
        # run due jobs, optionally each in its own thread
        jobs = filter(self.filtermethod, self.jobs.copy().iteritems())
        for k, v in jobs:
            if self.execute_jobs_async:
                threading.Thread(target=v.do).start()
            else:
                v.do()
        del jobs

        # remove expired notifications/jobs; mutation is guarded by the lock
        # (NOTE(review): an exception here would leave the lock held -- no finally)
        self.lock.acquire()
        notifications = filter(self.removefiltermethod, self.notifications.copy().iteritems())
        for k, v in notifications:
            del self.notifications[k]
        jobs = filter(self.removefiltermethod, self.jobs.copy().iteritems())
        for k, v in jobs:
            # jobs may have been removed concurrently; re-check before deleting
            if k in self.jobs:
                del self.jobs[k]
        self.lock.release()
    def start_messagebus(self, args):
        """Connect to RabbitMQ and start the outgoing message queue.

        On connection failure the messagebus feature is disabled (both on
        self.args and the passed args) instead of raising.

        :param args: parsed command-line arguments; its messagebus flag is
                     cleared on connection failure
        """
        L.i("Connecting to the message bus")
        self.messageBusQueue = QueueThread(handler=self.sendMessageHandler)
        try:
            # prepare RabbitMQ configuration; credentials are optional
            rmq_host = str(self.config.rabbitmq.host)
            rmq_port = self.config.rabbitmq.port
            rmq_username = self.config.rabbitmq.username if "username" in self.config.rabbitmq else None
            # BUG FIX: previously read .username for the password as well
            rmq_password = self.config.rabbitmq.password if "password" in self.config.rabbitmq else None
            if rmq_username:
                if rmq_password:
                    RabbitMQ.establishConnection(rmq_host, rmq_port, rmq_username, rmq_password)
                else:
                    RabbitMQ.establishConnection(rmq_host, rmq_port, rmq_username)
            else:
                RabbitMQ.establishConnection(rmq_host, rmq_port)
            L.i("Connected to the message bus")
            self.messageBusQueue.start()
        except MessageBusConnectionError:
            self.args.messagebus = False
            args.messagebus = False
            L.w("Could not connect to MessageBus server. Disabling MessageBus feature.")
    def startReplay(self):
        """Start historic replay: build a ReplayClock for the configured
        timeframe, switch every wrapper into replay mode and run them.

        Requires --start and --end; with --continuelive the system switches to
        live mode when the replay finishes.

        :raises Exception: if start/end dates are missing or unparsable
        """
        # cherrypy.tree.mount(dowser.Root(), '/dowser')
        self._startQueues()
        method = self.replayEnd
        args = None

        start_time = datetime.datetime.now()

        # if continuelive enabled set "start" as method to set the system to live mode if historic replay is finished
        if ResourceManagement.args.continuelive:
            method = self.start
            args = True

        # BUG FIX: endCallbackArgs is now passed in both branches; previously the
        # no-speed branch dropped it, so --continuelive without --speed called
        # start() without restart=True
        if ResourceManagement.args.speed:
            self.clock = ReplayClock(ResourceManagement.args.speed, endCallback=method, endCallbackArgs=args)
        else:
            self.clock = ReplayClock(endCallback=method, endCallbackArgs=args)

        if ResourceManagement.args.end and ResourceManagement.args.start:
            try:
                startDate = datetime.datetime.strptime(ResourceManagement.args.start, ReplayClock.parserformat)
                endDate = datetime.datetime.strptime(ResourceManagement.args.end, ReplayClock.parserformat)
                if startDate > endDate:
                    L.w("start date after end date. Changing both")
                    startDate, endDate = endDate, startDate
                self.clock.setTimeframe(startDate, endDate)
            except Exception as e:
                L.e("Problem parsing start- or end date:", str(e))
                raise e
        else:
            raise Exception("start- and enddate required for replay mode")

        if ResourceManagement.args.pt:
            # performance measuring is optional; import lazily
            from virtualisation.resourcemanagement.performancetestreceiver import PerformanceMeterMinutes
            performancetest = PerformanceMeterMinutes()  # PerformanceMeterSeconds()

        for w in self.wrappers:
            w.setReplayMode(True)
            w.setClock(self.clock)
            w.setTimeframe(startDate, endDate)
            w.addReceiver(self)
            if ResourceManagement.args.pt:
                w.addReceiver(performancetest)
            w.start()
            w.runReplay()

        if not self.args.noQuality:
            if not self.averageStreamQuality:
                self.averageStreamQuality = AverageStreamQuality(self, self.clock)
            else:
                self.averageStreamQuality.setClock(self.clock)
        self.clock.runAsync()
        self.startMonitor()

        if not ResourceManagement.args.continuelive:
            # block until the operator ends the replay
            raw_input("press Enter to end.\n")
            self.clock.stop()
            L.i("Runtime", datetime.datetime.now() - start_time)
    def update(self):
        """Produce one observation tick for this wrapper.

        In replay mode data comes from the history reader/parser; in live mode
        from the connection/parser. In both modes, when no data is available a
        "recovered" observation is synthesised from the per-field fault-recovery
        estimators. Parsed data is annotated (observation IDs, latency, quality)
        and dispatched to every registered receiver. Timing of each phase is
        recorded via self.stats.
        """
        from virtualisation.resourcemanagement.resourcemanagement import ResourceManagement

        # print "time", self.clock.now()
        latStart = datetime.now()  # wall-clock start, used to compute latency below
        L.d("processing:", self.getSensorDescription().sensorID)
        # L.d(self.clock.now())
        if self.replaymode:
            self.stats.startMeasurement("Update_replay")
            #             self.clock.pause()
            if self.historyreader:
                L.d2("abstractwrapper get data")
                self.stats.startMeasurement("Update_replay.Historyreader")
                data_raw = self.historyreader.tick(self.clock)
                self.stats.stopMeasurement("Update_replay.Historyreader")
                L.d2("abstractwrapper received data:", str(data_raw))
                if data_raw:
                    # reader may deliver one observation or a list of them
                    data_list = [data_raw] if not self.historyreader.multiple_observations else data_raw
                    for data in data_list:
                        try:
                            L.d2("abstractwrapper parse data")
                            # print "data to parse", data
                            self.stats.startMeasurement("Update_replay.Historyparser")
                            parsed = self.historyparser.parse(data, self.clock)
                            self.stats.stopMeasurement("Update_replay.Historyparser")
                            L.d2("abstractwrapper parsed data:", str(parsed))
                            del data
                            if parsed:
                                self.stats.startMeasurement("Update_replay.Preparation")
                                ObservationIDGenerator.addObservationIDToFields(parsed)
                                parsed.producedInReplayMode = True
                                parsed.recovered = False
                                parsed.latency = (datetime.now() - latStart).total_seconds()
                                self.stats.stopMeasurement("Update_replay.Preparation")

                                # QoI Start
                                quality = None
                                if self.qoiSystem:
                                    L.d2("abstractwrapper get quality")
                                    self.stats.startMeasurement("Update_replay.Quality")
                                    quality = self.qoiSystem.addData(self.getSensorDescription(), parsed, self.clock)
                                    self.stats.stopMeasurement("Update_replay.Quality")
                                    L.d2("abstractwrapper quality:", quality)
                                if self.faultRecoveryActive:
                                    # feed the estimators with good data so they can
                                    # fill gaps later
                                    L.d2("abstractwrapper update fault recovery")
                                    self.stats.startMeasurement("Update_replay.FaultRecoveryUpdate")
                                    self.updateFaultRecoveries(parsed, quality)
                                    self.stats.stopMeasurement("Update_replay.FaultRecoveryUpdate")
                                    L.d2("abstractwrapper fault recovery updated")

                                self.stats.startMeasurement("Update_replay.Receiver")
                                for r in self.receiver:
                                    L.d2("abstractwrapper start receiver", r)
                                    r.receive(parsed, self.getSensorDescription(), self.clock, quality)
                                    L.d2("abstractwrapper receiver", r, "finished")
                                self.stats.stopMeasurement("Update_replay.Receiver")
                        except Exception as e:
                            L.e("Error while updating sensor", self.getSensorDescription().fullSensorID, e)
                        finally:
                            if ResourceManagement.args.gentle:
                                self.clock.sleep()
                else:
                    # no data for this tick -- synthesise a recovered observation
                    L.d("there is no data, ask fault recovery1")
                    # L.i(self.getSensorDescription().sensorID)
                    # L.i(self.clock.now())
                    try:
                        self.stats.startMeasurement("Update_replay.Recovery")
                        data = JSONObject()
                        data.latency = 0
                        data.producedInReplayMode = True
                        data.recovered = True

                        data.fields = []
                        for n in self.getSensorDescription().fields:
                            if n in self.faultRecoveries and self.faultRecoveries[n].isReady():
                                data.fields.append(n)
                                data[n] = JSONObject()
                                # at this point the dataType is in FAULT_RECOVERY_SUPPORTED_DATATYPES and we can safely use cast
                                data[n].value = self.faultRecoveryCast(
                                    self.faultRecoveries[n].getEstimation(),
                                    self.getSensorDescription().field[n].dataType,
                                )
                                data[n].propertyName = self.getSensorDescription().field[n].propertyName
                                data[n].propertyURI = self.getSensorDescription().field[n].propertyURI
                                if "unit" in self.getSensorDescription().field[n]:
                                    data[n].unit = self.getSensorDescription().field[n].unit
                                data[n].sensorID = self.getSensorDescription().fullSensorID
                                data[n].observationSamplingTime = self.clock.timeAsString()
                                data[n].observationResultTime = data[n].observationSamplingTime
                        self.stats.stopMeasurement("Update_replay.Recovery")

                        self.stats.startMeasurement("Update_replay.ObservationIDGenerator")
                        ObservationIDGenerator.addObservationIDToFields(data)
                        self.stats.stopMeasurement("Update_replay.ObservationIDGenerator")

                        quality = None
                        if self.qoiSystem:
                            self.stats.startMeasurement("Update_replay.Quality")
                            quality = self.qoiSystem.addData(self.getSensorDescription(), data, self.clock)
                            self.stats.stopMeasurement("Update_replay.Quality")

                        self.stats.startMeasurement("Update_replay.Receiver")
                        for r in self.receiver:
                            r.receive(data, self.getSensorDescription(), self.clock, quality)
                        self.stats.stopMeasurement("Update_replay.Receiver")
                    except Exception as e:
                        L.e("Error while updating sensor", self.getSensorDescription().fullSensorID, e)
                    finally:
                        pass
                        # if ResourceManagement.args.gentle:
                        #     self.clock.sleep()
            else:
                pass  # no history reader - nothing to do
            self.stats.stopMeasurement("Update_replay")
        else:  # no replay mode
            self.stats.startMeasurement("Update_live")
            if self.connection:
                try:
                    self.stats.startMeasurement("Update_live.Connection")
                    data_raw = self.connection.next()
                    self.stats.stopMeasurement("Update_live.Connection")
                    if data_raw:
                        # connection may deliver one observation or a list of them
                        data_list = [data_raw] if not self.connection.multiple_observations else data_raw
                        for data in data_list:
                            self.stats.startMeasurement("Update_live.Parser")
                            parsed = self.parser.parse(data, self.clock)
                            self.stats.stopMeasurement("Update_live.Parser")
                            if parsed:
                                self.stats.startMeasurement("Update_live.Preparation")
                                ObservationIDGenerator.addObservationIDToFields(parsed)
                                parsed.producedInReplayMode = False
                                parsed.recovered = False
                                parsed.latency = (datetime.now() - latStart).total_seconds()
                                self.stats.stopMeasurement("Update_live.Preparation")

                                # QoI Start
                                quality = None
                                if self.qoiSystem:
                                    # TODO update the timestamp
                                    self.stats.startMeasurement("Update_live.Quality")
                                    quality = self.qoiSystem.addData(self.getSensorDescription(), parsed, self.clock)
                                    self.stats.stopMeasurement("Update_live.Quality")
                                if self.faultRecoveryActive:
                                    L.d2("abstractwrapper update fault recovery")
                                    self.stats.startMeasurement("Update_live.FaultRecoveryUpdate")
                                    self.updateFaultRecoveries(parsed, quality)
                                    self.stats.stopMeasurement("Update_live.FaultRecoveryUpdate")
                                    L.d2("abstractwrapper fault recovery updated")

                                self.stats.startMeasurement("Update_live.Receiver")
                                for r in self.receiver:
                                    r.receive(parsed, self.getSensorDescription(), self.clock, quality)
                                self.stats.stopMeasurement("Update_live.Receiver")
                    else:
                        # fault recovery: no live data -- synthesise a recovered observation
                        L.i("there is no data, ask fault recovery2")
                        try:
                            self.stats.startMeasurement("Update_live.Recovery")
                            data = JSONObject()
                            data.latency = 0
                            data.recovered = True
                            data.fields = []
                            for n in self.getSensorDescription().fields:
                                if n in self.faultRecoveries and self.faultRecoveries[n].isReady():
                                    data.fields.append(n)
                                    data[n] = JSONObject()
                                    data[n].value = self.faultRecoveryCast(
                                        self.faultRecoveries[n].getEstimation(),
                                        self.getSensorDescription().field[n].dataType,
                                    )
                                    data[n].propertyName = self.getSensorDescription().field[n].propertyName
                                    data[n].propertyURI = self.getSensorDescription().field[n].propertyURI
                                    if "unit" in self.getSensorDescription().field[n]:
                                        data[n].unit = self.getSensorDescription().field[n].unit
                                    data[n].sensorID = self.getSensorDescription().fullSensorID
                                    data[n].observationSamplingTime = self.clock.timeAsString()
                                    data[n].observationResultTime = data[n].observationSamplingTime
                            self.stats.stopMeasurement("Update_live.Recovery")

                            ObservationIDGenerator.addObservationIDToFields(data)
                            quality = None
                            if self.qoiSystem:
                                self.stats.startMeasurement("Update_live.Quality")
                                quality = self.qoiSystem.addData(self.getSensorDescription(), data, self.clock)
                                self.stats.stopMeasurement("Update_live.Quality")

                            self.stats.startMeasurement("Update_live.Receiver")
                            for r in self.receiver:
                                r.receive(data, self.getSensorDescription(), self.clock, quality)
                            self.stats.stopMeasurement("Update_live.Receiver")
                        except Exception as e:
                            L.e(
                                "Error while updating sensor (fault recovery)",
                                self.getSensorDescription().fullSensorID,
                                str(e),
                            )
                        finally:
                            pass
                            # if ResourceManagement.args.gentle:
                            #     self.clock.sleep()
                except Exception as e:
                    L.e(
                        "Error while updating sensor (not fault recovery)",
                        self.getSensorDescription().fullSensorID,
                        str(e),
                    )
            else:
                pass  # no live mode supported
            self.stats.stopMeasurement("Update_live")