def test_overflow(self):
        # Temporarily lower the batch limit so that syncing 3 datapoints
        # requires more than one insert request (a batch of 2, then a batch of 1)
        global DATAPOINT_INSERT_LIMIT
        dil = DATAPOINT_INSERT_LIMIT
        DATAPOINT_INSERT_LIMIT = 2
        try:
            s = self.device["mystream"]

            # This time we test an existing stream
            s.create({"type": "string"})

            l = Logger("test.db")
            l.serverurl = TEST_URL
            l.apikey = self.apikey

            l.addStream("mystream")

            l.insert("mystream", "test1")
            l.insert("mystream", "test2")
            l.insert("mystream", "test3")

            l.sync()

            # All three datapoints should now be on the server, and the cache empty
            self.assertEqual(3, len(s))
            self.assertEqual(0, len(l))

            l.close()
        finally:
            # Restore the original limit even if an assertion fails,
            # so that other tests are unaffected
            DATAPOINT_INSERT_LIMIT = dil

    def test_clear(self):
        s = self.device["mystream"]

        # This time we test an existing stream
        s.create({"type": "string"})

        l = Logger("test.db")
        l.serverurl = TEST_URL
        l.apikey = self.apikey

        l.addStream("mystream")

        l.insert("mystream", "test1")
        l.insert("mystream", "test2")

        # cleardata empties the local cache without syncing to the server
        l.cleardata()
        self.assertEqual(len(l), 0)

        l.close()

    def test_bgsync(self):
        s = self.device["mystream"]

        # This time we test an existing stream
        s.create({"type": "string"})

        l = Logger("test.db")
        l.serverurl = TEST_URL
        l.apikey = self.apikey

        l.addStream("mystream")

        # Sync with the server automatically, once per second
        l.syncperiod = 1

        self.assertEqual(0, len(s))
        self.assertEqual(0, len(l))

        l.start()
        l.insert("mystream", "hi")
        l.insert("mystream", "hello")
        self.assertEqual(0, len(s))
        self.assertEqual(2, len(l))
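        # One sync period later, the background thread should have uploaded the cache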
        time.sleep(1.1)
        self.assertEqual(2, len(s))
        self.assertEqual(0, len(l))
        l.insert("mystream", "har")
        self.assertEqual(2, len(s))
        self.assertEqual(1, len(l))
        time.sleep(1.1)
        self.assertEqual(3, len(s))
        self.assertEqual(0, len(l))
        l.stop()

        # With the background thread stopped, new datapoints stay in the local cache
        l.insert("mystream", "stopped")
        time.sleep(1.3)
        self.assertEqual(3, len(s))
        self.assertEqual(1, len(l))

        l.close()


class LaptopLogger:
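    """Gathers datapoints from all installed plugin gatherers, caches them
    locally, and syncs them to ConnectorDB, optionally managing a local
    ConnectorDB server instance."""
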
    def __init__(self,firstrun_callback=None):
        self.firstrun_callback = firstrun_callback

        self.syncer = None      # Timer that schedules the next gather() call
        self.isrunning = False  # Whether data gathering is active
        self.issyncing = False  # Whether background sync with the server is active

        # Get the data gatherers. currentgatherers holds only the ones that are
        # enabled; gatherers holds all of them.
        self.currentgatherers = {}
        self.gatherers = {}
        for p in getplugins():
            g = p()
            self.currentgatherers[g.streamname] = g
            self.gatherers[g.streamname] = g

        filedir = files.getFileFolder()
        cachefile = os.path.join(filedir,"cache.db")
        logging.info("Opening database " + cachefile)
        self.cache = Logger(cachefile,on_create=self.create_callback)
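        # create_callback (below) only runs when a brand-new cache file is
        # created; it sets the defaults and requests the API key on first run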

        # Disable the relevant gatherers
        for g in self.cache.data["disabled_gatherers"]:
            if g in self.currentgatherers:
                del self.currentgatherers[g]

        # If ConnectorDB is managed, start the executable
        self.localdir = os.path.join(filedir,"db")
        self.localrunning = False
        self.runLocal()

        # Start running the logger if it is supposed to be running
        if self.cache.data["isrunning"]:
            self.start()
        if self.cache.data["isbgsync"]:
            self.startsync()

    # This can be used to start a local version of ConnectorDB
    def runLocal(self):
        if self.cache.data["runlocal"] and not self.localrunning:
            logging.info("Starting ConnectorDB server")
            try:
                self.localrunning = True
                retcode = cdbmanager.Manager(self.localdir).start()
                # The method used to start on Windows doesn't return error codes
                if platform.system() == "Windows":
                    return True
                logging.debug("Start return code: " + str(retcode))
                if retcode == 0:
                    return True
            except Exception as e:
                logging.error(str(e))
            # Startup failed: either an exception was raised or the return code was nonzero
            self.localrunning = False
            return False
        return False

    def create_callback(self,c):
        logging.info("Creating new cache file...")

        c.data = {
            "runlocal": False,        # Whether to run a local ConnectorDB instance (the ConnectorDB server)
            "isrunning": False,       # Whether the logger is currently gathering data. This NEEDS to be False - it is set to True later
            "isbgsync": False,        # Whether the logger automatically syncs with ConnectorDB. NEEDS to be False - automatically set to True later
            "gathertime": 4.0,        # The logger gathers datapoints every this many seconds
            "disabled_gatherers": [], # The names of disabled gatherers
        }
        c.syncperiod = 60*60    # Sync once an hour

        # We now need to set the API key
        if self.firstrun_callback is not None:
            self.firstrun_callback(c)

    def removegatherer(self,g):
        logging.info("Removing gatherer " + g)
        if g in self.currentgatherers:
            del self.currentgatherers[g]
            if self.isrunning:
                self.gatherers[g].stop()
        # Save the setting
        d = self.cache.data
        if g not in d["disabled_gatherers"]:
            d["disabled_gatherers"].append(g)
            self.cache.data = d

    def addgatherer(self,g):
        logging.info("Adding gatherer " + g)
        if g not in self.currentgatherers:
            if self.isrunning:
                self.gatherers[g].start(self.cache)
            self.currentgatherers[g] = self.gatherers[g]
        # Save the setting
        d = self.cache.data
        if g in d["disabled_gatherers"]:
            d["disabled_gatherers"].remove(g)
            self.cache.data = d

    def gather(self):
        # Run each enabled gatherer once so it can insert datapoints into the cache
        for g in self.currentgatherers:
            self.currentgatherers[g].run(self.cache)

        # Schedule the next gather with a one-shot daemon timer, giving a
        # periodic loop that won't keep the process alive on exit
        self.syncer = threading.Timer(self.cache.data["gathertime"],self.gather)
        self.syncer.daemon = True
        self.syncer.start()

    # Start gathering data from the enabled gatherers
    def start(self):
        if not self.isrunning:
            logging.info("Start acquisition")
            d = self.cache.data
            d["isrunning"] = True
            self.cache.data = d

            #First, make sure all streams are ready to go in the cache
            for g in self.gatherers:
                if g not in self.cache:
                    gatherer = self.gatherers[g]
                    logging.info("Adding {} stream ({})".format(g,self.gatherers[g].streamschema))
                    nickname = ""
                    if hasattr(gatherer,"nickname"):
                        nickname = gatherer.nickname
                    datatype = ""
                    if hasattr(gatherer,"datatype"):
                        datatype = gatherer.datatype
                    self.cache.addStream(g,gatherer.streamschema,
                                         description=gatherer.description,
                                         nickname=nickname,datatype=datatype)

            for g in self.currentgatherers:
                self.currentgatherers[g].start(self.cache)

            self.isrunning = True

            self.gather()

    # Start the background sync thread
    def startsync(self):
        if not self.issyncing:
            logging.info("Start background sync")
            d = self.cache.data
            d["isbgsync"] = True
            self.cache.data = d
            self.cache.start()
            self.issyncing = True


    # Stop gathering data. If temporary is True, the persisted "isrunning"
    # setting is left untouched, so gathering resumes on the next startup.
    def stop(self,temporary=False):
        logging.info("Stop acquisition")

        if self.syncer is not None:
            self.syncer.cancel()
            self.syncer = None

        for g in self.currentgatherers:
            self.currentgatherers[g].stop()

        if not temporary:
            d = self.cache.data
            d["isrunning"] = False
            self.cache.data = d

        self.isrunning = False

    # Stop the background sync thread
    def stopsync(self):
        self.cache.stop()
        d = self.cache.data
        d["isbgsync"] = False
        self.cache.data = d
        self.issyncing = False

    def exit(self):
        # exit performs cleanup - in this case, shutting down the ConnectorDB database on exit
        if self.cache.data["runlocal"] and self.localrunning:
            logging.info("Shutting down ConnectorDB server")
            try:
                cdbmanager.Manager(self.localdir).stop()
                self.localrunning = False
            except Exception as e:
                # Failing to stop the server shouldn't prevent exiting, but log it
                logging.error(str(e))
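

# A minimal usage sketch, not part of the original module. It assumes the
# first-run callback is where the server URL and API key get set on the cache;
# the Logger attributes used here (serverurl, apikey) mirror the test code
# above, and the values are placeholders:
#
#   def firstrun(cache):
#       cache.serverurl = "https://cdb.example.com"  # placeholder URL
#       cache.apikey = "YOUR-DEVICE-APIKEY"          # placeholder key
#
#   laptop = LaptopLogger(firstrun_callback=firstrun)
#   laptop.start()      # gather datapoints every "gathertime" seconds
#   laptop.startsync()  # sync the cache to the server once per syncperiod
#   ...
#   laptop.stop()
#   laptop.stopsync()
#   laptop.exit()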