Example #1
    def get_longtime_data(self, args):
        """
        get values from RAW Archive

        parameters:
        /<str>project/<str>tablename/<str>datestring/<str>key/<str>value_keys

        e.g. keyids=hostname:srvszp2orb.tilak.cc
        this is only useful if keyids are unique

        return data like this:
        [
            {
                name: "name of this series" usually this is the counter name
                data : [[ts, value], ...]
            },
            ...
        ]
        """
        assert len(args) == 5
        project, tablename, monthstring, keys_str, value_key = args
        if len(monthstring) > 7:
            return "monthstring, has to be in YYYY-MM format"
        # key_str should be a tuple string, convert to unicode tuple
        keys = tuple([unicode(key_value) for key_value in eval(base64.b64decode(keys_str))])
        logging.info("project : %s", project)
        logging.info("tablename : %s", tablename)
        logging.info("monthstring : %s", monthstring)
        logging.info("keys : %s", keys)
        logging.info("value_keys : %s", value_key)
        datalogger = DataLogger(basedir, project, tablename)
        data = datalogger.get_tsastats_longtime_hc(monthstring, keys, value_key)
        #logging.info("got data: %s", data)
        hc_data = [{"name" : funcname, "data" : data[funcname]} for funcname in data.keys()]
        return json.dumps(hc_data)
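
The keys argument above arrives as a base64-encoded Python tuple literal. Below is a minimal caller-side sketch (Python 2 style, to match the example); the project, table, and key names are placeholders, not real data.

import base64

index_key = (u"srvszp2orb.tilak.cc",)            # values for the table's index keys
keys_str = base64.b64encode(repr(index_key))     # what eval(base64.b64decode(...)) above expects
args = ["sanportgauge", "ifTable", "2015-01", keys_str, "ifInOctets"]
# handler.get_longtime_data(args) would then return the Highcharts series JSON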
Example #2
    def get_monthly_ts(self, project, tablename, monthstring, args):
        """
        get monthly statistical values

        TODO: should be combined with get_lt_ts
        """
        index_key_enc = None
        value_keyname = None
        stat_func_name = "avg"
        if len(args) == 2:
            index_key_enc, value_keyname = args
        else:
            index_key_enc, value_keyname, stat_func_name = args
        if len(monthstring) != 7:
            web.internalerror()
            return "monthstring, has to be in YYYY-MM format"
        # key_str should be a tuple string, convert to unicode tuple
        index_key = tuple([unicode(key_value) for key_value in eval(base64.b64decode(index_key_enc))])
        logging.info("index_key : %s", index_key)
        logging.info("value_keyname : %s", value_keyname)
        logging.info("stat_func_name: %s", stat_func_name)
        datalogger = DataLogger(basedir, project, tablename)
        filterkeys = dict(zip(datalogger.index_keynames, index_key))
        ret_data = []
        for datestring in datalogger.monthwalker(monthstring):
            logging.debug("getting tsatstats for %s", monthstring)
            try:
                tsastats = datalogger.load_tsastats(datestring, filterkeys=filterkeys)
                ret_data.append([datestring, tsastats[index_key][value_keyname][stat_func_name]])
            except DataLoggerRawFileMissing as exc:
                logging.error("No Input File for datestring %s found, skipping this date", datestring)
            except DataLoggerLiveDataError as exc:
                logging.error("Reading from live data is not allowed, skipping this data, and ending loop")
                break
        return json.dumps(ret_data)
Example #3
    def get_quantile_web(self, args):
        """
        return exported QuantileArray json formatted, special
        version for use in webpages to render with tablesorter

        in contrast to get_quantile, the value_keyname has to be given

        ex: Datalogger/get_quantile_web/{projectname}/{tablename}/{datestring}/{value_keyname}

        [
            dict of index_keys : dict of quantile,
            list of index_keys,
            list of value_names,
        ]

        returns:
        json(quantile_dict)
        """
        project, tablename, datestring, value_keyname = args[:4]
        datalogger = DataLogger(basedir, project, tablename)
        qa = datalogger.load_quantile(datestring)
        ret_data = []
        # build header
        ret_data.append(list(datalogger.index_keynames) + ["Q0", "Q1", "Q2", "Q3", "Q4"])
        # data part
        for k, v in qa[value_keyname].quantile.items():
            ret_data.append(list(k) + v.values())
        return json.dumps(ret_data)
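
For illustration only, this is the table shape the handler serializes: a header row built from the index key names plus Q0..Q4, then one row per index key. Hostnames and numbers here are invented.

example_output = [
    ["hostname", "Q0", "Q1", "Q2", "Q3", "Q4"],
    ["srv1.example.com", 0.0, 1.2, 3.4, 7.8, 12.0],
    ["srv2.example.com", 0.0, 0.4, 0.9, 1.1, 2.5],
]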
Example #4
def archive(project, tablename, datestring):
    datalogger = DataLogger(basedir, project, tablename)
    caches = datalogger.get_caches(datestring)
    suffix = "%s/%s/%s\t" % (datestring, project, tablename)
    if caches["tsa"]["raw"] is None:
        logging.debug("%s RAW Data not found", suffix)
    else:
        if not os.path.isfile(caches["tsa"]["raw"]):
            logging.info("%s RAW does not exists, maybe archived or deleted", suffix)
            return
        logging.info("%s found raw file %s", suffix, caches["tsa"]["raw"])
        filebasename = os.path.basename(caches["tsa"]["raw"])
        parts = filebasename.split("_")
        filetablename = filebasename.replace("_%s" % parts[-1], "")
        filedatestring = parts[-1].split(".")[0]
        filesuffix = ".".join(parts[-1].split(".")[1:])
        logging.info("found tablename %s, datestring %s, ending %s", filetablename, filedatestring, filesuffix)
        if (filetablename != tablename) or (filedatestring != datestring):
            logging.error("the references raw file seems not to be the correct one")
        else:
            if filesuffix == "csv.gz":
                logging.info("raw file already zipped, this seems not to be the actual one")
                if (len(caches["tsa"]["keys"]) > 0) and (len(caches["tsastat"]["keys"]) > 0) and (len(caches["ts"]["keys"]) > 0) and (caches["quantile"]["exists"] is True):
                    logging.info("%s all generated archives found, raw data could be archived", suffix)
                    archivepath = os.path.join(args.archivedir, datestring, project, tablename)
                    archivefilename = os.path.join(archivepath, os.path.basename(caches["tsa"]["raw"]))
                    if not os.path.isdir(archivepath):
                        logging.info("creating directory %s", archivepath)
                        os.makedirs(archivepath)
                    logging.info("%s moving raw file to %s", suffix, archivefilename)
                    shutil.move(caches["tsa"]["raw"], archivefilename)
                else:
                    logging.info("%s not all archives available, generate them first, before archiving raw data", suffix)
    del caches
    del datalogger
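
archive() assumes raw files are named <tablename>_<datestring>.<suffix>; a short worked example of that parsing with a made-up filename (tablenames may contain underscores themselves, hence the replace() trick):

filebasename = "virtualMachineMemoryStats_2015-01-01.csv.gz"   # hypothetical raw file
parts = filebasename.split("_")
filetablename = filebasename.replace("_%s" % parts[-1], "")    # "virtualMachineMemoryStats"
filedatestring = parts[-1].split(".")[0]                       # "2015-01-01"
filesuffix = ".".join(parts[-1].split(".")[1:])                # "csv.gz"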
Example #5
    def get_scatter_data(args):
        """
        gets scatter plot data of two value_keys of the same tablename

        ex: Datalogger/{projectname}/{tablename}/{datestring}/{value_keyname1}/{value_keyname2}/{stat function name}

        value_keyname{1/2} has to be one of get_value_keynames
        stat function name has to be one of get_stat_func_names

        returns:
        json(highcharts data)
        """
        assert len(args) == 6
        project, tablename, datestring, value_key1, value_key2, stat_func_name = args
        logging.info("project : %s", project)
        logging.info("tablename : %s", tablename)
        logging.info("datestring : %s", datestring)
        logging.info("value_key1 : %s", value_key1)
        logging.info("value_key2 : %s", value_key2)
        datalogger = DataLogger(basedir, project, tablename)
        tsastats = datalogger.load_tsastats(datestring)
        hc_scatter_data = []
        for key, tsstat in tsastats.items():
            hc_scatter_data.append({
                "name" : str(key),
                "data" : ((tsstat[value_key1]["avg"], tsstat[value_key2]["avg"]), )
            })
        return json.dumps(hc_scatter_data)
Example #6
 def sr_hrstorage_unused(args):
     """
     special report to get a report of unused SNMP Host Storage
     works only with snmp/hrStorageTable
     """
     datestring, storage_type = args[:2]
     datalogger = DataLogger(basedir, "snmp", "hrStorageTable")
     tsastat = datalogger.load_tsastats(datestring)
     data = []
     data.append(("hostname", "hrStorageDescr", "hrStorageSizeKb", "hrStorageUsedKb", "hrStorageNotUsedKbMin", "hrStorageNotUsedPct"))
     for index_key in tsastat.keys():
         # (u'srvcacdbp1.tilak.cc', u'Physical Memory',
         # u'HOST-RESOURCES-TYPES::hrStorageRam')
         if (u"HOST-RESOURCES-TYPES::%s" % storage_type) not in index_key:
             del tsastat[index_key]
         if index_key[1][:4] in (u"/run", u"/dev", u"/sys"):
             del tsastat[index_key]
     for key, tsstat in tsastat.items():
         sizekb = tsstat["hrStorageSize"]["min"] * tsstat["hrStorageAllocationUnits"]["max"] / 1024
         usedkb = tsstat["hrStorageUsed"]["max"] * tsstat["hrStorageAllocationUnits"]["max"] / 1024
         notused = sizekb - usedkb
         notused_pct = 0.0
         try:
             notused_pct = 100.0 *  notused / sizekb
         except ZeroDivisionError:
             pass
         data.append((key[0], key[1], "%0.2f" % sizekb, "%0.2f" % usedkb, "%0.2f" % notused, "%0.2f" % notused_pct))
     return json.dumps(data)
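
The size arithmetic above relies on hrStorageSize/hrStorageUsed being counted in allocation units and hrStorageAllocationUnits being bytes per unit; a tiny worked example with invented numbers:

hr_storage_size = 262144                # allocation units reported by SNMP
hr_storage_allocation_units = 4096      # bytes per allocation unit
sizekb = hr_storage_size * hr_storage_allocation_units / 1024.0
# 262144 * 4096 / 1024 = 1048576.0 KB, i.e. a 1 GiB volume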
Example #7
    def __init__(self):
        self.ramlog = collections.deque(maxlen=settings['ramlog']['length'])
        self.fslog_last = 0
        self.ramlog_last = 0
        # avoid no data received error right at the beginning
        self.commdead_last = time.time()

        strace = settings['trace']
        self.trace = ProzedaTrace(strace['dir'] + strace['prefix'],
                                  strace['suffix'])

        ProzedaLogdata.set_config(settings['system'])
        ProzedaHistory.set_config(settings)

        sserial = settings['serialport']
        serialport = serial.Serial(sserial['port'], sserial['baudrate'])

        self.prozeda = ProzedaReader(serialport)
        sfslog = settings['fslog']
        self.fslog = DataLogger(sfslog['dir'] + sfslog['prefix'],
                                sfslog['suffix'])

        self.prozeda.evt_rawdata_received = self.trace.data_received

        JsonrpcRequestHandler.pirozeda = self
Example #8
    def get_quantile(self, project, tablename, datestring, args):
        """
        return exported QuantileArray json formatted

        ex: Datalogger/get_quantile/{projectname}/{tablename}/{datestring}

        [
            dict of index_keys : dict of quantile,
            list of index_keys,
            list of value_names,
        ]

        returns:
        json(quantile_dict)
        """
        logging.info("optional arguments received: %s", args)
        datalogger = DataLogger(basedir, project, tablename)
        quantile = datalogger.load_quantile(datestring)
        if len(args) > 0:
            value_keyname = args[0]
            ret_data = []
            # build header
            ret_data.append(list(datalogger.index_keynames) + ["Q0", "Q1", "Q2", "Q3", "Q4"])
            # data part
            for k, v in quantile[value_keyname].quantile.items():
                ret_data.append(list(k) + v.values())
            return json.dumps(ret_data)
        return quantile.to_json()
Example #9
    def record_logs(self, user_id, success=False):
        """
        Special callback added to this callback plugin
        Called by Runner object
        :param user_id:
        :param success:
        :return:
        """

        log_storage = DataLogger()
        return log_storage.save_log(user_id, self.logger.log,
                                    self.logger.runtime, success)
Example #10
    def start(self, duration, comment=None):
        if self.starttime is None:
            self.starttime = time.time()

        if duration is None:
            duration = 0
        self.stoptime = time.time() + int(duration)

        if self.logger is None:
            self.logger = DataLogger(self.fileprefix, self.filesuffix,
                                     '%Y-%m-%d_%H.%M.%S', True)

        if comment is not None and self.logger is not None:
            self.logger.write(None, 'c', comment)
Example #11
    def get_tsastats_func(self, args):
        """
        return json data to render html table from it

        parameters:
        <b>project</b> project string
        <b>tablename</b> tablename string
        <b>datestring</b> datestring in YYYY-MM-DD form
        <b>stat_func_name</b> statistical function
        """
        project, tablename, datestring, stat_func_name = args
        datalogger = DataLogger(basedir, project, tablename)
        tsastats = datalogger.load_tsastats(datestring)
        return json.dumps(tsastats.to_csv(stat_func_name))
Example #12
def loop(client, account_currency, payment_method_currency, transfers):
    sell_target = 141.00
    quote_sell_at = 0.95  # Ratio of sell price when quotes will be obtained (0.00 to 1.00)

    exRates = MyExRates(client.client, account_currency,
                        payment_method_currency)
    data_logger = DataLogger(exRates.dict, 'pricelog.csv')

    while True:
        exRates = MyExRates(client.client, account_currency,
                            payment_method_currency)
        account = client.client.get_account(transfers.wallet)

        spot_value = exRates.spot_price * float(account['balance']['amount'])

        print()
        print('Account Balance: ' + str(account['balance']['amount']) + ' ' +
              account['balance']['currency'])
        print('Spot Price: ' + str(exRates.spot_price))
        print('Spot Value: ' + str(spot_value))
        print('Spot value at ' + str("%.2f" %
                                     (spot_value / sell_target * 100)) +
              '% of target (' + str(sell_target) + ').')

        if spot_value > sell_target * quote_sell_at:
            quote = client.client.sell(transfers.wallet,
                                       amount=str(
                                           account['balance']['amount']),
                                       currency='BTC',
                                       payment_method=transfers.payment_method,
                                       quote=True)

            print('Spot price within ' + str(quote_sell_at * 100) +
                  '% of target - Getting quote')
            if float(quote['total']['amount']) > sell_target:
                print('Attempting Sell')
                sell = client.client.sell(
                    transfers.wallet,
                    amount=str(account['balance']['amount']),
                    currency=account['balance']['currency'],
                    payment_method=transfers.payment_method,
                    quote=False)

                print('Sold ' + sell['total']['amount'])
            else:
                print('Quote of ' + quote['total']['amount'] +
                      ' too low - No sell')

        data_logger.add_line(exRates.dict)
        time.sleep(10)
Example #13
    def get_projects():
        """
        get available projects for this Datalogger Server

        ex: Datalogger/get_projects/...
        there is no further argument needed

        returns:
        json(existing project names)
        """
        ret_data = {
            "projects" : DataLogger.get_projects(basedir),
            "stat_func_names" : TimeseriesStats.stat_funcs.keys(),
            "last_businessday_datestring" : DataLogger.get_last_business_day_datestring()
        }
        return json.dumps(ret_data)
Example #14
    def aggregate_data():
        """
         Compute data aggregate for whole period (since last email report)
         and save it to file (bad idea! it shouldn't)
        :return: None
        """
        # Load the entire data file
        df = DataLogger.read_data_log(DATA_LOG_FILE, False)

        # compute the average for all existing data
        df[3].append(PollData.aggregate(df[2]))

        # save a new file with ONLY the aggregate data
        now = dt.now()
        DataLogger.create_data_log(DATA_LOG_FILE, True, now, now, None, df[3])
        logger.info("Aggregate data complete. New data file created.")
Example #15
def data(logged_data):
    if not os.path.exists(logged_data):
        return '<br/>'.join(os.listdir('.'))
    data = DataLogger(logged_data).read()
    start_time = float(data[0][0])
    data = [[float(a) - start_time, float(b)] for a, b in data]
    return render_template('data.html', data=data, data_filename=logged_data)
Example #16
    def sr_vicenter_unused_mem(args):
        """
        special report to find virtual machines which are not using their RAM entirely;
        on these machines there is a possibility to save some virtual memory

        works only for VMware machines, specifically virtualMachineMemoryStats
        """
        datestring = args[0]
        datalogger = DataLogger(basedir, "vicenter", "virtualMachineMemoryStats")
        tsastat = datalogger.load_tsastats(datestring)
        tsastat_g = datalogger.tsastat_group_by(tsastat, ("hostname", ))
        data = []
        data.append(("hostname", "avg_active_max", "avg_granted_min", "avg_notused_min"))
        for key in tsastat_g.keys():
            not_used = tsastat_g[key]["mem.granted.average"]["min"] - tsastat_g[key]["mem.active.average"]["max"]
            data.append((key[0], "%0.2f" % tsastat_g[key]["mem.active.average"]["max"], "%0.3f" % tsastat_g[key]["mem.granted.average"]["min"], "%0.2f" % not_used))
        return json.dumps(data)
Example #17
 def __init__(self):
     self.config = Config()
     self.mcu = MCU()
     self.detecting = Detecting()
     # fb = Firebase()
     ws = webserver.WebServer()
     dl = DataLogger()
     
Example #18
def main():
    for datestring in tuple(DataLogger.datewalker(startdate, args.enddate)):
        start_ts, stop_ts = DataLogger.get_ts_for_datestring(datestring)
        logging.debug("working on datestring %s (from %s to %s)", datestring, start_ts, stop_ts)
        for project in DataLogger.get_projects(args.basedir):
            if args.project is not None:
                if project != args.project:
                    logging.debug("skipping project %s", project)
                    continue
            logging.debug("working on project %s", project)
            for tablename in DataLogger.get_tablenames(args.basedir, project):
                if args.tablename is not None:
                    if tablename != args.tablename:
                        logging.debug("skipping tablename %s", tablename)
                        continue
                    logging.debug("working on tablename %s", tablename)
                archive(project, tablename, datestring)
Example #19
    def __init__(self, name):
        adc = ADC("Analog input",0)
        self.pressure = Strain_PressureSensor("Pressure (kPa)",adc)
        print "Pressure is %s kPa" % self.pressure.get_pressure_kpa()

        # put Port 8 Pin 3&4 into mode 7 (GPIO_1_6 & GPIO_1_7)
        open('/sys/kernel/debug/omap_mux/gpmc_ad6', 'wb').write("%X" % 7)
        open('/sys/kernel/debug/omap_mux/gpmc_ad7', 'wb').write("%X" % 7)
        gpio1_6 = GPIO(38,GPIO.OUTPUT) #p8_3
        gpio1_7 = GPIO(39,GPIO.OUTPUT) #p8_4
        self.vi = Valve("Inlet",gpio1_6)
        self.vo = Valve("Outlet",gpio1_7)

        # put Port 8 Pin 5 into mode 7 (GPIO_1_2)
        open('/sys/kernel/debug/omap_mux/gpmc_ad2', 'wb').write("%X" % 7)
        gpio1_2 = GPIO(34,GPIO.OUTPUT) #p8_5
        self.pump = Pump("Water cooling pump", gpio1_2)

        # put Port 9 Pin 12 into mode 7 (GPIO_1_28)
        gpio1_28 = GPIO(60,GPIO.INPUT) #p9_12
        self.waterlevel = WaterLevel("Waterlevel sensor", gpio1_28)

        sck=GPIO(49,GPIO.OUTPUT) #p9_23
        s0=GPIO(115,GPIO.INPUT)  #p9_27
        cs_t1=GPIO(48,GPIO.OUTPUT) #p9_15 GPIO1_16 48
        cs_t2=GPIO(117,GPIO.OUTPUT) #p9_25
        self.max1 = MAX31855(cs_t1, sck, s0)  #Maxim IC No 1, connected to the cartridge heater TC
        self.max2 = MAX31855(cs_t2, sck, s0)  #Maxim IC No 2, connected to the TC at the bottom of the vessel

        # PWMHeater
        self.h = PWMHeater("Cartridge heater (%)", "ehrpwm1a")
        self.h.setfrequency(3)
        
        # DS18B20
        T1="28-000003f5b1c9"
        T2="28-000003f5baa4"
        T3="28-000003f5be11"
        
        self.ds1=DS18B20("T in (C)",T1)
        self.ds2=DS18B20("T out (C)",T2)
        self.ds3=DS18B20("T Env (C)",T3)
        
        print "ds1: ",self.ds1.get_temperature()
        print "ds2: ",self.ds2.get_temperature()
        print "ds3: ",self.ds3.get_temperature()
        
        # CS5490 power meter
        self.cs=CS5490("Power In (W)")

        print "start DataLogger"
        self.log = DataLogger("my datalogger",500)
        self.log.add_sensor(("T Vessel bottom (C)","Maxim 2 Cld jnct (C)"),self.max2.get_celsius)
        self.log.add_sensor(("T Cartridge (C)","Maxim 1 Cld jnct (C)"),self.max1.get_celsius)
        self.log.add_sensor(self.ds1.get_name(),self.ds1.get_temperature)
        self.log.add_sensor(self.ds2.get_name(),self.ds2.get_temperature)
        self.log.add_sensor(self.ds3.get_name(),self.ds3.get_temperature)
        self.log.add_sensor(self.pressure.get_name(),self.pressure.get_pressure_kpa)
        self.log.add_sensor(self.cs.get_name(),self.cs.get_average_power)
Example #20
def report_group(project, tablename, datestring1, datestring2, value_key):
    # get data, from datalogger, or dataloggerhelper
    datalogger = DataLogger(BASEDIR, project, tablename)
    dataloggerweb = DataLoggerWeb(DATALOGGER_URL)
    print "loading data"
    starttime = time.time()
    # tsa1 = datalogger.load_tsa(datestring1)
    tsa1 = dataloggerweb.get_tsa(project, tablename, datestring1)
    tsa1 = datalogger.group_by(datestring1, tsa1, ("hostname",), lambda a, b: (a + b) / 2)
    # tsa2 = datalogger.load_tsa(datestring2)
    tsa2 = dataloggerweb.get_tsa(project, tablename, datestring2)
    tsa2 = datalogger.group_by(datestring2, tsa2, ("hostname",), lambda a, b: (a + b) / 2)
    print "Duration load %f" % (time.time() - starttime)
    starttime = time.time()
    cm = CorrelationMatrixTime(tsa1, tsa2, value_key)
    print "TOP most differing keys between %s and %s" % (datestring1, datestring2)
    for key, coefficient in sorted(cm.items(), key=lambda items: items[1], reverse=True)[:20]:
        print key, coefficient
Example #21
    def get_tsstat_caches(args):
        """
        DEPRECATED use get_caches instead

        get a list of all available TimeseriesStats available
        attention: there are only tsstat caches if raw data is already analyzed

        ex: Datalogger/get_tsstat_caches/{projectname}/{tablename}/{datestring}

        returns:
        json(list of all available TimeseriesStats data)
        """
        project, tablename, datestring = args[:3]
        datalogger = DataLogger(basedir, project, tablename)
        keys = []
        for cache in datalogger.list_tsstat_caches(datestring):
            keys.append(cache[1])
        return json.dumps(keys)
Example #22
    def get_quantile(self, args):
        """
        return exported QuantileArray json formatted

        ex: Datalogger/get_quantile/{projectname}/{tablename}/{datestring}

        [
            dict of index_keys : dict of quantile,
            list of index_keys,
            list of value_names,
        ]

        returns:
        json(quantile_dict)
        """
        project, tablename, datestring = args[:3]
        datalogger = DataLogger(basedir, project, tablename)
        quantile = datalogger.load_quantile(datestring)
        return quantile.to_json()
Example #23
    def sr_vicenter_unused_cpu_cores(args):
        """
        special report to find virtual machines which are not using their virtual cores entirely;
        on these machines there is a possibility to save some virtual cores

        works only for VMware machines, specifically virtualMachineCpuStats
        """
        datestring = args[0]
        datalogger = DataLogger(basedir, "vicenter", "virtualMachineCpuStats")
        tsastat = datalogger.load_tsastats(datestring)
        tsastat_g = datalogger.tsastat_group_by(tsastat, ("hostname", ))
        data = []
        data.append(("hostname", "avg_idle_min", "avg_used_avg", "avg_used_max"))
        for key in tsastat_g.keys():
            num_cpu = sum([key[0] in index_key for index_key in tsastat.keys()])
            if num_cpu < 3:
                continue
            data.append((key[0], "%0.2f" % tsastat_g[key]["cpu.idle.summation"]["min"], "%0.2f" % tsastat_g[key]["cpu.used.summation"]["avg"], "%0.2f" % tsastat_g[key]["cpu.used.summation"]["max"]))
        return json.dumps(data)
Example #24
    def get_tsastats(self, args):
        """
        return exported TimeseriesArrayStats json formatted

        [
            list of index_keys,
            list of value_keys,
            list of [
                index_key : tsstat_dictionary
                ]
        ]

        returns:
        json(tsastats_dict)
        """
        project, tablename, datestring = args[:3]
        datalogger = DataLogger(basedir, project, tablename)
        tsastats = datalogger.load_tsastats(datestring)
        return tsastats.to_json()
Example #25
    def get_last_business_day_datestring(args):
        """
        get datestring of last businessday Mo.-Fr.

        ex: Datalogger/get_last_business_day_datestring/...

        returns:
        json(datestring of last businessday)
        """
        return json.dumps(DataLogger.get_last_business_day_datestring())
Example #26
 def get_lt_ts(self, project, tablename, args):
     """
     get longtime statistical values
     """
     # datestringStart + "/" + datestringStop + "/" + Base64.encode(indexKey) + "/" + valueKeyname + "/" + statFuncName
     start, stop, index_key_enc, value_keyname, stat_func_name = args
     index_key = tuple([unicode(key_value) for key_value in eval(base64.b64decode(index_key_enc))])
     datalogger = DataLogger(basedir, project, tablename)
     filterkeys = dict(zip(datalogger.index_keynames, index_key))
     ret_data = []
     for datestring in datalogger.datewalker(start, stop):
         try:
             tsastats = datalogger.load_tsastats(datestring, filterkeys=filterkeys)
             ret_data.append([datestring, tsastats[index_key][value_keyname][stat_func_name]])
         except DataLoggerRawFileMissing as exc:
             logging.error("No Input File for datestring %s found, skipping this date", datestring)
         except DataLoggerLiveDataError as exc:
             logging.error("Reading from live data is not allowed, skipping this data, and ending loop")
             break
     return json.dumps(ret_data)
Example #27
    def get_projects(args):
        """
        get available projects for this Datalogger Server

        ex: Datalogger/get_projects/...
        there is no further argument needed

        returns:
        json(existing project names)
        """
        return json.dumps(DataLogger.get_projects(basedir))
Example #28
    def get_ts_caches(args):
        """
        DEPRECATED use get_caches instead

        get name of all index keys found in one specific TimeseriesArray
        useful to build autofill input fields
        attention: there are only ts caches if the raw data is already converted

        ex: Datalogger/get_ts_caches/{projectname}/{tablename}/{datestring}
        datestring has to be in format YYYY-MM-DD

        returns:
        json(list of all index keys)
        """
        project, tablename, datestring = args[:3]
        datalogger = DataLogger(basedir, project, tablename)
        keys = []
        for cache in datalogger.list_ts_caches(datestring):
            keys.append(cache[1])
        return json.dumps(keys)
Example #29
 def get_tsastats_table(self, args):
     """
     return html rendered table from tsastats data
     """
     def csv_to_table(csvdata, keys):
         outbuffer = []
         outbuffer.append("<thead><tr>")
         [outbuffer.append("<th>%s</th>" % header) for header in csvdata[0]]
         outbuffer.append("</tr></thead><tbody>")
         for values in csvdata[1:]:
             outbuffer.append("<tr>")
             [outbuffer.append("<td >%s</td>" % value) for value in values[0:keys]]
             [outbuffer.append("<td type=numeric>%0.2f</td>" % value) for value in values[keys:]]
             outbuffer.append("</tr>")
         outbuffer.append("</tbody>")
         return outbuffer
     project, tablename, datestring, stat_func_name = args
     datalogger = DataLogger(basedir, project, tablename)
     tsastats = datalogger.load_tsastats(datestring)
     return json.dumps("\n".join(csv_to_table(tsastats.to_csv(stat_func_name), len(tsastats.index_keys))))
Example #30
def webControl(x, y):
    x = int(x)
    y = int(y)
    usrMsgLogger = DataLogger()
    evobot = EvoBot("/dev/tty.usbmodemfa131", usrMsgLogger)
    head = Head(evobot)
    syringe = Syringe(evobot, 9)
    syringe.plungerSetConversion(1)
    evobot.home()

    head.move(x, y)
Example #31
    def get_caches_dict(args):
        """
        DEPRECATED use get_caches instead

        get name of all index keys found in one specific TimeseriesArray

        parameters:
        /<str>project/<str>tablename/<str>datestring

        returns:
        <json><list> of all index combinations
        """
        # the same for all vicenter data
        project, tablename, datestring = args[:3]
        datalogger = DataLogger(basedir, project, tablename)
        keys = []
        for cache in datalogger.list_ts_caches(datestring):
            key = dict(zip(datalogger.index_keynames, cache[1][1]))
            keys.append(key)
        return json.dumps(keys)
Example #32
    def get_caches(args):
        """
        return dictionary of caches available for this project/tablename/datestring combination

        ex: Datalogger/get_caches/{project}/{tablename}/{datestring}

        {
            "tsastat" : {
                "keys" : dictionary of available keys,
                "pattern" : filename pattern,
            },
            "tsstat" : {
                "keys" : dictionary of available keys,
                "pattern" : filename pattern,
            },
            "tsa":
                "keys" : dictionary of available keys,
                "pattern" : filename pattern,
            },
            "ts" : {
                "keys" : dictionary of available keys,
                "pattern" : filename pattern,
            },
            "raw" : None or filename of raw data,
        }

        if return_date["raw"] == null it means, there is no raw data available
        else if something (tsa,ts,tsastat,tsstat) is missing you can call get_tsastat to generate all caches

        returns:
        json(dictionary of caches and available data)
        """
        project, tablename, datestring = args[:3]
        datalogger = DataLogger(basedir, project, tablename)
        caches = {}
        try:
            caches = datalogger.get_caches(datestring)
        except StandardError as exc:
            logging.exception(exc)
            logging.error(caches)
        return json.dumps(caches)
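
A hedged sketch of how a caller might act on the returned dictionary, reading the same fields that archive() and gen_caches() in the other examples use; the JSON string below is a minimal stand-in for this handler's output.

import json

caches_json = '{"tsa": {"raw": null, "keys": []}, "tsastat": {"keys": []}}'
caches = json.loads(caches_json)
if caches["tsa"]["raw"] is None:
    print("no raw file referenced for this day (maybe archived)")
elif not caches["tsastat"]["keys"]:
    print("raw data present, but statistics not generated yet")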
Example #33
    def get_datewalk(args):
        """
        get list of datestrings between two datestrings

        ex: Datalogger/get_datewalk/{datestring1}/{datestring2}

        returns:
        json(list of datestrings)
        """
        datestring1, datestring2 = args[:2]
        data = tuple(DataLogger.datewalker(datestring1, datestring2))
        return json.dumps(data)
Example #34
    def get_tablenames(project):
        """
        get available tablenames, for one particular project
        uses directory listing in raw subdirectory for this purpose

        ex: Datalogger/get_tablenames/{projectname}
        <projectname> has to be something from Datalogger/get_projects

        returns:
        json(list of possible tablenames for given project)
        """
        return json.dumps(DataLogger.get_tablenames(basedir, project))
Example #35
 def get_tsa_adv(self, args):
     """
     return exported TimeseriesArray json formatted
     """
     group_funcs = {
         "avg" : lambda a, b: (a+b)/2,
         "min" : min,
         "max" : max,
         "sum" : lambda a, b: a+b,
     }
     logging.info(args)
     project, tablename, datestring, groupkeys_enc, group_func_name, index_pattern_enc = args
     groupkeys_dec = eval(base64.b64decode(groupkeys_enc)) # should be tuple
     logging.info("groupkeys_dec: %s", groupkeys_dec)
     index_pattern = base64.b64decode(index_pattern_enc)
     if index_pattern == "None":
         index_pattern = None
     logging.info("index_pattern: %s", index_pattern)
     assert group_func_name in group_funcs.keys()
     datalogger = DataLogger(basedir, project, tablename)
     tsa = None
     # get data
     if groupkeys_dec is not None:
         logging.info("groupkeys is %s", groupkeys_dec)
         groupkeys = tuple([unicode(key_value) for key_value in groupkeys_dec])
         tsa1 = datalogger.load_tsa(datestring, index_pattern=index_pattern)
         tsa = datalogger.group_by(datestring, tsa1, groupkeys, group_funcs[group_func_name])
     else:
         logging.info("groupkeys is None, fallback to get ungrouped tsa")
         tsa = datalogger.load_tsa(datestring, index_pattern=index_pattern)
     logging.info(tsa.keys()[0])
     web.header('Content-type', 'text/html')
     # you must not set this option, according to
     # http://stackoverflow.com/questions/11866333/ioerror-when-trying-to-serve-file
     # web.header('Transfer-Encoding','chunked')
     yield "[" + json.dumps(tsa.export().next())
     for chunk in tsa.export():
         #logging.info("yielding %s", chunk)
         yield "," + json.dumps(chunk)
     yield "]"
Example #36
def gen_caches(project, tablename, datestring):
    datalogger = DataLogger(basedir, project, tablename)
    caches = datalogger.get_caches(datestring)
    suffix = "%s/%s/%s\t" % (datestring, project, tablename)
    data = None
    if caches["tsa"]["raw"] is None:
        if len(caches["tsa"]["keys"]) == 0:
            logging.info("%s RAW Data not availabale maybe archived, tsa exists already", suffix)
        else:
            logging.debug("%s RAW Data is missing, no tsa archive exists", suffix)
    else:
        if len(caches["tsa"]["keys"]) == 0:
            logging.info("%s TSA Archive missing, calling get_tsa and load_tsastats", suffix)
            data = datalogger.load_tsa(datestring)
        else:
            if len(caches["tsastat"]["keys"]) == 0:
                logging.info("%s TSASTAT Archive missing, calling load_tsastats", suffix)
                data = datalogger.load_tsastats(datestring)
            else:
                if len(caches["ts"]["keys"]) == 0:
                    logging.info("%s there are no ts archives, something went wrong, or tsa is completely empty, calling load_tsastats", suffix)
                    data = datalogger.load_tsastats(datestring)
                else:
                    logging.debug("%s All fine", suffix)
            if caches["quantile"]["exists"] is not True:
                logging.info("%s Quantile archive is missing, calling load_quantile", suffix)
                data = datalogger.load_quantile(datestring)
    del data
    del caches
    del datalogger
Example #37
    def get_tsastats(self, project, tablename, datestring, args):
        """
        return exported TimeseriesArrayStats json formatted

        [
            list of index_keys,
            list of value_keys,
            list of [
                index_key : tsstat_dictionary
                ]
        ]

        if optional args is given, only one specific statistical function is returned

        returns:
        json(tsastats_dict)
        """
        logging.info("optional arguments received: %s", args)
        datalogger = DataLogger(basedir, project, tablename)
        tsastats = datalogger.load_tsastats(datestring)
        if len(args) > 0:
            return json.dumps(tsastats.to_csv(args[0]))
        return tsastats.to_json()
Example #38
    def sr_hrstorageram_unused(args):
        """
        special report to find servers which are not using their ram entirely
        especially on virtual machines there is a huge saving potential

        works only for snmp data, especially hrStorageTable
        """
        datestring = args[0]
        datalogger = DataLogger(basedir, "snmp", "hrStorageTable")
        tsastat = datalogger.load_tsastats(datestring)
        data = []
        data.append(("hostname", "hrStorageSizeKb", "hrStorageUsedKb", "hrStorageNotUsedKbMin", "hrStorageNotUsedPct"))
        for index_key in tsastat.keys():
            # (u'srvcacdbp1.tilak.cc', u'Physical Memory',
            # u'HOST-RESOURCES-TYPES::hrStorageRam')
            if u'HOST-RESOURCES-TYPES::hrStorageRam' not in index_key:
                del tsastat[index_key]
        for key, tsstat in datalogger.tsastat_group_by(tsastat, ("hostname", )).items():
            sizekb = tsstat["hrStorageSize"]["min"] * tsstat["hrStorageAllocationUnits"]["max"] / 1024
            usedkb = tsstat["hrStorageUsed"]["max"] * tsstat["hrStorageAllocationUnits"]["max"] / 1024
            notused = sizekb - usedkb
            notused_pct = 100.0 *  notused / sizekb
            data.append((key[0], "%0.2f" % sizekb, "%0.2f" % usedkb, "%0.2f" % notused, "%0.2f" % notused_pct))
        return json.dumps(data)
Example #39
def main():
    # Parse command line arguments
    parser = optparse.OptionParser()
    parser.add_option('-c', '--config', 
                      help = 'read configuration from FILE [default: %default]', 
                      metavar = 'FILE', 
                      default = 'campbell.conf')
    (options, args) = parser.parse_args()
    
    # Read configuration file

    cf = ConfigParser.SafeConfigParser()
    print 'configuration read from %s' % cf.read(options.config)
        
    for pakbus_id in cf.get('pakbus', 'dataloggers').split(','):
        pakbus_id = int(pakbus_id, base = 0)
        dl = DataLogger(cf.get('pakbus', 'host'),
                        pakbus_id,
                        int(cf.get('pakbus', 'my_node_id'), base = 0),
                        cf.getint('pakbus', 'timeout'))
        print "ringing node {}...".format(pakbus_id)
        
        dl.ring()
        dl.hello()
Example #40
def main():
    if len(sys.argv) < 2:
        print 'USAGE: runtracker.py participant_id [save_location]'
        return

    S = MARKER_SIZE
    F = 1

    participant_id = sys.argv[1]

    save_location = DATA_DIR
    if len(sys.argv) >= 3:
        save_location = sys.argv[2]

    # Mouse callback for setting origin point
    def mouse_callback(event, x, y, flags, param):
        if datalog.is_running():
            return

        if event == cv2.EVENT_LBUTTONDOWN:
            # Set new origin_y
            tracker.set_origin(y)

    # Open webcam
    cap = cv2.VideoCapture(TRACKER_CAM)
    if not cap.isOpened():
        print 'Error opening tracker camera!'
        return

    # Open facecam (for separate recording)
    facecap = None
    if FACE_CAM is not None:
        facecap = cv2.VideoCapture(FACE_CAM)
        if not facecap.isOpened():
            print 'Error opening face camera!'
            facecap = None

    # Get video parameters (try to retain same attributes for output video)
    width = float(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = float(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = float(cap.get(cv2.CAP_PROP_FPS))

    # Create instances
    track_finder = mk.MarkerFinder(GREEN_COLOR_MIN, GREEN_COLOR_MAX)
    tracker = WristTracker(track_finder, S, F)
    tracker.set_origin(int(height / 2))

    cprstatus = CPRStatus(CPR_BUFFER_SIZE)
    statussender = StatusSender(SOCK_ADDR, SOCK_PORT)
    datalog = DataLogger(fps if (0 < fps <= 60) else 30, width, height,
                         save_location, facecap is not None)

    trainer_on = True

    cur_position = 0
    cur_size = 0

    last_rate, last_depth, last_recoil, last_code = 0, 0, 0, 0

    while True:
        # Get frame
        ret, frame = cap.read()
        if ret is False or frame is None:
            break

        faceframe = None
        if facecap:
            faceret, faceframe = facecap.read()

        # Get dimensions
        h, w, _ = frame.shape
        center = (w / 2, h / 2)
        '''
        Output Drawing
        '''

        # Make output image
        output = frame.copy()

        # Display program status
        cv2.putText(
            output,
            '<NOT RUNNING>' if not datalog.is_running() else 'RECORDING ' +
            datalog.get_filename() + ' [' + str(datalog.get_index()) + ']',
            (0, 30), cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 0, 255), 2)
        cv2.putText(
            output, 'R: ' + str(last_rate) + ' D: ' + str(last_depth) +
            ' C: ' + str(last_recoil) + ' S: ' + str(last_code), (0, 60),
            cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 255), 2)

        # Draw center and origin lines
        cv2.line(output, (center[0], 0), (center[0], h), (0, 0, 255), 1)
        cv2.line(output, (center[0] - 20, int(tracker.get_origin())),
                 (center[0] + 20, int(tracker.get_origin())), (0, 0, 255), 1)

        # Draw instructions
        if not datalog.is_running():
            cv2.putText(
                output, 'c - set clr at orig | t - trainer is ' +
                ('on' if trainer_on else 'off') + ' | space - tgl tracker',
                (0, h - 4), cv2.FONT_HERSHEY_PLAIN, 1, (255, 0, 0), 1)
        '''
        Tracking
        '''

        # Get tracked marker from image
        tracked_marker = tracker.get_marker(frame, output)
        if tracked_marker:
            draw_marker(output, tracked_marker.marker, tracked_marker.size,
                        tracked_marker.distance, tracked_marker.position)

            # Update cur position/size (for calibration)
            cur_position = tracked_marker.y
            cur_size = tracked_marker.size
        '''
        Analysis
        '''

        if datalog.is_running():
            if tracked_marker:
                # Analyze CPR status
                rate, depth, recoil, code = cprstatus.update(tracked_marker)

                if code is not None:
                    last_rate, last_depth, last_recoil, last_code = rate, depth, recoil, code  # Update
                    print 'R: ' + str(last_rate) + ' D: ' + str(
                        last_depth) + ' C: ' + str(last_recoil) + ' S: ' + str(
                            last_code)

                    # Send status if trainer is turned on
                    if trainer_on:
                        if not (last_rate == 0 and last_depth == 0
                                and last_recoil == 0):
                            print 'send status ' + str(code)
                            statussender.send_status(code)

                datalog.log(frame, output, tracker.get_origin(),
                            tracked_marker.position if tracked_marker else 0,
                            rate, depth, recoil, code, faceframe)
        '''
        Show Output
        '''

        # Resize frame
        #output = cv2.resize(output, (OUT_WIDTH, OUT_HEIGHT))

        # Show frame
        #cv2.imshow('Frame', frame)
        cv2.imshow('Output', output)
        cv2.setMouseCallback('Output', mouse_callback)

        if faceframe is not None:
            cv2.imshow('Face', faceframe)
        '''
        Process Keypresses
        '''

        k = cv2.waitKey(1)
        if k == 27:
            datalog.stop()
            break
        elif k == ord('v'):
            track_finder.set_color(VIOLET_COLOR_MIN, VIOLET_COLOR_MAX)
        elif k == ord('g'):
            track_finder.set_color(GREEN_COLOR_MIN, GREEN_COLOR_MAX)
        elif k == ord('y'):
            track_finder.set_color(YELLOW_COLOR_MIN, YELLOW_COLOR_MAX)
        elif k == ord('c'):
            # Calibrate tracker (color)
            color = (0, 0, 0)

            # Get origin pixel color
            pixel = frame[int(tracker.get_origin())][center[0]]
            hsv = cvt_bgr2hsv(pixel)
            print 'Calibrate color: ' + str(hsv) + ' | BGR: ' + str(pixel)

            # Apply HSV range
            '''
            lower_bound = tuple(map(lambda x, y: max(0, x - y), hsv, COLOR_VARIANCE))
            upper_bound = tuple(map(lambda x, y: min(255, x + y), hsv, COLOR_VARIANCE))
            '''
            lower_bound = (max(0, hsv[0] - COLOR_MIN[0]), COLOR_MIN[1],
                           COLOR_MIN[2])
            upper_bound = (min(255, hsv[0] + COLOR_MAX[0]), COLOR_MAX[1],
                           COLOR_MAX[2])
            #'''

            track_finder.set_color(lower_bound, upper_bound)
            print 'Color range: ' + str(lower_bound) + ' to ' + str(
                upper_bound)
        elif k == ord('t'):
            # Toggle trainer
            trainer_on = not trainer_on
        elif k == 32:
            # Toggle on/off
            if datalog.is_running():
                datalog.stop()
            else:
                # Reset logger
                cur_time = datetime.now()
                time_str = cur_time.strftime('%m-%d-%y_%H%M%S')
                datalog.start('CPR_' + str(participant_id) + '_' + time_str)
                cprstatus.reset()

                # Set tracker origin/size
                tracker.set_origin(cur_position)
                tracker.set_force_px_size(cur_size)

    cap.release()
Example #41
        opts.video = int(opts.video)
    except ValueError:
        pass
    frmbuf = VideoBuffer(opts.video,
                         opts.start,
                         opts.stop,
                         historysize=LAST_DAY + 1,
                         loop=opts.loop)
else:
    frmbuf = ROSCamBuffer(opts.camtopic + "/image_raw",
                          historysize=LAST_DAY + 1,
                          buffersize=30)

# start the node and control loop
rospy.init_node("flownav", anonymous=False)
datalog = DataLogger() if opts.publish else None

kbctrl = None
if opts.camtopic == "/ardrone" and not opts.video:
    kbctrl = KeyboardController(max_speed=0.5, cmd_period=100)
if kbctrl:
    FlatTrim = rospy.ServiceProxy("/ardrone/flattrim", Empty())
    Calibrate = rospy.ServiceProxy("/ardrone/imu_recalib", Empty())

gmain_win = frmbuf.name
cv2.namedWindow(gmain_win, flags=cv2.WINDOW_OPENGL | cv2.WINDOW_NORMAL)
if opts.showmatches:
    cv2.namedWindow(gtemplate_win, flags=cv2.WINDOW_OPENGL | cv2.WINDOW_NORMAL)
smatch.MAIN_WIN = gmain_win
smatch.TEMPLATE_WIN = gtemplate_win
Example #42
from datetime import datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pandas import ExcelWriter
from datalogger import DataLogger

# establish general constants and parameters
number_reads_per_calib = 20
span_O2_concentration = 20.95
span_CO_concentration = 422
span_CO2_concentration = 2350

# create instance of the data logger and check connection
print("\nConnection to data logger")
rm, logger = DataLogger().new_instrument()

print("Starting HRR calibration procedure.\n")

# calibrate DPT
dpt_input = input("Ready to calibrate DPT?\n")
dpt_zero = np.zeros(number_reads_per_calib)
if dpt_input.lower() in ["y", "yes"]:
    for i in range(number_reads_per_calib):
        response, _ = DataLogger.query_data_for_HRR(logger)
        dpt_zero[i] = response[1]
print(f" - Mean zero value in volts for:\n  DPT: {dpt_zero.mean()}")

# calibrate zero levels for the gases
gases_zero_input = input("\nReady to calibrate zero for gases?\n")
oxygen_zero = np.zeros_like(dpt_zero)
Example #43
import sys
import numpy as np
from pandas import ExcelWriter

# add path to import functions and classes (absolute path on the FPA's computer)
sys.path.insert(
    1,
    r"C:\\Users\\FireLab\\Desktop\\Simon\\FeedbackControl_MassExperiments\\classes_and_functions"
)
from datalogger import DataLogger

#####
# CONNECT TO LOAD CELL AND DATA LOGGER AND INSTANTIATE CLASSES
#####

# create instance of the data logger and check connection
print("\nConnection to data logger")
rm, logger = DataLogger().new_instrument()

#####
# CALIBRATE THE LAMPS.
#####

# define constants
lamp_voltage_limit = 4.5

# define arrays used for polynomial fitting
hf_gauge_factor = 0.0001017  # V/kW/m2
nmbr_readings_pervoltage = 20
output_voltages = np.linspace(0, lamp_voltage_limit, 20)
all_output_voltages = np.zeros(nmbr_readings_pervoltage *
                               len(output_voltages) * 2)
all_input_voltages = np.zeros_like(all_output_voltages)
Example #44
def main():
    args = vars(ap.parse_args())

    # create frame counter
    fps_counter = FPSCounter()

    # total number of blinks
    TOTAL = 0

    # initialize dlib's face detector (HOG-based) and then create
    # the facial landmark predictor
    print("[INFO] loading facial landmark predictor...")
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(args["shape_predictor"])

    # grab the indexes of the facial landmarks for the left and
    # right eye, respectively
    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

    # start the video stream thread
    print("[INFO] starting video stream thread...")
    print("[INFO] print q to quit...")
    if args['video'] == "camera":
        vs = VideoStream(src=0).start()
        vs.stream.set(cv2.CAP_PROP_FPS, 15)
        fileStream = False
    else:
        vs = FileVideoStream(args["video"]).start()
        fileStream = True
        fps = vs.stream.get(cv2.CAP_PROP_FPS)

    # create dataloggers
    datalogger = DataLogger(columns=['ear', 'adr'])

    # blink detector
    blink_detector = BlinkDetector(time_window=5,
                                   plot=args['graph'],
                                   frame_delay=10)

    # loop over frames from the video stream
    frame_cnt = 0
    INIT_TIME = None
    while True:
        # if this is a file video stream, then we need to check if
        # there any more frames left in the buffer to process
        if fileStream and not vs.more():
            break

        # get timestamp
        if fileStream:
            timestamp = frame_cnt / fps
        else:
            if INIT_TIME is None:
                INIT_TIME = time.time()
            timestamp = time.time() - INIT_TIME
            fps = fps_counter.tick()

        # get the new frame
        frame = vs.read()
        frame_cnt += 1
        if frame is None:
            break

        frame = imutils.resize(frame, width=450)
        # convert the resized frame to grayscale
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # detect faces in the grayscale frame
        rects = detector(gray, 0)

        # loop over the face detections
        for rect in rects:
            # determine the facial landmarks for the face region, then
            # convert the facial landmark (x, y)-coordinates to a NumPy
            # array

            shape = predictor(gray, rect)
            shape = face_utils.shape_to_np(shape)

            # extract the left and right eye coordinates, then use the
            # coordinates to compute the eye aspect ratio for both eyes
            leftEye = shape[lStart:lEnd]
            rightEye = shape[rStart:rEnd]
            leftEAR = eye_aspect_ratio(leftEye)
            rightEAR = eye_aspect_ratio(rightEye)

            # compute the area-over-distance metric
            adr = AreaDistanceRatio.compute(leftEye, rightEye)
            # log ADR
            datalogger.log(adr, 'adr', timestamp)

            # average the eye aspect ratio together for both eyes
            ear = (leftEAR + rightEAR) / 2.0
            # log EAR
            datalogger.log(ear, 'ear', timestamp)

            # compute the convex hull for the left and right eye, then
            # visualize each of the eyes
            leftEyeHull = cv2.convexHull(leftEye)
            rightEyeHull = cv2.convexHull(rightEye)
            cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
            cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)

            # send new data to blink detector and check if it detected new blinks
            blink_detector.send(adr, timestamp)
            blink = blink_detector.get_blink()
            if blink is not None:
                blink_time, blink_dur = blink
                TOTAL += 1
                print(f"[BLINK] time: {blink_time:.2f}  dur: {blink_dur:.2f}")

            # draw the total number of blinks on the frame along with
            # the computed eye aspect ratio for the frame
            cv2.putText(frame, "Blinks: {}".format(TOTAL), (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
            cv2.putText(frame, "ADR: {:.2f}".format(ear), (300, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
            cv2.putText(frame, "FPS: {:.2f}".format(fps), (300, 60),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

        # show the frame
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

    # save datafile
    output_file = args['output_file']
    if output_file == 'ask':
        output_file = input("Enter filename to save: ")
    if output_file is not None:
        datalogger.save(output_file)

    # do a bit of cleanup
    cv2.destroyAllWindows()
    vs.stop()
Example #45
# Set up pressure pump switch GUI
pressure_pump_switch_gui = PressurePumpUim(pressure_pump_switch,
                                           main_window_ui)

# Set up picarro code GUI
picarro_code_gui = PicarroCodeUim(picarro_code, main_window_ui)

# Set up state GUI
state_gui = StateUim(state, main_window_ui, state_ui)

# Set up maintenanceTools GUI
maintenancetools_gui = MaintenanceToolsUim(maintenancetools,
                                           maintenancetools_ui)

# Set up Debug log GUI
debuglog_gui = DebuglogUim(debug_logger, debuglog_ui,
                           main_window.debuglog_window)

########################################################################################################################
# DATA LOGGER
########################################################################################################################
data_logger = DataLogger("data", humgen)

########################################################################################################################
# LAUNCH APPLICATION
########################################################################################################################

main_window.show()
sys.exit(app.exec_())
Example #46
def index():
    global datalogger
    datalogger = DataLogger('data_%d.csv' % int(time.time()))
    return render_template('index.html')
Example #47
    elif roll < 360:
        orientation = "Roll: NorthWest-North"
        
    return orientation

def find_compass():
    north = sense.get_compass()
    north = round(north, 1)
    
    return north

# initialize text
text.init_text()

# initialize dataloggers
logger = DataLogger()
sense_logger = SenseLogger()

# set timedelta
time_between = timedelta(minutes=10)

# get initial orientations
base_orientation_yaw = find_orientation_by_yaw()
logger.log_orientation(base_orientation_yaw)
base_orientation_roll = find_orientation_by_roll()
logger.log_orientation(base_orientation_roll)

# get baseline for acceleration
base_acc = baseline_acceleration()
last_baseline = datetime.now()
Example #48
    def main():
        config = configparser.ConfigParser(inline_comment_prefixes=(";", "#"))
        config.read(APP_CONFIG_FILE)

        # fetch interesting system data that we will use for statistics
        swo = sw.SystemWatcher()
        swo._update_host_fs()
        swo._update_cpu_internet_uptime()
        data = PollData.from_system_info(swo.system_info)
        logger.info("fetched system data")

        # TODO - Poll machines
        # m = Machine("RPI-A", "192.168.1.170", 443, "TCP/ping", True)
        # data.machines.append(m)

        # Write system data to permanent storage
        DataLogger.writeline_to_data_log(DATA_LOG_FILE, data)
        logger.info("wrote data to file ./" + DATA_LOG_FILE)

        # Does any value exceed a threshold?
        # In that case we need to send a report immediately
        send_warning = False
        if data.cpu_load >= config.getint("warning_thresholds",
                                          "cpu_utilization_percent"):
            send_warning = True
            logger.info("CPU load threshold exceeded!")
        if data.cpu_temp >= config.getint("warning_thresholds", "cpu_temp"):
            send_warning = True
            logger.info("CPU temp threshold exceeded!")
        if data.disk_usage_percent >= config.getint("warning_thresholds",
                                                    "disk_used_percent"):
            send_warning = True
            logger.info("Disk usage threshold exceeded!")

        # Get the last time we created a report
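        # (read_data_log with True appears to return report timestamps: dl[0] = last email report, dl[1] = last MQTT publish)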
        dl = DataLogger.read_data_log(DATA_LOG_FILE, True)

        # Is it time to publish to MQTT broker?
        mqtt_enabled, mqtt_interval = ch.get_report(config, "publish_to_mqtt")
        if mqtt_enabled and (dt.now() - dl[1]).total_seconds() > mqtt_interval:
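            # map the configured MQTT topics to the freshly collected values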
            to_pub = dict()
            to_pub[config.get("MQTT", "topic_alive")] = "on"
            to_pub[config.get("MQTT", "topic_cpu_temp")] = data.cpu_temp
            to_pub[config.get("MQTT", "topic_cpu_load")] = data.cpu_load
            to_pub[config.get("MQTT", "topic_cpu_internet")] = data.internet
            to_pub[config.get(
                "MQTT", "topic_diskusagepercent")] = data.disk_usage_percent
            mqtt_publish(to_pub, config.get("MQTT", "broker_ip"),
                         int(config.get("MQTT", "broker_port")),
                         config.get("MQTT", "broker_user"),
                         config.get("MQTT", "broker_pwd"))

        # Is it time to send an email report?
        email_enabled, email_interval = ch.get_report(config, "send_emails")
        if send_warning or email_enabled and (
                dt.now() - dl[0]).total_seconds() > email_interval:
            # Save the report date because it will be overwritten when aggregating
            last_report = dl[0]

            # aggregate data (an unfortunate side-effect is that the last report date will be set to now())
            aggregate_data()

            # Load data
            dl = DataLogger.read_data_log(DATA_LOG_FILE, False)

            # get email credentials
            email_config = ch.config_section_map(config, "email")
            credentials = gmail.get_email_credentials()
            http = credentials.authorize(httplib2.Http())
            service = discovery.build('gmail',
                                      'v1',
                                      http=http,
                                      cache_discovery=False)

            # build html email from a template
            html_email = gmail.apply_email_template(
                last_report, config.get("app", "name"), data, dl,
                path_join("email_templates/default.html"))

            gmail.send_email_message(
                service, email_config["user"],
                gmail.create_email_message(email_config["from_address"],
                                           email_config["to_address"],
                                           email_config["subject"],
                                           html_email))
            logger.info("Sent email report to " + email_config["to_address"])
Exemple #49
0
        astro_status = True

    return astro_status


# main program
sense = SenseHat()

# set correct rotation for astropi
sense.set_rotation(270)

# initialize text strings
tekster.init_tekst()

# initialize datalogger
logger = DataLogger()

# read baseline humidity until less than 1% variation
baseline = baseline_humidity()
last_baseline = datetime.now()

# set timedelta between each new baseline
time_between = timedelta(minutes=10)

while True:
    # find astronaut
    astronaut_status = find_astronaut(baseline)

    # write to the datalog
    logger.log_data(astronaut_status)
Exemple #50
0
class Pirozeda(object):
    def __init__(self):
        self.ramlog = collections.deque(maxlen=settings['ramlog']['length'])
        self.fslog_last = 0
        self.ramlog_last = 0
        # avoid a "no data received" error right at the beginning
        self.commdead_last = time.time()

        strace = settings['trace']
        self.trace = ProzedaTrace(strace['dir'] + strace['prefix'],
                                  strace['suffix'])

        ProzedaLogdata.set_config(settings['system'])
        ProzedaHistory.set_config(settings)

        sserial = settings['serialport']
        serialport = serial.Serial(sserial['port'], sserial['baudrate'])

        self.prozeda = ProzedaReader(serialport)
        sfslog = settings['fslog']
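        # file-system log; its path is assembled from the settings['fslog'] dir and prefix, plus the suffix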
        self.fslog = DataLogger(sfslog['dir'] + sfslog['prefix'],
                                sfslog['suffix'])

        self.prozeda.evt_rawdata_received = self.trace.data_received

        JsonrpcRequestHandler.pirozeda = self

    def run(self):
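        # start the JSON-RPC interface, then poll the Prozeda reader and write log entries in the loop below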
        JsonrpcRequestHandler.startserver()

        try:
            while True:

                time.sleep(0.1)
                ts = time.time()
                if not self.prozeda.communication_alive():
                    # nag only every 10 seconds
                    if ts >= (self.commdead_last + 10):
                        self.commdead_last = ts
                        print(str(datetime.now()) + ": no data received")

                if ts >= (self.fslog_last + settings['fslog']['interval']):
                    self.fslog_last = ts
                    logentry = self.prozeda.get_latest_logentry()
                    # TODO: check if logentry is old
                    if logentry is not None:
                        self.fslog.write(logentry.timestamp, "d",
                                         base64.b64encode(logentry.data))
                        # fslog.flush() # only enable flush for development
                        # (log will be written to file system for every line)
                    else:
                        self.fslog.write(time.time(), "w", "no data")

                if ts >= (self.ramlog_last + settings['ramlog']['interval']):
                    self.ramlog_last = ts
                    logentry = self.prozeda.get_latest_logentry()
                    # TODO: check if logentry is old
                    if logentry is not None:
                        self.ramlog.append(logentry)

        except KeyboardInterrupt:
            print("quitting...")
            self.prozeda.stop()
            exit()
Exemple #51
0
class ProzedaTrace(object):
    def __init__(self, fileprefix, filesuffix):
        self.fileprefix = fileprefix
        self.filesuffix = filesuffix

        self.starttime = None
        self.stoptime = None
        self.logger = None

        self.livetrace = collections.deque(maxlen=100)

    def start(self, duration, comment=None):
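        # calling start() again while a trace is running keeps the original start time
        # but pushes the stop time out to now + duration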
        if self.starttime is None:
            self.starttime = time.time()

        if duration is None:
            duration = 0
        self.stoptime = time.time() + int(duration)

        if self.logger is None:
            self.logger = DataLogger(self.fileprefix, self.filesuffix,
                                     '%Y-%m-%d_%H.%M.%S', True)

        if comment is not None and self.logger is not None:
            self.logger.write(None, 'c', comment)

    def stop(self):
        if self.logger is not None:
            self.logger.close()
        self.logger = None
        self.starttime = None
        self.stoptime = None

    def stats(self):
        result = {
            'file': None,
            'start': self.starttime,
            'stop': self.stoptime,
            'remaining': None,
            'size': None
        }
        if self.stoptime is not None:
            result['remaining'] = self.stoptime - time.time()
        if self.logger is not None:
            result['file'] = self.logger.filename
            result['size'] = self.logger.get_filesize()
        return result

    def data_received(self, prd, timestamp, line):
        # skip the auto-stop check when no trace is active (stoptime is None)
        if self.stoptime is not None and time.time() >= self.stoptime:
            self.stop()

        if self.logger is not None:
            self.logger.write(timestamp, 'd', line)

        self.livetrace.append([timestamp, line])

    def livetrace_get(self, last_timestamp=0):
        result = []
        for item in self.livetrace:
            if item[0] > last_timestamp:
                result.append(item)
        return result
Exemple #52
0
from datalogger import DataLogger
from time import sleep
import animasjoner
import tekster

logger = DataLogger()
tekster.init_tekst()
while True:
    logger.log_data()
    sleep(1)
    animasjoner.vis_animasjon()
    tekster.vis_tekst()
Exemple #53
0
import sys
from qtpy import QtWidgets
import os.path as osp
dir_path = osp.split(osp.dirname(osp.realpath(__file__)))[0]
sys.path.append(dir_path)  # in case datalogger is not accessible on the normal PYTHONPATH

if __name__ == '__main__':
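    # the first command-line argument is the directory handed to the DataLogger window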
    directory = sys.argv[1]

    APP = QtWidgets.QApplication(sys.argv)

    from datalogger import DataLogger

    DLG = DataLogger(directory)

    APP.exec_()
Exemple #54
0
# Set up logging
if config.getboolean('monitor', 'debug', fallback=False):
    print("Debug enabled")
    level = logging.DEBUG
else:
    level = logging.INFO
duallog.setup('solar-monitor',
              minLevel=level,
              fileLevel=level,
              rotation='daily',
              keep=30)

# Set up data logging
# datalogger = None
datalogger = DataLogger(config)

# Set up device manager and adapter
device_manager = SolarDeviceManager(adapter_name=config['monitor']['adapter'])
logging.info("Adapter status - Powered: {}".format(
    device_manager.is_adapter_powered))
if not device_manager.is_adapter_powered:
    logging.info("Powering on the adapter ...")
    device_manager.is_adapter_powered = True
    logging.info("Powered on")

# Run discovery
device_manager.update_devices()
logging.info("Starting discovery...")
# scan all the advertisements from the services list
device_manager.start_discovery()
Exemple #55
0
    def run(self):

        sensors = None
        datalogger = None

        try:

            # initialise objects
            sensors = Sensors(self.T_OFFSET, self.P_OFFSET, self.H_OFFSET)
            datalogger = DataLogger(self.DATALOGFILE, self.DATALOGKEYS, self.DATALOGMAXFILESIZE)
            self.scheduler = HeaterScheduler()
            self.modemanager = ModeManager(scheduler=self.scheduler,
                                           callbackfunc=self.modemanagercallbackfunc)
            self.mqttclient = MqttClient(subscribelist=self.SUBSCRIBELIST)
            self.heatercontroller = HeaterController()
            
            # initialise state
            self.modemanager.setMode(self.modemanager.AUTO)

            # initial data
            t_avg,p_avg,h_avg = sensors.getData()

            # present ourselves
            self.mqttclient.publish({topics.IOTHERMSTATUS: 'Active'})
            self.mqttclient.publish({topics.IOTHERMVERSION: __version__ + ' ' + __status__})
            self.mqttclient.publish({topics.MODE: self.modemanager.currentmode.name})
            self.mqttclient.publish({topics.TARGETTEMPERATURE: self.modemanager.currentmode.targettemperature})

            t = 0
            
            while True:
                t_, p, h = sensors.getData()
                messages = self.mqttclient.getData()

                # use the temperature value from mqtt instead of the local sensor
                if self.USE_OTHERTEMPERATURE and topics.OTHERTEMPERATURE in messages:
                    t = float(messages[topics.OTHERTEMPERATURE])

                if not self.USE_OTHERTEMPERATURE:
                    t = t_

                # running averages: each update is the mean of the previous average and the newest sample
                t_avg = (t_avg + t) / 2
                p_avg = (p_avg + p) / 2
                h_avg = (h_avg + h) / 2

                # approximate rates of change over one refresh interval
                d_t = (t - t_avg) / self.REFRESH_SECONDS
                d_p = (p - p_avg) / self.REFRESH_SECONDS
                d_h = (h - h_avg) / self.REFRESH_SECONDS

                # process data from subscribed topics
                self.processMessages(messages)

                # prepare for publishing
                messages = self.prepareMessages(t, p, h, d_t, d_p, d_h)
                datalogger.log(messages)
                self.mqttclient.publish(messages)

                # update the heatercontroller with the current and target temperatures
                # print('targettemperature = {}'.format(self.modemanager.currentmode.targettemperature))
                self.heatercontroller.update(t, self.modemanager.currentmode.targettemperature)

                sleep(self.REFRESH_SECONDS)

        finally:
            print('IOThermostat: Stopping IOThermostat..')
            if datalogger:
                datalogger.close()
                
            if sensors:
                sensors.close()
                
            if self.scheduler:
                self.scheduler.close()
                
            if self.heatercontroller:
                self.heatercontroller.close()
                
            if self.mqttclient:
                self.mqttclient.close()