Example #1
def takesnap(path='/var/www/webcam/images/', filename='current.jpg', quality=75, width=None, timeout=2000):
    # import picamera
    import subprocess
    import os
    from iiutilities.datalib import timestringtoseconds
    from iiutilities.datalib import gettimestring
    # camera = picamera.PiCamera()

    imagepath = path + filename
    timestamp = gettimestring()
    timestamppath = imagepath + '.timestamp'

    time1 = gettimestring()
    if width:
        height = int(float(width) / 1.33333)
        subprocess.call(['raspistill','-q', str(quality), '--width', str(width), '--height', str(height), '-t', str(timeout), '-o', imagepath])
    else:
        width = 2592
        height = 1944
        subprocess.call(['raspistill','-q', str(quality), '-t', str(timeout), '-o', imagepath])

    with open(timestamppath,'w') as f:
        f.write(timestamp)
    # camera.capture(path + filename)
    time2 = gettimestring()

    elapsedtime = timestringtoseconds(time2) - timestringtoseconds(time1)
    try:
        imagesize = os.path.getsize(imagepath)
    except:
        imagesize = 0

    return {'elapsedtime':elapsedtime, 'imagepath':imagepath, 'timestamp':timestamp, 'timestamppath': timestamppath, 'imageheight':height, 'imagewidth':width, 'imagesize':imagesize}
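
A minimal usage sketch (not part of the original example): it assumes a Raspberry Pi with raspistill on the PATH, the iiutilities package installed, and hypothetical output paths.

if __name__ == '__main__':
    # Capture a 1296-pixel-wide snapshot to a temporary directory and report the returned metadata
    result = takesnap(path='/tmp/', filename='snap.jpg', quality=60, width=1296)
    print('Captured {imagesize} bytes to {imagepath} in {elapsedtime} s'.format(**result))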
Example #2
def takesnap(path='/var/www/webcam/images/',
             filename='current.jpg',
             quality=75,
             width=None,
             timeout=2000):
    # import picamera
    import subprocess
    import os
    from iiutilities.datalib import timestringtoseconds
    from iiutilities.datalib import gettimestring
    # camera = picamera.PiCamera()

    imagepath = path + filename
    timestamp = gettimestring()
    timestamppath = imagepath + '.timestamp'

    time1 = gettimestring()
    if width:
        height = int(float(width) / 1.33333)
        subprocess.call([
            'raspistill', '-q',
            str(quality), '--width',
            str(width), '--height',
            str(height), '-t',
            str(timeout), '-o', imagepath
        ])
    else:
        width = 2592
        height = 1944
        subprocess.call([
            'raspistill', '-q',
            str(quality), '-t',
            str(timeout), '-o', imagepath
        ])

    with open(timestamppath, 'w') as f:
        f.write(timestamp)
    # camera.capture(path + filename)
    time2 = gettimestring()

    elapsedtime = timestringtoseconds(time2) - timestringtoseconds(time1)
    try:
        imagesize = os.path.getsize(imagepath)
    except:
        imagesize = 0

    return {
        'elapsedtime': elapsedtime,
        'imagepath': imagepath,
        'timestamp': timestamp,
        'timestamppath': timestamppath,
        'imageheight': height,
        'imagewidth': width,
        'imagesize': imagesize
    }
Example #3
def handle_unit_tests(**kwargs):

    settings = {
        'notifications':[]
    }
    settings.update(kwargs)

    from iiutilities import utility
    import cupidunittests
    import pilib
    from iiutilities import datalib

    system_database = pilib.dbs.system
    notifications_database = pilib.dbs.notifications

    unittestresults = cupidunittests.runalltests()

    # print('** Unit TEST RESULTS ** ')
    # print(unittestresults['totalerrorcount'],unittestresults['totalfailurecount'])
    if unittestresults['totalerrorcount'] > 0 or unittestresults['totalfailurecount'] > 0:
        unitnotify = next((item for item in settings['notifications'] if item['item'] == 'unittests' and int(item['enabled'])),
                          None)

        if unitnotify:
            options = datalib.parseoptions(unitnotify['options'])
            if 'type' in options:
                if options['type'] == 'email' and 'email' in options:
                    currenttime = datalib.gettimestring()
                    lastnotificationtime = unitnotify['lastnotification']
                    # default
                    frequency = 600
                    if 'frequency' in options:
                        try:
                            frequency = float(options['frequency'])
                        except:
                            pass

                    elapsedtime = datalib.timestringtoseconds(currenttime) - datalib.timestringtoseconds(
                        lastnotificationtime)
                    # print(elapsedtime,frequency)
                    if elapsedtime > frequency:
                        # Queue a message indicating we had to restart the systemstatus daemon
                        message = 'CuPID has failed unittests. Details follow:\r\n\r\n'
                        message += unittestresults['stringresult'].replace('\'', '"')
                        # message += '\r\n\r\n'
                        # message +=

                        # 'hostname' is not defined in this snippet; fall back to the system hostname
                        import socket
                        subject = 'CuPID : ' + socket.gethostname() + ' : unittests'
                        notifications_database.insert('queuednotifications',
                                                      {'type': 'email', 'message': message,
                                                       'options': 'email:' + options['email'] + ',subject:' + subject,
                                                       'queuedtime': currenttime})
                        system_database.set_single_value('notifications', 'lastnotification', currenttime,
                                                         condition="item='unittests'")
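
A hypothetical invocation sketch (not from the original source). The notification record shape is inferred from the lookups above, the option string follows the colon/comma format the snippet itself builds, and the timestamp format is assumed to match gettimestring().

handle_unit_tests(notifications=[{
    'item': 'unittests',
    'enabled': 1,
    'options': 'type:email,email:admin@example.com,frequency:600',
    'lastnotification': '2000-01-01 00:00:00'
}])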
Example #4
def checklivesessions(authdb, user, expiry):
    import time
    from iiutilities.datalib import timestringtoseconds
    activesessions = 0
    sessions = authdb.read_table('sessions')
    for session in sessions:
        sessioncreation = timestringtoseconds(session['timecreated'])
        currenttime = time.mktime(time.localtime())
        if currenttime - sessioncreation < expiry:
            activesessions += 1

    return activesessions
Example #6
def checkifdisableready(outputname, outputs):
    from time import time
    from iiutilities.datalib import timestringtoseconds

    # Find the variables we want
    for output in outputs:
        if output['name'] == outputname:
            minontime = output['minontime']
            ontime = timestringtoseconds(output['ontime'])

    if time() - ontime > minontime:
        return True
    else:
        return False
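
Several of these examples repeat the elapsed-time pattern timestringtoseconds(gettimestring()) - timestringtoseconds(stored). A small helper sketch of that pattern, assuming the iiutilities.datalib functions behave as used above:

def seconds_since(timestring):
    # Hypothetical convenience wrapper (not part of iiutilities) for the elapsed-time
    # pattern used throughout these examples. Assumes timestringtoseconds returns
    # epoch-style seconds for both arguments.
    from iiutilities.datalib import gettimestring, timestringtoseconds
    return timestringtoseconds(gettimestring()) - timestringtoseconds(timestring)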
Example #7
    def process(self):
        from iiutilities import datalib

        # TODO: Always determine status. This loads the value into the indicators, etc.

        if self.enabled:
            act = False

            self.statusmsg = datalib.gettimestring() + ' : Enabled and processing. '

            last_status = bool(self.status)

            # Update status.
            self.determine_status()

            # retrofit. i lazy.
            currstatus = bool(self.status)

            self.statusmsg += 'Last status is ' + str(last_status) + '. Currstatus is ' + str(currstatus) + '. '

            currenttime = datalib.gettimestring()

            # if status is true and current status is false, set ontime (or if on/off time field is empty)
            if currstatus and (not last_status or not self.ontime):
                # print(str(curstatus) + ' ' + str(self.status))
                self.statusmsg += 'Setting status ontime. '
                self.ontime = datalib.gettimestring()
            elif not currstatus and (last_status or not self.offtime):
                self.statusmsg += 'Setting status offtime. '
                self.offtime = datalib.gettimestring()

            # print('CURR STATUS',currstatus)
            # print('SELF.ACTIVE',self.active)
            # if status is true and alarm isn't yet active, see if ondelay exceeded
            if currstatus and not self.active:
                # print(pilib.timestringtoseconds(currenttime))
                statusontime = datalib.timestringtoseconds(currenttime) - datalib.timestringtoseconds(self.ontime, defaulttozero=True)
                # print(statusontime)
                if statusontime >= float(self.ondelay):
                    self.statusmsg += 'Setting action active. '
                    self.active = 1
                else:

                    self.statusmsg += 'On delay not reached. '
                    # print('on',self.ontime)
                    # print('now',currenttime)

            # if status is not true and alarm is active, see if offdelay exceeded
            if not currstatus and self.active:
                statusofftime = datalib.timestringtoseconds(currenttime) - datalib.timestringtoseconds(self.offtime, defaulttozero=True)
                if statusofftime >= float(self.offdelay):
                    self.statusmsg += 'Setting action inactive. '
                    self.active = 0

                    # act on inactive transition
                    # Send an alert / reset indicator if activereset is on
                    if self.activereset:
                        time_since_last_action = datalib.timestringtoseconds(currenttime) - datalib.timestringtoseconds(
                            self.lastactiontime, defaulttozero=True)
                        if time_since_last_action >= float(self.actionfrequency):
                            act = True
                            self.statusmsg += "Time to act on activereset. " + str(
                                time_since_last_action) + ' since last action, with action frequency of ' + str(
                                self.actionfrequency) + '. '
                        else:
                            act = False
                            self.statusmsg += "Not yet time to act."

                else:
                    self.statusmsg += 'Off delay not reached. '

            # test to see if it is time to alert, based on delay and alert time
            # print(self.statusmsg)
            if self.active:
                # check to see if it is time to alert
                # For things like outputs, actionfrequency should be zero to always enforce that action is on.

                # print(pilib.timestringtoseconds(currenttime))
                # print(pilib.timestringtoseconds(self.lastactiontime))
                # print(float(self.actionfrequency))
                # print(pilib.timestringtoseconds(currenttime)-pilib.timestringtoseconds(self.lastactiontime))
                time_since_last_action =  datalib.timestringtoseconds(currenttime) - datalib.timestringtoseconds(self.lastactiontime, defaulttozero=True)
                if time_since_last_action >= float(self.actionfrequency):
                    act = True
                    self.statusmsg += "Time to act. " + str(time_since_last_action) + ' since last action, with action frequency of ' + str(self.actionfrequency) + '. '
                else:
                    act = False
                    self.statusmsg += "Not yet time to act."
            else:
                # Active reset only happens on the transition.
                pass

            if act:
                # We're ready to alert or alert again.
                self.lastactiontime = currenttime
                if currstatus:
                    self.onact()
                else:
                    self.offact()
        else:
            self.statusmsg += 'Action disabled.'
            self.status = 0
"""

# Determine whether this process is enabled:

enabled = dblib.sqlitedatumquery(pilib.dirs.dbs.system, 'select sessioncontrolenabled from \'systemstatus\'')

while enabled:
    #print('enabled')
    polltime = dblib.sqlitedatumquery(pilib.dirs.dbs.session, 'select updatefrequency from \'settings\'')

    # Go through sessions and delete expired ones
    sessions = pilib.dirs.dbs.session.read_table('sessions')
    arrayquery = []
    for session in sessions:
        sessionstart = datalib.timestringtoseconds(session['timecreated'])
        sessionlength = session['sessionlength']
        if time.time() - sessionstart > sessionlength:
            arrayquery.append('delete from sessions where sessionid=\'' + session['sessionid'] + '\'')

    # Delete offending sessions
    dblib.sqlitemultquery(pilib.dirs.dbs.session, arrayquery)

    # Reload surviving sessions and summarize
    sessions = pilib.dirs.dbs.session.read_table('sessions')
    sessiondictarray = []
    for session in sessions:
        found = 0
        for dict in sessiondictarray:
            if dict['username'] == session['username']:
                found = 1
Example #9
# Determine whether this process is enabled:

enabled = dblib.sqlitedatumquery(
    pilib.dirs.dbs.system,
    'select sessioncontrolenabled from \'systemstatus\'')

while enabled:
    #print('enabled')
    polltime = dblib.sqlitedatumquery(
        pilib.dirs.dbs.session, 'select updatefrequency from \'settings\'')

    # Go through sessions and delete expired ones
    sessions = pilib.dirs.dbs.session.read_table('sessions')
    arrayquery = []
    for session in sessions:
        sessionstart = datalib.timestringtoseconds(session['timecreated'])
        sessionlength = session['sessionlength']
        if time.time() - sessionstart > sessionlength:
            arrayquery.append('delete from sessions where sessionid=\'' +
                              session['sessionid'] + '\'')

    # Delete offending sessions
    dblib.sqlitemultquery(pilib.dirs.dbs.session, arrayquery)

    # Reload surviving sessions and summarize
    sessions = pilib.dirs.dbs.session.read_table('sessions')
    sessiondictarray = []
    for session in sessions:
        found = 0
        for dict in sessiondictarray:
            if dict['username'] == session['username']:
Example #10
def analyze_and_histo_access_db(dbpath=access_dbpath):
    from iiutilities import dblib
    from iiutilities import datalib

    tablename = 'access_log'
    access_db = dblib.sqliteDatabase(dbpath)
    access_db_tablenames = access_db.get_table_names()
    access_records = access_db.read_table(tablename)

    access_meta = {
        'total_hits': {},
        'remote_hits': {},
        'hourly_hits': {},
        'not_found': [],
        'dbpath': dbpath,
        'tablename': tablename
    }
    for record in access_records:
        analyze_access_entry(record)
        if not record['domain']:
            pass
            # print('no domain for entry')
            # print(record)
        if record['domain'] in access_meta['total_hits']:
            access_meta['total_hits'][record['domain']]['times'].append(
                record['time'])
        else:
            access_meta['total_hits'][record['domain']] = {
                'times': [record['time']]
            }

        if not record['local']:
            if record['domain'] in access_meta['remote_hits']:
                access_meta['remote_hits'][record['domain']]['times'].append(
                    record['time'])
            else:
                access_meta['remote_hits'][record['domain']] = {
                    'times': [record['time']]
                }

        if record['status'] == '404':
            access_meta['not_found'].append({
                'url': record['full_request'],
                'time': record['time']
            })

    # Now process time-resolved data into tables
    # this should be better iterated (DRY), but this works
    for domain_name, domain_data in access_meta['total_hits'].items():

        domain_data['times'].sort()

        # Find first time
        first_time = datalib.timestringtoseconds(domain_data['times'][0])

        # Go back to last incremental hour
        first_hour_time_seconds = first_time - first_time % 3600

        # Find last hour (this actually just means that all are within the hour following this)
        last_time = datalib.timestringtoseconds(domain_data['times'][-1])

        last_hour_time_seconds = last_time - last_time % 3600

        bin_times = []
        bin_values = []
        num_bins = int(last_hour_time_seconds -
                       first_hour_time_seconds) // 3600 + 1
        for i in range(num_bins):
            bin_times.append(first_hour_time_seconds + i * 3600)
            bin_values.append(0)

        for time in domain_data['times']:
            time_seconds = datalib.timestringtoseconds(time)
            for index, bin_time in enumerate(bin_times):
                if index == num_bins - 1 or time_seconds < bin_times[index + 1]:
                    bin_values[index] += 1
                    break

        domain_data['histo_data'] = {}

        for bin_time, bin_value in zip(bin_times, bin_values):
            # Put time in middle of hour
            domain_data['histo_data'][datalib.gettimestring(bin_time +
                                                            1800)] = bin_value

    for domain_name, domain_data in access_meta['remote_hits'].items():

        domain_data['times'].sort()

        # Find first time
        first_time = datalib.timestringtoseconds(domain_data['times'][0])

        # Go back to last incremental hour
        first_hour_time_seconds = first_time - first_time % 3600

        # Find last hour (this actually just means that all are within the hour following this)
        last_time = datalib.timestringtoseconds(domain_data['times'][-1])

        last_hour_time_seconds = last_time - last_time % 3600

        bin_times = []
        bin_values = []
        num_bins = int(last_hour_time_seconds -
                       first_hour_time_seconds) // 3600 + 1
        for i in range(num_bins):
            bin_times.append(first_hour_time_seconds + i * 3600)
            bin_values.append(0)

        for time in domain_data['times']:
            time_seconds = datalib.timestringtoseconds(time)
            for index, bin_time in enumerate(bin_times):
                if index == num_bins - 1 or time_seconds < bin_times[index + 1]:
                    bin_values[index] += 1
                    break

        domain_data['histo_data'] = {}

        for bin_time, bin_value in zip(bin_times, bin_values):
            # Put time in middle of hour
            domain_data['histo_data'][datalib.gettimestring(bin_time +
                                                            1800)] = bin_value

    if access_db.queued_queries:
        access_db.execute_queue()

    return access_meta
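
The two per-domain binning passes above duplicate the same hourly-histogram logic (the "(DRY)" comment acknowledges this). A sketch of a shared helper under the same assumptions about datalib's time conversions; each loop could then reduce to domain_data['histo_data'] = histo_hourly(domain_data['times']).

def histo_hourly(times):
    # Hypothetical helper, not part of the original module: bin a list of
    # timestrings into hourly counts keyed by the middle-of-hour timestring.
    from iiutilities import datalib
    if not times:
        return {}
    seconds = sorted(datalib.timestringtoseconds(t) for t in times)
    first_hour = seconds[0] - seconds[0] % 3600
    last_hour = seconds[-1] - seconds[-1] % 3600
    num_bins = int((last_hour - first_hour) // 3600) + 1
    bin_values = [0] * num_bins
    for s in seconds:
        bin_values[int((s - first_hour) // 3600)] += 1
    return {datalib.gettimestring(first_hour + i * 3600 + 1800): count
            for i, count in enumerate(bin_values)}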
Example #11
def watchdognetstatus(allnetstatus={}):

    from iiutilities import utility
    from cupid import pilib
    from iiutilities import datalib
    from cupid import netconfig
    from iiutilities import dblib

    """
    And now comes the checking of configuration specific statuses and restarting them if enabled
    and necessary

    We are getting updated status information for each interface. 
    
    We have configuration info for interfaces. We compare the two based on mode and decide if we need to run 
    a netconfig on each interface. We do this by running through, interface by interface on netconfigstatus, and 
    comparing. We then add the name to interfaces we need to reconfig and pass to netconfig().
    
    We ignore interfaces we don't have a config for so we ignore things like hamachi interfaces, loopback, GSM, etc.

    """

    if 'ifaces_config' not in allnetstatus or 'ifaces_status' not in allnetstatus:
        allnetstatus = update_net_status()

    netconfig_data = allnetstatus['netconfig_data']
    netstatus = allnetstatus['netstatusdict']
    ifaces_config = allnetstatus['ifaces_config']
    ifaces_status = allnetstatus['ifaces_status']


    statusmsg = ''
    currenttime = datalib.gettimestring()

    reconfig_interfaces = []
    for iface_name, iface_status in ifaces_status.items():
        utility.log(pilib.dirs.logs.network, 'Checking status of interface {}. '.format(iface_name), 3, pilib.loglevels.network)
        if iface_status['status'] == 'fail':
            reconfig_interfaces.append(iface_name)
            utility.log(pilib.dirs.logs.network,
                'Interface has fail status. Setting reconfig for {}. '.format(iface_name), 1, pilib.loglevels.network)


    # Now do some sleuthing if we are being stringent about WAN access. Have to be careful about this if we are on a
    # private network

    run_WAN_reconfig = False
    if netconfig_data['requireWANaccess']:
        utility.log(pilib.dirs.logs.network, 'Requiring WAN access. Checking status and times. ', 3, pilib.loglevels.network)
        # print('NETSTATUS')
        # print(netstatus)
        if not netstatus['WANaccess']:
            utility.log(pilib.dirs.logs.network, 'No WANaccess. Checking offline time. ', 2, pilib.loglevels.network)
            try:
                offlinetime = netstatus['offlinetime']
            except:
                # print('netstatus ERROR')
                utility.log(pilib.dirs.logs.network, 'Error getting offlinetime. ', 2, pilib.loglevels.network)
                # Fall back to the current time so the offline period below computes as zero
                offlinetime = datalib.gettimestring()


            offlineperiod = datalib.timestringtoseconds(datalib.gettimestring()) - datalib.timestringtoseconds(offlinetime)

            utility.log(pilib.dirs.logs.network, 'We have been offline for ' + str(offlineperiod))

            # When did we last restart the network config? Is it time to again?
            timesincelastnetrestart = datalib.timestringtoseconds(
                datalib.gettimestring()) - datalib.timestringtoseconds(netstatus['lastnetreconfig'])

            utility.log(pilib.dirs.logs.network, 'It has been ' + str(timesincelastnetrestart) + ' seconds since we last restarted the network configuration. ')

            # Require that offline time is greater than WANretrytime
            if timesincelastnetrestart > int(netconfig_data['WANretrytime']) and offlineperiod >  int(netconfig_data['WANretrytime']):
                utility.log(pilib.dirs.logs.network, 'We are not online, and it has been long enough, exceeding retry time of ' + str(int(netconfig_data['WANretrytime'])))
                dblib.setsinglevalue(pilib.dirs.dbs.system, 'netstatus', 'lastnetreconfig', datalib.gettimestring())

                # We do reset the WAN offline time in the reboot sequence, however.

                restarts = int(dblib.getsinglevalue(pilib.dirs.dbs.system, 'netstatus', 'WANaccessrestarts'))
                restarts += 1
                dblib.setsinglevalue(pilib.dirs.dbs.system, 'netstatus', 'WANaccessrestarts', restarts)

                utility.log(pilib.dirs.logs.network, 'Going to run netconfig to correct WAN access.')
                run_WAN_reconfig = True

            else:
                utility.log(pilib.dirs.logs.network, 'Not yet time to run netconfig to correct WAN access. Retry time set at ' + str(netconfig_data['WANretrytime']))
        else:
            utility.log(pilib.dirs.logs.network, 'WANAccess is fine. ')

    if run_WAN_reconfig:
        # Set bad status in netstatus
        dblib.setsinglevalue(pilib.dirs.dbs.system, 'netstatus', 'netstate', 0)

        # Set ok time to '' to trigger rewrite next time status is ok
        lastoktime = dblib.getsinglevalue(pilib.dirs.dbs.system, 'netstatus', 'netstateoktime')
        if not lastoktime:
            dblib.setsinglevalue(pilib.dirs.dbs.system, 'netstatus', 'netstateoktime', datalib.gettimestring())
        else:
            if netconfig_data['rebootonfail']:
                offlinetime = datalib.timestringtoseconds(datalib.gettimestring()) - datalib.timestringtoseconds(lastoktime)
                if offlinetime > int(netconfig_data['rebootonfailperiod']):

                    # Set to '' so we get another full fail period before rebooting again
                    dblib.setsinglevalue(pilib.dirs.dbs.system, 'netstatus', 'netstateoktime', '')

                    # Same thing for WAN offline time
                    dblib.setsinglevalue(pilib.dirs.dbs.system, 'netstatus', 'offlinetime', '')

                    bootcounts = int(dblib.getsinglevalue(pilib.dirs.dbs.system, 'netstatus', 'netrebootcounter'))
                    bootcounts += 1
                    dblib.setsinglevalue(pilib.dirs.dbs.system, 'netstatus', 'netrebootcounter', str(bootcounts))

                    # Set system flag to reboot
                    utility.log(pilib.dirs.logs.system, 'REBOOTING to try to fix network', 0, pilib.loglevels.system)
                    dblib.setsinglevalue(pilib.dirs.dbs.system, 'systemflags', 'reboot', 1)


        # Figure out which interfaces to restart to fix WAN issues

        for interface_name, interface in ifaces_config.items():
            if interface['mode'] in ['status', 'station', 'dhcp']:
                utility.log(pilib.dirs.logs.network, 'Adding interface {} to reconfig list'.format(interface_name), 1, pilib.loglevels.network)
                reconfig_interfaces.append(interface_name)

    else:
        # Clear bad status in netstatus and set netoktime
        dblib.setsinglevalue(pilib.dirs.dbs.system, 'netstatus', 'statusmsg', 'Mode appears to be set.')
        dblib.setsinglevalue(pilib.dirs.dbs.system, 'netstatus', 'netstate', 1)
        dblib.setsinglevalue(pilib.dirs.dbs.system, 'netstatus', 'netstateoktime', datalib.gettimestring())

    dblib.setsinglevalue(pilib.dirs.dbs.system, 'netstatus', 'statusmsg', statusmsg)
    if reconfig_interfaces:
        utility.log(pilib.dirs.logs.network, 'Running netreconfig on list: {}'.format(reconfig_interfaces), 1,
                    pilib.loglevels.network)

        netconfig.runconfig(ifaces_to_configure=reconfig_interfaces, config=ifaces_config, config_all=False)
Example #12
def updateowfsdevices(busdevices, myProxy=None, debug=False):
    from cupid import pilib
    from iiutilities import dblib
    from iiutilities import datalib
    from iiutilities import utility

    # get defaults
    defaults = pilib.dirs.dbs.control.read_table('defaults')
    default_dict = {}
    for default_item in defaults:
        default_dict[default_item['valuename']] = default_item['value']

    # get current entries
    previnputs = pilib.dirs.dbs.control.read_table('inputs')

    # Make list of IDs for easy indexing
    previnputids = []
    for input in previnputs:
        previnputids.append(input['id'])

    # Iterate over devices. Determine if values exist for polltime, frequency.
    # If so, update the device. If not, use defaults.
    # Then determine whether we should update value or not (Read temperature)

    for index, device in enumerate(busdevices):
        # print(device.__dict__)
        if device.sensorid in previnputids:
            # Look up the previous entry once rather than re-indexing for each field
            previnput = previnputs[previnputids.index(device.sensorid)]
            try:
                newpollfreq = float(previnput['pollfreq'])
            except ValueError:
                device.pollfreq = float(default_dict['inputpollfreq'])
            else:
                if newpollfreq >= 0:
                    device.pollfreq = newpollfreq
                else:
                    device.pollfreq = float(default_dict['inputpollfreq'])

            device.ontime = previnput['ontime']
            device.offtime = previnput['offtime']
            device.polltime = previnput['polltime']
            device.value = previnput['value']
            device.log_options = previnput['log_options']
        else:
            device.pollfreq = float(default_dict['inputpollfreq'])
            device.ontime = ''
            device.offtime = ''
            device.polltime = ''
            device.value = ''

        """
        We're going to set a name because calling things by their ids is getting
        a bit ridiculous, but we can't have empty name fields if we rely on them
        being there. They need to be unique, so we'll name them by type and increment them

        Not really sure why this is conditional?
        """

        if device.type in ['DS18B20', 'DS1825']:

            # Get name if one exists
            name = dblib.sqlitedatumquery(pilib.dirs.dbs.control, 'select name from ioinfo where id=\'' + device.sensorid + '\'')

            # If doesn't exist, check to see if proposed name exists. If it doesn't, add it.
            # If it does, keep trying.

            if name == '':
                for rangeindex in range(100):
                    # check to see if name exists
                    # use the retry counter so each pass actually tries a new candidate name
                    name = device.type + '-' + str(rangeindex + 1)
                    # print(name)
                    foundid = dblib.sqlitedatumquery(pilib.dirs.dbs.control, 'select id from ioinfo where name=\'' + name + '\'')
                    # print('foundid' + foundid)
                    if foundid:
                        pass
                    else:
                        dblib.sqlitequery(pilib.dirs.dbs.control, dblib.makesqliteinsert('ioinfo', valuelist=[device.sensorid, name],
                                                                                        valuenames=['id', 'name']))
                        break
            device.name = name

            device.time_since_last = datalib.timestringtoseconds(datalib.gettimestring()) - datalib.timestringtoseconds(device.polltime, defaulttozero=True)

            # Is it time to read temperature?
            if device.time_since_last > device.pollfreq:
                utility.log(pilib.dirs.logs.io, 'reading temperature [' + device.name + '][' + device.sensorid + ']', 9, pilib.loglevels.io)
                device.readprop('temperature', myProxy)
                device.polltime = datalib.gettimestring()
                device.value = device.temperature.decode('utf-8')
            else:
                utility.log(pilib.dirs.logs.io, 'not time to poll', 9, pilib.loglevels.io, )
                # print('not time to poll')

            device.unit = 'F'

        # We update the device and send them back for other purposes.
        busdevices[index] = device

    return busdevices
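
The name-assignment loop in the middle of updateowfsdevices is self-contained enough to pull out. A standalone sketch that reuses the same dblib calls the original makes (the helper itself is hypothetical):

def assign_unique_name(control_db_path, sensorid, devicetype):
    # Hypothetical refactor of the naming block above: return an existing name
    # from ioinfo, or register and return the first free '<type>-<n>' name.
    from iiutilities import dblib
    name = dblib.sqlitedatumquery(
        control_db_path, "select name from ioinfo where id='" + sensorid + "'")
    if name:
        return name
    for i in range(100):
        candidate = devicetype + '-' + str(i + 1)
        foundid = dblib.sqlitedatumquery(
            control_db_path, "select id from ioinfo where name='" + candidate + "'")
        if not foundid:
            dblib.sqlitequery(control_db_path, dblib.makesqliteinsert(
                'ioinfo', valuelist=[sensorid, candidate], valuenames=['id', 'name']))
            return candidate
    return ''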
Example #13
def process_channel(**kwargs):

    systemstatus = system_db.read_table_row('systemstatus')[0]
    if 'channel' in kwargs:
        channel = kwargs['channel']
    elif 'channel_name' in kwargs:
        channels = control_db.read_table(
            'channels', '"name"=\'' + kwargs['channel_name'] + "'")
        if len(channels) == 1:
            channel = channels[0]
        else:
            print('wrong number of channels returned. aborting')
            return

    # channelindex = str(int(channel['channelindex']))
    logtablename = 'channel' + '_' + channel['name'] + '_log'
    time = datalib.gettimestring()
    disableoutputs = True

    status_msg = channel['name'] + ': '

    log_tablenames = log_db.get_table_names()

    # Channel enabled means different things for different types of channels

    channel_condition = '"name"=\'{}\''.format(channel['name'])

    # Create log if it doesn't exist
    if logtablename not in log_tablenames:
        log_db.create_table(logtablename, pilib.schema.channel_datalog)

    if channel['type'] == 'local':

        if channel['enabled']:

            status_msg = ''
            try:
                setpoint_value = float(channel['setpoint_value'])
            except:
                channel['enabled'] = 0
                status_msg += 'Error with setpoint. Disabling'
                control_db.set_single_value('channels', 'enabled', 0,
                                            channel_condition)

            # Need to test for age of data. If stale or disconnected, invalidate
            try:
                process_value = float(channel['process_value'])
            except:
                status_msg += 'Invalid control value. Disabling channel. '
                channel['enabled'] = 0
                control_db.set_single_value('channels', 'enabled', 0,
                                            channel_condition)

        # Move forward if still enabled after error-checking
        if channel['enabled']:

            status_msg += 'Channel Enabled. '

            # TODO : look at channel auto mode.
            if channel['mode'] == 'auto':
                status_msg += 'Mode:Auto. '
                # print('running auto sequence')

                # run algorithm on channel

                response = controllib.runalgorithm(pilib.dirs.dbs.control,
                                                   pilib.dirs.dbs.session,
                                                   channel['name'])
                action = response[0]
                message = response[1]

                status_msg += ' ' + response[1] + ' '
                status_msg += 'Action: ' + str(action) + '. '

                # Set action in channel

                controllib.setaction(pilib.dirs.dbs.control, channel['name'],
                                     action)

            elif channel['mode'] == 'manual':
                # print('manual mode')
                status_msg += 'Mode:Manual. '
                action = controllib.getaction(pilib.dirs.dbs.control,
                                              channel['name'])
            else:
                # print('error, mode= ' + mode)
                status_msg += 'Mode:Error. '

            if systemstatus['enableoutputs']:
                status_msg += 'System outputs enabled. '
                if channel['outputs_enabled']:
                    status_msg += 'Channel outputs enabled. '
                    disableoutputs = False

                    # find out whether action is positive or negative or
                    # not at all.

                    # and act. for now, this is binary, but in the future
                    # this will be a duty cycle daemon

                    outputsetnames = []
                    outputresetnames = []
                    if action > 0:
                        print("set positive output on")
                        outputsetnames.append(channel['positive_output'])
                        outputresetnames.append(channel['negative_output'])
                    elif action < 0:
                        print("set negative output on")
                        outputsetnames.append(channel['negative_output'])
                        outputresetnames.append(channel['positive_output'])
                    elif action == 0:
                        status_msg += 'No action. '
                        outputresetnames.append(channel['positive_output'])
                        outputresetnames.append(channel['negative_output'])
                    else:
                        status_msg += 'Algorithm error. Doing nothing.'

                    # Check to see if outputs are ready to enable/disable
                    # If not, pull them from list of set/reset

                    control_algorithm = control_db.read_table(
                        'controlalgorithms',
                        condition='"name"=\'' + channel['controlalgorithm'] +
                        "'")
                    if len(control_algorithm) == 1:
                        algorithm = control_algorithm[0]
                    else:
                        status_msg += 'Algorithm Error: Not found (or multiple?). Using default. '
                        algorithm = default_control_algorithm

                    outputstoset = []
                    for outputname in outputsetnames:
                        offtime = control_db.get_single_value(
                            'outputs',
                            'offtime',
                            condition='"name"=\'' + outputname + "'")
                        if datalib.timestringtoseconds(datalib.gettimestring(
                        )) - datalib.timestringtoseconds(
                                offtime) > algorithm['minofftime']:
                            outputstoset.append(outputname)
                        else:
                            status_msg += 'Output ' + outputname + ' not ready to enable. '

                    outputstoreset = []
                    for outputname in outputresetnames:
                        ontime = control_db.get_single_value(
                            'outputs',
                            'ontime',
                            condition='"name"=\'' + outputname + "'")
                        if datalib.timestringtoseconds(datalib.gettimestring(
                        )) - datalib.timestringtoseconds(
                                ontime) > algorithm['minontime']:
                            outputstoreset.append(outputname)
                        else:
                            status_msg += 'Output ' + outputname + ' not ready to disable. '
                    """ TODO: Change reference to controlinputs to name rather than id. Need to double-check
                    enforcement of no duplicates."""

                    # Find output in list of outputs if we have one to set

                    time = datalib.gettimestring()
                    if len(outputstoset) > 0 or len(outputstoreset) > 0:
                        for output in outputs:
                            id_condition = '"id"=\'' + output['id'] + "'"
                            if output['name'] in outputstoset:

                                # check current status
                                currvalue = output['value']
                                if not currvalue:  # No need to set if otherwise. Will be different for analog out
                                    # set ontime
                                    control_db.set_single_value('outputs',
                                                                'ontime',
                                                                time,
                                                                id_condition,
                                                                queue=True)
                                    # set value
                                    control_db.set_single_value('outputs',
                                                                'value',
                                                                1,
                                                                id_condition,
                                                                queue=True)
                                    status_msg += 'Output ' + output[
                                        'name'] + ' enabled. '
                                else:
                                    status_msg += 'Output ' + output[
                                        'name'] + ' already enabled. '

                            if output['name'] in outputstoreset:
                                # check current status
                                currvalue = output['value']
                                if currvalue:  # No need to set if otherwise. Will be different for analog out
                                    # set ontime
                                    control_db.set_single_value('outputs',
                                                                'offtime',
                                                                time,
                                                                id_condition,
                                                                queue=True)
                                    # set value
                                    control_db.set_single_value('outputs',
                                                                'value',
                                                                0,
                                                                id_condition,
                                                                queue=True)
                                    status_msg += 'Output ' + output[
                                        'name'] + ' disabled. '
                                else:
                                    status_msg += 'Output ' + output[
                                        'name'] + ' already disabled. '

                else:
                    status_msg += 'Channel outputs disabled. '
                    action = 0

            else:
                status_msg += 'System outputs disabled. '
                action = 0

            # Insert entry into control log
            insert = {
                'time': time,
                'process_value': channel['process_value'],
                'setpoint_value': channel['setpoint_value'],
                'action': channel['action'],
                'algorithm': channel['algorithm_name'],
                'enabled': channel['enabled'],
                'status_msg': status_msg
            }
            control_db.insert(logtablename, insert, queue=True)

            log_options = datalib.parseoptions(channel['log_options'])
            log_db.size_table(logtablename, **log_options)
        else:
            # Channel is disabled. Need to do active disable here.
            pass

    elif channel['type'] == 'remote':
        status_msg += 'Remote channel. '

        if channel['pending']:

            from iiutilities.datalib import parseoptions, dicttojson
            status_msg += 'Processing pending action. '
            pending = parseoptions(channel['pending'])

            if 'setpoint_value' in pending:
                status_msg += 'processing setpoint_value. '
                # Get control output and have a look at it.
                input_name = channel['sv_input']

                # try:
                inputs = control_db.read_table('inputs',
                                               '"name"=\'' + input_name + "'")
                # except:
                #     status_msg += 'Inputs query error. '
                #     return status_msg

                if len(inputs) == 1:
                    input = inputs[0]
                else:
                    status_msg += 'wrong number of query items returned, length: ' + str(
                        len(inputs)
                    ) + ' for query on input name: ' + input_name
                    print('ERROR: ' + status_msg)
                    return status_msg

                # write_to_input(input, value)
                if input['type'] == 'MBTCP':

                    input_id = input['id']

                    # Now, using this id, we can determine uniquely which MBTCP entry it came from
                    splits = input_id.split('_')
                    interfaceid = splits[0]
                    register = splits[1]
                    length = splits[2]

                    string_condition = dblib.string_condition_from_lists(
                        ['interfaceid', 'register', 'length'],
                        [interfaceid, register, length])
                    input_mb_entry = control_db.read_table(
                        'modbustcp', string_condition)[0]

                    # Get IP address
                    address = control_db.get_single_value(
                        'interfaces', 'address',
                        '"id"=\'' + input_mb_entry['interfaceid'] + "'")

                    from iiutilities import netfun

                    if input_mb_entry['options']:
                        input_options = parseoptions(input_mb_entry['options'])
                        if 'scale' in input_options:
                            pending['setpoint_value'] = float(
                                pending['setpoint_value']) / float(
                                    input_options['scale'])

                    try:
                        result = netfun.writeMBcodedaddresses(
                            address,
                            register, [float(pending['setpoint_value'])],
                            convert=input_mb_entry['format'])
                    except:
                        status_msg += 'Error in modbus'
                    else:
                        if result['statuscode'] == 0:

                            # Clear pending setpoint_value
                            pending.pop('setpoint_value', None)
                            pending_string = dicttojson(pending)
                            print('setting pending in setpoint_value mbtcp')

                            control_db.set_single_value(
                                'channels', 'pending', pending_string,
                                channel_condition)
                        else:
                            status_msg += 'modbus write operation returned a non-zero status of ' + str(
                                result['status'])

                elif input['type'] == 'MOTE':
                    mote_node = input['address'].split(':')[0]
                    mote_address = input['address'].split(':')[1]
                    if mote_node == '1':
                        message = '~setsv;' + mote_address + ';' + str(
                            pending['setpoint_value'])
                    else:
                        message = '~sendmsg;' + str(
                            mote_node
                        ) + ';;~setsv;' + mote_address + ';' + str(
                            pending['setpoint_value'])

                    motes_db = pilib.cupidDatabase(pilib.dirs.dbs.motes)
                    from time import sleep
                    for i in range(2):
                        time = datalib.gettimestring(
                            datalib.timestringtoseconds(
                                datalib.gettimestring()) + i)
                        motes_db.insert('queued', {
                            'queuedtime': time,
                            'message': message
                        })

                    # Clear pending setpoint_value
                    pending.pop('setpoint_value', None)
                    pending_string = dicttojson(pending)
                    print('setting pending in setpoint_value mote')

                    control_db.set_single_value('channels', 'pending',
                                                pending_string,
                                                channel_condition)

            if 'enabled' in pending:
                status_msg += 'processing enabledvalue. '

                # Get control output and have a look at it.
                input_name = channel['enabled_input']

                try:
                    inputs = control_db.read_table(
                        'inputs', '"name"=\'' + input_name + "'")
                except:
                    status_msg += 'Inputs query error. '
                    return status_msg

                if len(inputs) == 1:
                    input = inputs[0]
                else:
                    status_msg += 'wrong number of query items returned, length: ' + str(
                        len(inputs)) + '. '
                    return status_msg

                # write_to_input(input, value)
                if input['type'] == 'MBTCP':

                    input_id = input['id']

                    # Now, using this id, we can determine uniquely which MBTCP entry it came from
                    splits = input_id.split('_')
                    interfaceid = splits[0]
                    register = splits[1]
                    length = splits[2]

                    string_condition = dblib.string_condition_from_lists(
                        ['interfaceid', 'register', 'length'],
                        [interfaceid, register, length])
                    input_mb_entry = control_db.read_table(
                        'modbustcp', string_condition)[0]

                    # Get IP address
                    address = control_db.get_single_value(
                        'interfaces', 'address',
                        '"id"=\'' + input_mb_entry['interfaceid'] + "'")

                    from iiutilities import netfun
                    # print(address, register,input_mb_entry['format'], int(pending['enabled']))

                    if input_mb_entry['options']:
                        input_options = parseoptions(input_mb_entry['options'])

                    try:
                        result = netfun.writeMBcodedaddresses(
                            address,
                            register, [int(pending['enabled'])],
                            convert=input_mb_entry['format'])
                    except:
                        status_msg += 'Error in modbus'
                    else:
                        if result['statuscode'] == 0:
                            status_msg += 'That seems to have worked ok?'
                            # Clear pending setpoint_value
                            pending.pop('enabled', None)
                            pending_string = dicttojson(pending)
                            print('setting pending in enabled mbtcp')
                            control_db.set_single_value(
                                'channels', 'pending', pending_string,
                                channel_condition)
                        else:
                            status_msg += 'modbus write operation returned a non-zero status of ' + str(
                                result['status'])

                elif input['type'] == 'MOTE':
                    mote_node = input['address'].split(':')[0]
                    mote_address = input['address'].split(':')[1]
                    if mote_node == '1':
                        message = '~setrun;' + mote_address + ';' + str(
                            pending['enabled'])
                    else:
                        message = '~sendmsg;' + str(
                            mote_node
                        ) + ';;~setrun;' + mote_address + ';' + str(
                            pending['enabled'])

                    motes_db = pilib.cupidDatabase(pilib.dirs.dbs.motes)
                    from time import sleep
                    for i in range(2):
                        time = datalib.gettimestring(
                            datalib.timestringtoseconds(
                                datalib.gettimestring()) + i)
                        motes_db.insert('queued', {
                            'queuedtime': time,
                            'message': message
                        })

                    # Clear pending setpoint_value
                    pending.pop('enabled', None)
                    pending_string = dicttojson(pending)

                    control_db.set_single_value('channels', 'pending',
                                                pending_string,
                                                channel_condition)

        # Insert entry into control log
        insert = {
            'time': time,
            'process_value': channel['process_value'],
            'setpoint_value': channel['setpoint_value'],
            'action': channel['action'],
            'algorithm': channel['control_algorithm'],
            'enabled': channel['enabled'],
            'status_msg': status_msg
        }
        # print(insert)
        log_db.insert(logtablename, insert)

        # Size log
        log_options = datalib.parseoptions(channel['log_options'])
        log_db.size_table(logtablename, **log_options)

    # If active reset and we didn't set channel modes, disable outputs
    # Active reset is not yet explicitly declared, but implied

    if disableoutputs and channel['type'] not in ['remote']:
        status_msg += 'Disabling Outputs. '
        for id in [channel['positive_output'], channel['negative_output']]:
            control_db.set_single_value('outputs',
                                        'value',
                                        0,
                                        '"id"=\'' + id + "'",
                                        queue=True)
            status_msg += 'Outputs disabled for id=' + id + '. '

    # Set status message for channel
    control_db.set_single_value('channels',
                                'status_message',
                                status_msg,
                                channel_condition,
                                queue=True)

    # Set update time for channel
    control_db.set_single_value('channels',
                                'control_updatetime',
                                time,
                                channel_condition,
                                queue=True)

    # Execute query
    control_db.execute_queue()
    return status_msg
Example #14
def process_channel(**kwargs):

    systemstatus = system_db.read_table_row('systemstatus')[0]
    if 'channel' in kwargs:
        channel = kwargs['channel']
    elif 'channel_name' in kwargs:
        channels = control_db.read_table('channels', '"name"=\'' + kwargs['channel_name'] + "'")
        if len(channels) == 1:
            channel = channels[0]
        else:
            print('wrong number of channels returned. aborting')
            return


    # channelindex = str(int(channel['channelindex']))
    logtablename = 'channel' + '_' + channel['name'] + '_log'
    time = datalib.gettimestring()
    disableoutputs = True

    status_msg = channel['name'] + ': '

    log_tablenames = log_db.get_table_names()

    # Channel enabled means different things for different types of channels

    channel_condition = '"name"=\'{}\''.format(channel['name'])

    # Create log if it doesn't exist
    if logtablename not in log_tablenames:
        log_db.create_table(logtablename, pilib.schema.channel_datalog)

    if channel['type'] == 'local':

        if channel['enabled']:

            status_msg = ''
            try:
                setpoint_value = float(channel['setpoint_value'])
            except:
                channel['enabled'] = 0
                status_msg += 'Error with setpoint. Disabling'
                control_db.set_single_value('channels', 'enabled', 0, channel_condition)

            # Need to test for age of data. If stale or disconnected, invalidate
            try:
                process_value = float(channel['process_value'])
            except:
                status_msg += 'Invalid control value. Disabling channel. '
                channel['enabled'] = 0
                control_db.set_single_value('channels', 'enabled', 0, channel_condition)

        # Move forward if still enabled after error-checking
        if channel['enabled']:

            status_msg += 'Channel Enabled. '

            # TODO : look at channel auto mode.
            if channel['mode'] == 'auto':
                status_msg += 'Mode:Auto. '
                # print('running auto sequence')

                # run algorithm on channel

                response = controllib.runalgorithm(pilib.dirs.dbs.control, pilib.dirs.dbs.session, channel['name'])
                action = response[0]
                message = response[1]

                status_msg += ' ' + response[1] + ' '
                status_msg += 'Action: ' + str(action) + '. '

                # Set action in channel

                controllib.setaction(pilib.dirs.dbs.control, channel['name'], action)

            elif channel['mode'] == 'manual':
                # print('manual mode')
                status_msg += 'Mode:Manual. '
                action = controllib.getaction(pilib.dirs.dbs.control, channel['name'])
            else:
                # print('error, mode= ' + mode)
                status_msg += 'Mode:Error. '

            if systemstatus['enableoutputs']:
                status_msg += 'System outputs enabled. '
                if channel['outputs_enabled']:
                    status_msg += 'Channel outputs enabled. '
                    disableoutputs = False

                    # find out whether action is positive or negative or
                    # not at all.

                    # and act. for now, this is binary, but in the future
                    # this will be a duty cycle daemon

                    outputsetnames = []
                    outputresetnames = []
                    if action > 0:
                        print("set positive output on")
                        outputsetnames.append(channel['positive_output'])
                        outputresetnames.append(channel['negative_output'])
                    elif action < 0:
                        print("set negative output on")
                        outputsetnames.append(channel['negative_output'])
                        outputresetnames.append(channel['positive_output'])
                    elif action == 0:
                        status_msg += 'No action. '
                        outputresetnames.append(channel['positive_output'])
                        outputresetnames.append(channel['negative_output'])
                    else:
                        status_msg += 'Algorithm error. Doing nothing.'

                    # Check to see if outputs are ready to enable/disable
                    # If not, pull them from list of set/reset
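                    # e.g. with minofftime=300 (an assumed value), an output that turned off only
                    # 200 seconds ago is left out of outputstoset until the full off-time has elapsed.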

                    control_algorithm = control_db.read_table('controlalgorithms', condition='"name"=\'' + channel['controlalgorithm'] + "'")
                    if len(control_algorithm) == 1:
                        algorithm = control_algorithm[0]
                    else:
                        status_msg += 'Algorithm Error: Not found (or multiple?). Using default. '
                        algorithm = default_control_algorithm

                    outputstoset = []
                    for outputname in outputsetnames:
                        offtime = control_db.get_single_value('outputs', 'offtime',
                                                              condition='"name"=\'' + outputname + "'")
                        if datalib.timestringtoseconds(
                                datalib.gettimestring()) - datalib.timestringtoseconds(offtime) > algorithm[
                            'minofftime']:
                            outputstoset.append(outputname)
                        else:
                            status_msg += 'Output ' + outputname + ' not ready to enable. '

                    outputstoreset = []
                    for outputname in outputresetnames:
                        ontime = control_db.get_single_value('outputs', 'ontime',
                                                             condition='"name"=\'' + outputname + "'")
                        if datalib.timestringtoseconds(
                                datalib.gettimestring()) - datalib.timestringtoseconds(ontime) > algorithm[
                            'minontime']:
                            outputstoreset.append(outputname)
                        else:
                            status_msg += 'Output ' + outputname + ' not ready to disable. '

                    """ TODO: Change reference to controlinputs to name rather than id. Need to double-check
                    enforcement of no duplicates."""

                    # Find output in list of outputs if we have one to set

                    time = datalib.gettimestring()
                    if len(outputstoset) > 0 or len(outputstoreset) > 0:
                        for output in outputs:
                            id_condition = '"id"=\'' + output['id'] + "'"
                            if output['name'] in outputstoset:

                                # check current status
                                currvalue = output['value']
                                if not currvalue:  # No need to set if otherwise. Will be different for analog out
                                    # set ontime
                                    control_db.set_single_value('outputs', 'ontime', time, id_condition, queue=True)
                                    # set value
                                    control_db.set_single_value('outputs', 'value', 1, id_condition, queue=True)
                                    status_msg += 'Output ' + output['name'] + ' enabled. '
                                else:
                                    status_msg += 'Output ' + output['name'] + ' already enabled. '

                            if output['name'] in outputstoreset:
                                # check current status
                                currvalue = output['value']
                                if currvalue:  # No need to set if otherwise. Will be different for analog out
                                    # set ontime
                                    control_db.set_single_value('outputs', 'offtime', time, id_condition,
                                                                queue=True)
                                    # set value
                                    control_db.set_single_value('outputs', 'value', 0, id_condition, queue=True)
                                    status_msg += 'Output ' + output['name'] + ' disabled. '
                                else:
                                    status_msg += 'Output ' + output['name'] + ' already disabled. '

                else:
                    status_msg += 'Channel outputs disabled. '
                    action = 0

            else:
                status_msg += 'System outputs disabled. '
                action = 0

            # Insert entry into control log
            insert = {'time': time, 'process_value': channel['process_value'],
                      'setpoint_value': channel['setpoint_value'],
                      'action': channel['action'], 'algorithm': channel['algorithm_name'],
                      'enabled': channel['enabled'],
                      'status_msg': status_msg}
            control_db.insert(logtablename, insert, queue=True)

            log_options = datalib.parseoptions(channel['log_options'])
            log_db.size_table(logtablename, **log_options)
        else:
            # Channel is disabled. Need to do active disable here.
            pass

    elif channel['type'] == 'remote':
        status_msg += 'Remote channel. '

        if channel['pending']:

            from iiutilities.datalib import parseoptions, dicttojson
            status_msg += 'Processing pending action. '
            pending = parseoptions(channel['pending'])

            if 'setpoint_value' in pending:
                status_msg += 'processing setpoint_value. '
                # Get control output and have a look at it.
                input_name = channel['sv_input']

                # try:
                inputs = control_db.read_table('inputs', '"name"=\'' + input_name + "'")
                # except:
                #     status_msg += 'Inputs query error. '
                #     return status_msg

                if len(inputs) == 1:
                    input = inputs[0]
                else:
                    status_msg += 'wrong number of query items returned, length: ' + str(len(inputs)) + ' for query on input name: ' + input_name
                    print('ERROR: ' + status_msg)
                    return status_msg


                # write_to_input(input, value)
                if input['type'] == 'MBTCP':

                    input_id = input['id']

                    # Now, using this id, we can determine uniquely which MBTCP entry it came from
                    splits = input_id.split('_')
                    interfaceid = splits[0]
                    register = splits[1]
                    length = splits[2]
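                    # For illustration only (hypothetical id format): an id such as '1_400001_1'
                    # would split into interfaceid='1', register='400001', length='1'.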

                    string_condition = dblib.string_condition_from_lists(['interfaceid', 'register', 'length'],
                                                                         [interfaceid, register, length])
                    input_mb_entry = control_db.read_table('modbustcp', string_condition)[0]

                    # Get IP address
                    address = control_db.get_single_value('interfaces', 'address',
                                                          '"id"=\'' + input_mb_entry['interfaceid'] + "'")

                    from iiutilities import netfun

                    if input_mb_entry['options']:
                        input_options = parseoptions(input_mb_entry['options'])
                        if 'scale' in input_options:
                            pending['setpoint_value'] = float(pending['setpoint_value'])/float(input_options['scale'])

                    try:
                        result = netfun.writeMBcodedaddresses(address, register, [float(pending['setpoint_value'])], convert=input_mb_entry['format'])
                    except:
                        status_msg += 'Error in modbus'
                    else:
                        if result['statuscode'] == 0:

                            # Clear pending setpoint_value
                            pending.pop('setpoint_value', None)
                            pending_string = dicttojson(pending)
                            print('setting pending in setpoint_value mbtcp')

                            control_db.set_single_value('channels','pending',pending_string, channel_condition)
                        else:
                            status_msg += 'modbus write operation returned a non-zero status of ' + str(result['statuscode'])

                elif input['type'] == 'MOTE':
                    mote_node = input['address'].split(':')[0]
                    mote_address = input['address'].split(':')[1]
                    if mote_node == '1':
                        message = '~setsv;' + mote_address + ';' + str(pending['setpoint_value'])
                    else:
                        message = '~sendmsg;' + str(mote_node) + ';;~setsv;' + mote_address + ';' + str(pending['setpoint_value'])
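                    # Illustration (hypothetical values): node '2', address '1' and a setpoint of 65
                    # would produce the message '~sendmsg;2;;~setsv;1;65'.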

                    motes_db = pilib.cupidDatabase(pilib.dirs.dbs.motes)
                    from time import sleep
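                    # Queue the message twice, one second apart, presumably to improve the odds of delivery.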
                    for i in range(2):
                        time = datalib.gettimestring(datalib.timestringtoseconds(datalib.gettimestring()) + i)
                        motes_db.insert('queued', {'queuedtime':time, 'message':message})

                    # Clear pending setpoint_value
                    pending.pop('setpoint_value', None)
                    pending_string = dicttojson(pending)
                    print('setting pending in setpoint_value mote')

                    control_db.set_single_value('channels', 'pending', pending_string, channel_condition)

            if 'enabled' in pending:
                status_msg += 'processing enabled value. '

                # Get control output and have a look at it.
                input_name = channel['enabled_input']

                try:
                    inputs = control_db.read_table('inputs', '"name"=\'' + input_name + "'")
                except:
                    status_msg += 'Inputs query error. '
                    return status_msg

                if len(inputs) == 1:
                    input = inputs[0]
                else:
                    status_msg += 'wrong number of query items returned, length: ' + str(len(inputs)) + '. '
                    return status_msg

                # write_to_input(input, value)
                if input['type'] == 'MBTCP':

                    input_id = input['id']

                    # Now, using this id, we can determine uniquely which MBTCP entry it came from
                    splits = input_id.split('_')
                    interfaceid = splits[0]
                    register = splits[1]
                    length = splits[2]

                    string_condition = dblib.string_condition_from_lists(
                        ['interfaceid', 'register', 'length'],
                        [interfaceid, register, length])
                    input_mb_entry = control_db.read_table('modbustcp', string_condition)[0]

                    # Get IP address
                    address = control_db.get_single_value('interfaces', 'address',
                                                          '"id"=\'' + input_mb_entry['interfaceid'] + "'")

                    from iiutilities import netfun
                    # print(address, register,input_mb_entry['format'], int(pending['enabled']))

                    if input_mb_entry['options']:
                        input_options = parseoptions(input_mb_entry['options'])


                    try:
                        result = netfun.writeMBcodedaddresses(address, register,
                                                              [int(pending['enabled'])],
                                                              convert=input_mb_entry['format'])
                    except:
                        status_msg += 'Error in modbus'
                    else:
                        if result['statuscode'] == 0:
                            status_msg += 'That seems to have worked ok?'
                            # Clear pending enabled flag
                            pending.pop('enabled', None)
                            pending_string = dicttojson(pending)
                            print('setting pending in enabled mbtcp')
                            control_db.set_single_value('channels', 'pending', pending_string,
                                                        channel_condition)
                        else:
                            status_msg += 'modbus write operation returned a non-zero status of ' + str(
                                result['statuscode'])

                elif input['type'] == 'MOTE':
                    mote_node = input['address'].split(':')[0]
                    mote_address = input['address'].split(':')[1]
                    if mote_node == '1':
                        message = '~setrun;' + mote_address + ';' + str(pending['enabled'])
                    else:
                        message = '~sendmsg;' + str(mote_node) + ';;~setrun;' + mote_address + ';' + str(
                            pending['enabled'])

                    motes_db = pilib.cupidDatabase(pilib.dirs.dbs.motes)
                    from time import sleep
                    for i in range(2):
                        time = datalib.gettimestring(datalib.timestringtoseconds(datalib.gettimestring()) + i)
                        motes_db.insert('queued', {'queuedtime': time, 'message': message})

                    # Clear pending enabled flag
                    pending.pop('enabled', None)
                    pending_string = dicttojson(pending)

                    control_db.set_single_value('channels', 'pending', pending_string, channel_condition)


        # Insert entry into control log
        insert = {'time': time, 'process_value': channel['process_value'],
                  'setpoint_value': channel['setpoint_value'],
                  'action': channel['action'], 'algorithm': channel['control_algorithm'],
                  'enabled': channel['enabled'],
                  'status_msg': status_msg}
        # print(insert)
        log_db.insert(logtablename, insert)

        # Size log
        log_options = datalib.parseoptions(channel['log_options'])
        log_db.size_table(logtablename, **log_options)


    # If active reset and we didn't set channel modes, disable outputs
    # Active reset is not yet explicitly declared, but implied

    if disableoutputs and channel['type'] not in ['remote']:
        status_msg += 'Disabling Outputs. '
        for id in [channel['positive_output'], channel['negative_output']]:
            control_db.set_single_value('outputs','value',0,'"id"=\'' + id + "'", queue=True)
            status_msg += 'Outputs disabled for id=' + id + '. '

    # Set status message for channel
    control_db.set_single_value('channels', 'status_message', status_msg, channel_condition, queue=True)

    # Set update time for channel
    control_db.set_single_value('channels', 'control_updatetime', time, channel_condition, queue=True)

    # Execute query
    control_db.execute_queue()
    return status_msg
Example #15
def runalgorithm(controldbpath, recipedbpath, channelname):
    from iiutilities.datalib import timestringtoseconds, gettimestring
    from iiutilities.dblib import sqlitequery, datarowtodict
    from iiutilities import dblib
    import time

    message = ''

    # get our details of our channel

    # controldb = dblib.sqliteDatabase(controldbpath)
    # controldb.read_table('channels', condition="name='{}'".format(channelname), queue=True)

    channeldata = sqlitequery(controldbpath, 'select * from channels where name=' + "'" + channelname + "'")[0]
    channeldict = datarowtodict(controldbpath, 'channels', channeldata)
    # check to see if we are running a recipe

    controlrecipename = channeldict['controlrecipe']
    if controlrecipename and controlrecipename != 'none':

        # Get recipe details
        # If recipes get too big, we'll just get 
        # a couple stages. For now, we make a 
        # dictionary array

        #print('we are in recipe ' + controlrecipename)
        #print(dirs.dbs.session)

        recipedata = sqlitequery(recipedbpath, 'select * from \'' + controlrecipename + '\'')
        recipedictarray = []

        for stage in recipedata:
            recipedict = datarowtodict(recipedbpath, controlrecipename, stage)
            recipedictarray.append(recipedict)

        # get current stage
        currentstagenumber = int(channeldict['recipestage'])
        #print('current stage is ' + str(currentstagenumber) ) 

        # Get data for current stage
        stagefound = False
        for stage in recipedictarray:
            if int(stage['stagenumber']) == currentstagenumber:
                currentstage = stage
                stagefound = True
                break
        if stagefound:
            #print("stage found")
            pass
        else:
            message += 'Error: stage ' + str(currentstagenumber) + ' not found in recipe. '
            print(message)
            if currentstagenumber != 0:
                # Without a matching stage we cannot evaluate stage length below
                return [0, message]

        # Check to see if we need to move to next stage
        currenttime = time.time()
        #print('Time')
        #print(currenttime)
        #print(gettimestring(currenttime)) 

        if currentstagenumber == 0 or currenttime - timestringtoseconds(channeldict['recipestagestarttime']) > int(
                currentstage['stagelength']):
            print('stage time expired for stage ' + str(currentstagenumber) + '. Checking on stage advance. ')

            # Advance stage if there is another stage. Otherwise
            # update channel to be off a recipe. We assume explicitly 
            # that the stages are sequential integers.

            nextstagenumber = currentstagenumber + 1

            # look for next stage

            stagefound = False
            for stage in recipedictarray:
                if int(stage['stagenumber']) == nextstagenumber:
                    nextstage = stage
                    stagefound = True
                    break

            if stagefound:
                print(' Next stage was found. Setting next stage. ')
                if currentstagenumber == 0:
                    print("Stagenumber is 0. Setting recipe start time. ")

                    # Set recipe start time 
                    sqlitequery(controldbpath, 'update channels set recipestarttime=\'' + gettimestring(
                        currenttime) + '\' where name=\'' + channelname + '\'')

                # Set stage to new stage number
                sqlitequery(controldbpath, 'update channels set recipestage=\'' + str(
                    nextstagenumber) + '\' where name=\'' + channelname + '\'')

                # Set setpointvalue
                sqlitequery(controldbpath, 'update channels set setpointvalue=\'' + str(
                    nextstage['setpointvalue']) + '\' where name=\'' + channelname + '\'')

                # Set stage start time to now 
                sqlitequery(controldbpath, 'update channels set recipestagestarttime=\'' + gettimestring(
                    currenttime) + '\' where name=\'' + channelname + '\'')

                # Set new controlalgorithm 
                sqlitequery(controldbpath, 'update channels set controlalgorithm=\'' + nextstage[
                    'controlalgorithm'] + '\' where name=\'' + channelname + '\'')

            else:

                # Take channel off recipe
                sqlitequery(controldbpath,
                            'update channels set controlrecipe=\'none\' where name=\'' + channelname + '\'')
                sqlitequery(controldbpath, 'update channels set recipestate=\'0\' where name=\'' + channelname + '\'')

                sqlitequery(controldbpath,
                            'update channels set recipestage=\'0\' where name=\'' + channelname + '\'')


                # if lengthmode is setpoint

                # get current stage

                # check stage start against stage length
                # and current time

                # move to next stage if time and revise setpoint

                # adjust setpoint based on stage

                # set action based on setpoint

    else:
        # make sure we're not on recipe and on stage 0
        sqlitequery(controldbpath,
                    'update channels set controlrecipe=\'none\' where name=\'' + channelname + '\'')
        sqlitequery(controldbpath, 'update channels set recipestate=\'0\' where name=\'' + channelname + '\'')

        sqlitequery(controldbpath,
                    'update channels set recipestage=\'0\' where name=\'' + channelname + '\'')

    algorithm = channeldict['controlalgorithm']
    setpointvalue = float(channeldict['setpointvalue'])
    controlvalue = float(channeldict['controlvalue'])

    algorithmrows = sqlitequery(controldbpath, 'select * from controlalgorithms where name=' + "'" + algorithm + "'")
    algorithmrow = algorithmrows[0]
    algorithm = datarowtodict(controldbpath, 'controlalgorithms', algorithmrow)
    algtype = algorithm['type']

    # Default to no action so we always return a defined value, even for unrecognized algorithm types
    action = 0

    if algtype == 'on/off with deadband':
        #print(type)
        deadbandhigh = float(algorithm['deadbandhigh'])
        deadbandlow = float(algorithm['deadbandlow'])
        if setpointvalue > (controlvalue + deadbandhigh):
            action = 100
        elif setpointvalue < (controlvalue - deadbandlow):
            action = -100
        else:
            action = 0
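    # Worked example (hypothetical values): with setpointvalue=70, deadbandhigh=1 and deadbandlow=1,
    # a controlvalue of 68 gives action=100, 72 gives action=-100, and anything from 69 to 71 gives action=0.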
    #print('setpoint' + str(setpoint))
    #print('controlvalue' + str(controlvalue)) 
    #print(action)
    #print(message)
    return [action, message]
Example #16
def rundaemon(**kwargs):


    """
    First thing we are going to do is check to see if code is working. We do this first to minimize what we have to
    import to test this -- the script should not crash out before we do this.

    So we need dblib to function to read from the database to see whether we are going to email someone if things are
    broken.
    We need datalib to parse options on the notifications
    We also need utility to send an email
    """

    settings = {
        'startall':False,
        'debug':False,
        'daemon_freq': 60,
        'unit_test_frequency': 3600,  # Once per hour
        'runonce':False
    }

    settings.update(kwargs)

    import os
    FNULL = open(os.devnull, 'w')

    try:
        import socket
        hostname = socket.gethostname()
    except:
        hostname = 'unknown (?!)'

    import importlib

    try:
        import simplejson as json
    except:
        import json

    testmodules = ['iiutilities.dblib', 'iiutilities.utility', 'iiutilities.datalib', 'cupid.pilib']

    # these are the libraries we will need to send notifications that things aren't working.
    # To do this, however, we need some libraries.
    failures = ''
    for testmodule in testmodules:
        try:
            tempmodule = importlib.import_module(testmodule)
        except:
            failures += testmodule + ', '
    if failures:
        # Send an email to indicate that things are horribly broken
        subject = 'Hostname: ' + hostname + ' things are broken.'
        message = 'Test import of module(s) ' + failures[:-2] + ' failed. '
        em_gmail = gmail(subject=subject, message=message)
        em_gmail.send()

    from iiutilities import dblib, utility, datalib
    from cupid import pilib
    if settings['debug']:
        print('** DEBUG MODE ** ')
        pilib.set_debug()

    last_unittests = ''

    # Get notifications so we know when to notify
    system_database = pilib.dbs.system
    notification_database = pilib.dbs.notifications

    while True:

        notifications = system_database.read_table('notifications')

        currenttime = datalib.gettimestring()

        run_unit_tests = False
        if not last_unittests:
            run_unit_tests = True
        elif datalib.timestringtoseconds(currenttime) - datalib.timestringtoseconds(last_unittests) > settings['unit_test_frequency']:
            run_unit_tests = True

        if run_unit_tests:
            utility.log(pilib.dirs.logs.daemon, 'Running unit tests. ', 2, pilib.loglevels.daemon)
            handle_unit_tests()
            last_unittests = datalib.gettimestring()

        from subprocess import Popen, PIPE
        from time import sleep

        """
        Set up list of enabled statuses (whether to restart if
        we find that the process is not currently running
        from iiutilities import dblib, utility, datalib
        """

        system_status_options = system_database.read_table_row('systemstatus')[0]
        # print('systemstatusoptions')
        # print(system_status_options)

        item_enabled_dict = {'updateio':int(system_status_options['updateioenabled']),
                             'picontrol':int(system_status_options['picontrolenabled']),
                             'systemstatus':int(system_status_options['systemstatusenabled']),
                             'sessioncontrol':int(system_status_options['sessioncontrolenabled']),
                             'serialhandler':int(system_status_options['serialhandlerenabled'])
                             }

        # updateio_enabled = int(system_status_options['updateioenabled'])
        # picontrol_enabled = int(system_status_options['picontrolenabled'])
        # systemstatus_enabled = int(system_status_options['systemstatusenabled'])
        # sessioncontrol_enabled = int(system_status_options['sessioncontrolenabled'])
        # serialhandler_enabled =int( system_status_options['serialhandlerenabled'])

        # enableditemlist = [(int(updateio_enabled)), (int(picontrolenabled)), int(systemstatusenabled), int(sessioncontrolenabled), int(serialhandlerenabled)]

        # These are hard-coded and must match up for now. This should be cleaned up to be more easily modified.
        itemstatuses = utility.find_proc_statuses(pilib.daemonprocs)

        item_status_dict = {}
        for proc_name, status in zip(pilib.daemonprocnames, itemstatuses):
            item_status_dict[proc_name] = status

        """
        Here we check to see if things are running properly and not hung. First here is systemstatus
        """

        if item_enabled_dict['systemstatus'] and item_status_dict['systemstatus']['count'] == 1:
            lastsystemstatus = dblib.getsinglevalue(pilib.dirs.dbs.system, 'systemstatus', 'lastsystemstatuspoll')
            currenttime = datalib.gettimestring()

            timesincelastsystemstatus = datalib.timestringtoseconds(currenttime) - datalib.timestringtoseconds(lastsystemstatus)
            timecriterion = 90
            if timesincelastsystemstatus > timecriterion:
                utility.log(pilib.dirs.logs.daemon, 'Killing systemstatus because it has not run in ' + str(timesincelastsystemstatus) + 's', 1,pilib.loglevels.daemon)
                # utility.log(pilib.dirs.logs.system, 'Killing systemstatus because it has not run in ' + str(timesincelastsystemstatus) + 's',1,1, pilib.loglevels.system)

                killnotify = next((item for item in notifications if item['item'] == 'daemonkillproc' and int(item['enabled'])), None)
                if killnotify:
                    options = datalib.parseoptions(killnotify['options'])
                    if 'type' in options:
                        if options['type'] == 'email' and 'email' in options:
                            # Queue a message indicating we had to restart the systemstatus daemon
                            message = 'Systemstatus is being killed on ' + hostname + ' because it has not run in ' + \
                                str(timesincelastsystemstatus) + 's with a criteria of ' +  \
                                str(timecriterion) + '. This occurred at ' + currenttime
                            subject = 'CuPID : ' + hostname + ' : killnotify'
                            notification_database.insert('queuednotifications',
                                                         {'type': 'email', 'message': message,
                                                          'options': 'email:' + options['email'] + ',subject:' + subject,
                                                          'queuedtime': currenttime})

                utility.kill_proc_by_name('systemstatus')

                # Also kill hamachi, since this is almost always the culprit
                utility.kill_proc_by_name('hamachi')

        # These are hard-coded and must match up for now. This should be cleaned up to be more easily modified.
        hamachi_status = utility.find_proc_statuses(['hamachi'])[0]
        if hamachi_status['count'] > 1:
            utility.log(pilib.dirs.logs.daemon, 'Killing hamachi with proc count of {}'.format(hamachi_status['count']), 0, pilib.loglevels.daemon)
            utility.kill_proc_by_name('hamachi')


        # Set system message
        systemstatusmsg = ''
        for name in pilib.daemonprocnames:
            systemincmessage = name + ' - Enabled: ' + str(item_enabled_dict[name]) + ' Status: ' + str(item_status_dict[name]['count']) + '. '
            systemstatusmsg += systemincmessage
            utility.log(pilib.dirs.logs.daemon, 'Item status message: ' + systemincmessage, 0, pilib.loglevels.daemon)

        system_database.set_single_value('systemstatus', 'systemmessage', systemstatusmsg)

        # Set up list of itemnames in the systemstatus table that
        # we assign the values to when we detect if the process
        # is running or not

        for name, process in zip(pilib.daemonprocnames, pilib.daemonprocs):

            # set status
            if item_status_dict[name]['count'] == 1:
                # Set status variable by name. This is static based on schema
                system_database.set_single_value('systemstatus', name + 'status', 1)
                if pilib.loglevels.daemon > 0:
                    utility.log(pilib.dirs.logs.daemon, 'Process is running: ' + pilib.dirs.baselib + process, 4, pilib.loglevels.daemon)

            elif item_status_dict[name]['count'] > 1:
                # multiple instances are running. This is bad.
                system_database.set_single_value('systemstatus', name + 'status', 0)
                if pilib.loglevels.daemon > 0:
                    utility.log(pilib.dirs.logs.daemon, 'Multiple instances of process {} are running ({}): '.format(pilib.dirs.baselib + process, item_status_dict[name]['count']), 2,
                                pilib.loglevels.daemon)

                utility.kill_proc_by_name(process)

            # Now fire up if we need to.
            if item_status_dict[name]['count'] != 1:
                system_database.set_single_value('systemstatus', name + 'status', 0)
                if pilib.loglevels.daemon > 0:
                    utility.log(pilib.dirs.logs.daemon, 'Process is not running: ' + pilib.dirs.baselib + process, 2, pilib.loglevels.daemon)

                # run if set to enable
                if item_enabled_dict[name]:
                    # print(pilib.dirs.baselib + pilib.daemonprocs[index])
                    if pilib.loglevels.daemon > 0:
                        utility.log(pilib.dirs.logs.daemon, 'Starting ' + pilib.dirs.baselib + process, 2, pilib.loglevels.daemon)

                    # procresult = Popen([pilib.dirs.baselib + process], stdout=PIPE, stderr=PIPE)
                    procresult = Popen([pilib.dirs.baselib + process, '&'], stdout=FNULL, stderr=FNULL)
                    # if pilib.loglevels.daemon > 0:
                    #     pilib.writedatedlogmsg(pilib.dirs.logs.daemonproc, procresult.stdout.read())



        # Time to let things start up
        sleep(3)

        # Refresh after set
        itemstatuses = utility.find_proc_statuses(pilib.daemonprocs)
        item_status_dict = {}
        for name, status in zip(pilib.daemonprocnames, itemstatuses):
            item_status_dict[name] = status

        for name in pilib.daemonprocnames:
            # set status
            if item_status_dict[name]['count'] == 1:
                system_database.set_single_value('systemstatus', name + 'status', 1)
            else:
                system_database.set_single_value('systemstatus', name + 'status', 0)

        """
        Process Actions.
        Careful here. This does not carry out things like indicators, which are set from picontrol. A bit wonky, as we
        would like the indicators to update immediately. On the other hand, we want picontrol to be the master controller
        of IO.
        """

        from cupid.actions import processactions
        utility.log(pilib.dirs.logs.daemon, 'Processing actions', 2, pilib.loglevels.daemon)
        processactions()
        utility.log(pilib.dirs.logs.daemon, 'Done processing actions', 2, pilib.loglevels.daemon)

        systemstatusmsg = ''
        for name in pilib.daemonprocnames:
            systemincmessage = name + ' - Enabled: ' + str(item_enabled_dict[name]) + ' Status: ' + json.dumps(
                item_status_dict[name]) + '. '
            systemstatusmsg += systemincmessage
            if pilib.loglevels.daemon > 0:
                utility.log(pilib.dirs.logs.daemon, 'Item status message: ' + systemincmessage, 2, pilib.loglevels.daemon)

        # print(systemstatusmsg)
        system_database.set_single_value('systemstatus', 'systemmessage', systemstatusmsg)

        # Rotate all logs
        utility.log(pilib.dirs.logs.daemon, 'Rotating logs. ')
        pilib.rotate_all_logs()

        if settings['runonce']:
            return
Example #17
def watchdognetstatus(allnetstatus={}):

    from iiutilities import utility
    from cupid import pilib
    from iiutilities import datalib
    from cupid import netconfig
    from iiutilities import dblib
    """
    And now comes the checking of configuration specific statuses and restarting them if enabled
    and necessary

    We are getting updated status information for each interface. 
    
    We have configuration info for interfaces. We compare the two based on mode and decide if we need to run 
    a netconfig on each interface. We do this by running through, interface by interface on netconfigstatus, and 
    comparing. We then add the name to interfaces we need to reconfig and pass to netconfig().
    
    We ignore interfaces we don't have a config for so we ignore things like hamachi interfaces, loopback, GSM, etc.

    """

    if 'ifaces_config' not in allnetstatus or 'ifaces_status' not in allnetstatus:
        allnetstatus = update_net_status()

    netconfig_data = allnetstatus['netconfig_data']
    netstatus = allnetstatus['netstatusdict']
    ifaces_config = allnetstatus['ifaces_config']
    ifaces_status = allnetstatus['ifaces_status']

    statusmsg = ''
    currenttime = datalib.gettimestring()

    reconfig_interfaces = []
    for iface_name, iface_status in ifaces_status.items():
        utility.log(
            pilib.dirs.logs.network,
            'Checking status of interface {}. '.format(iface_name),
            3, pilib.loglevels.network)
        if iface_status['status'] == 'fail':
            reconfig_interfaces.append(iface_name)
            utility.log(
                pilib.dirs.logs.network,
                'Interface has fail status. Setting reconfig for {}. '.format(iface_name),
                1, pilib.loglevels.network)

    # Now do some sleuthing if we are being stringent about WAN access. Have to be careful about this if we are on a
    # private network

    run_WAN_reconfig = False
    if netconfig_data['requireWANaccess']:
        utility.log(pilib.dirs.logs.network,
                    'Requiring WAN access. Checking status and times. ', 3,
                    pilib.loglevels.network)
        # print('NETSTATUS')
        # print(netstatus)
        if not netstatus['WANaccess']:
            utility.log(pilib.dirs.logs.network,
                        'No WANaccess. Checking offline time. ', 2,
                        pilib.loglevels.network)
            try:
                offlinetime = netstatus['offlinetime']
            except:
                # print('netstatus ERROR')
                utility.log(pilib.dirs.logs.network,
                            'Error getting offlinetime. ', 2,
                            pilib.loglevels.network)
                # Fall back to the current time so offlineperiod evaluates to zero below
                offlinetime = datalib.gettimestring()

            offlineperiod = datalib.timestringtoseconds(datalib.gettimestring(
            )) - datalib.timestringtoseconds(offlinetime)

            utility.log(pilib.dirs.logs.network,
                        'We have been offline for ' + str(offlineperiod))

            # When did we last restart the network config? Is it time to again?
            timesincelastnetrestart = datalib.timestringtoseconds(
                datalib.gettimestring()) - datalib.timestringtoseconds(
                    netstatus['lastnetreconfig'])

            utility.log(
                pilib.dirs.logs.network,
                'It has been ' + str(timesincelastnetrestart) +
                ' seconds since we last restarted the network configuration. ')

            # Require that offline time is greater than WANretrytime
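            # e.g. with WANretrytime=300 (an assumed value), we only reconfigure once we have been
            # offline for more than 300 seconds AND at least 300 seconds have passed since the last attempt.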
            if timesincelastnetrestart > int(
                    netconfig_data['WANretrytime']) and offlineperiod > int(
                        netconfig_data['WANretrytime']):
                utility.log(
                    pilib.dirs.logs.network,
                    'We are not online, and it has been long enough, exceeding retry time of '
                    + str(int(netconfig_data['WANretrytime'])))
                dblib.setsinglevalue(pilib.dirs.dbs.system,
                                     'netstatus', 'lastnetreconfig',
                                     datalib.gettimestring())

                # We do reset the WAN offline time in the reboot sequence, however.

                restarts = int(
                    dblib.getsinglevalue(pilib.dirs.dbs.system, 'netstatus',
                                         'WANaccessrestarts'))
                restarts += 1
                dblib.setsinglevalue(pilib.dirs.dbs.system, 'netstatus',
                                     'WANaccessrestarts', restarts)

                utility.log(pilib.dirs.logs.network,
                            'Going to run netconfig to correct WAN access.')
                run_WAN_reconfig = True

            else:
                utility.log(
                    pilib.dirs.logs.network,
                    'Not yet time to run netconfig to correct WAN access. Retry time set at '
                    + str(netconfig_data['WANretrytime']))
        else:
            utility.log(pilib.dirs.logs.network, 'WANAccess is fine. ')

    if run_WAN_reconfig:
        # Set bad status in netstatus
        dblib.setsinglevalue(pilib.dirs.dbs.system, 'netstatus', 'netstate', 0)

        # Set ok time to '' to trigger rewrite next time status is ok
        lastoktime = dblib.getsinglevalue(pilib.dirs.dbs.system, 'netstatus',
                                          'netstateoktime')
        if not lastoktime:
            dblib.setsinglevalue(pilib.dirs.dbs.system, 'netstatus',
                                 'netstateoktime', datalib.gettimestring())
        else:
            if netconfig_data['rebootonfail']:
                offlinetime = datalib.timestringtoseconds(
                    datalib.gettimestring()) - datalib.timestringtoseconds(
                        lastoktime)
                if offlinetime > int(netconfig_data['rebootonfailperiod']):

                    # Set to '' so we get another full fail period before rebooting again
                    dblib.setsinglevalue(pilib.dirs.dbs.system, 'netstatus',
                                         'netstateoktime', '')

                    # Same thing for WAN offline time
                    dblib.setsinglevalue(pilib.dirs.dbs.system, 'netstatus',
                                         'offlinetime', '')

                    bootcounts = int(
                        dblib.getsinglevalue(pilib.dirs.dbs.system,
                                             'netstatus', 'netrebootcounter'))
                    bootcounts += 1
                    dblib.setsinglevalue(pilib.dirs.dbs.system, 'netstatus',
                                         'netrebootcounter', str(bootcounts))

                    # Set system flag to reboot
                    utility.log(pilib.dirs.logs.system,
                                'REBOOTING to try to fix network', 0,
                                pilib.loglevels.system)
                    dblib.setsinglevalue(pilib.dirs.dbs.system, 'systemflags',
                                         'reboot', 1)

        # Figure out which interfaces to restart to fix WAN issues

        for interface_name, interface in ifaces_config.items():
            if interface['mode'] in ['status', 'station', 'dhcp']:
                utility.log(
                    pilib.dirs.logs.network,
                    'Adding interface {} to reconfig list'.format(interface_name),
                    1, pilib.loglevels.network)
                reconfig_interfaces.append(interface_name)

    else:
        # Clear bad status in netstatus and set netoktime
        dblib.setsinglevalue(pilib.dirs.dbs.system, 'netstatus', 'statusmsg',
                             'Mode appears to be set.')
        dblib.setsinglevalue(pilib.dirs.dbs.system, 'netstatus', 'netstate', 1)
        dblib.setsinglevalue(pilib.dirs.dbs.system, 'netstatus',
                             'netstateoktime', datalib.gettimestring())

    dblib.setsinglevalue(pilib.dirs.dbs.system, 'netstatus', 'statusmsg',
                         statusmsg)
    if reconfig_interfaces:
        utility.log(
            pilib.dirs.logs.network,
            'Running netreconfig on list: {}'.format(reconfig_interfaces), 1,
            pilib.loglevels.network)

        netconfig.runconfig(ifaces_to_configure=reconfig_interfaces,
                            config=ifaces_config,
                            config_all=False)
Example #18
def run_data_agent(**kwargs):

    from iiutilities import dblib
    from iiutilities.datalib import gettimestring, timestringtoseconds
    import json

    settings = {
        'debug':False,
        'agent_db_path':'/var/www/data/dataagent.db',
        'inputs_db_path':'/var/www/data/control.db',
        'inputs_table':'inputs',
        'send_all':False
    }
    settings.update(kwargs)

    data_agent_db = dblib.sqliteDatabase(settings['agent_db_path'])
    inputs_db = dblib.sqliteDatabase(settings['inputs_db_path'])

    # get data_agent items
    data_agent_entries = data_agent_db.read_table('send_items')

    inputs = inputs_db.read_table('inputs')
    inputs_dict = {}
    for input in inputs:
        inputs_dict[input['id']] = input

    current_time = gettimestring()

    """ 
    Loop through to find things that definitely need to be transmitted. 
    Also, find if there are things that should be transmitted within a fixed window (bunch_period)
    If we are going to transmit anyway, attach these items. This way if we barely miss a transmit event, we will
    still send it and not waste data on two sets of headers.
    """

    """ 
    Data has following format:
    post_data = 
    {
      'post_time':current_time,
      'data': [
        {
          id : data_id,
          name : common name (optional)
          data : [
            data entry,
            data entry,
            ...
        }
      ],
      ...
    }
    """
    post_data = {
        'post_time': current_time,
        'data': []
    }
    maybe_xmit = []

    for agent_entry in data_agent_entries:
        # Initialize per-entry flags so a disabled entry can't reference stale or undefined values below
        send = False
        maybe_send = False
        if agent_entry['enabled']:
            if settings['debug']:
                print('{} Enabled '.format(agent_entry['id']))
            options = json.loads(agent_entry['options'])

            da_vars.default_agent_item_options.update(options)
            options = da_vars.default_agent_item_options

            # TODO: Build in other modes besides single.
            # Build in modularity for other ordinates.

            # Create the entry
            if agent_entry['id'] not in inputs_dict:
                if settings['debug']:
                    print('input id {} not found '.format(agent_entry['id']))
                continue

            inputs_entry = inputs_dict[agent_entry['id']]

            send_entry = {
                'id': agent_entry['id']
            }
            if 'name' in inputs_dict[agent_entry['id']]:
                send_entry['name'] = inputs_entry['name']

            if options['full_entry']:
                send_entry['data'] = [inputs_entry]
            else:
                send_entry['data'] = [{'id': agent_entry['id'], 'polltime':inputs_entry['polltime'],
                                   'value': inputs_entry['value']}]

            send = False
            maybe_send = False
            if not agent_entry['last_transmit'] or settings['send_all']:
                send = True
            else:
                elapsed_since_xmit = timestringtoseconds(current_time) - timestringtoseconds(agent_entry['last_transmit'])
                if elapsed_since_xmit > options['transmit_period']:
                    send = True
                elif (elapsed_since_xmit + options['bunch_period']) > options['transmit_period']:
                    maybe_send = True

        else:
            if settings['debug']:
                print('{} Disabled '.format(agent_entry['id']))

        if send:
            if settings['debug']:
                print('Sending "{}"'.format(agent_entry['id']))
            post_data['data'].append(send_entry)

        elif maybe_send:
            if settings['debug']:
                print('Attaching "{}" to send if a transmit happens anyway'.format(agent_entry['id']))
            maybe_xmit.append(send_entry)

        else:
            if settings['debug']:
                print('Not sending {}'.format(agent_entry['id']))
    """
    Now determine whether we have data that definitely needs to be sent. If so, throw the bunch data in.
    """

    if post_data['data']:
        post_data['data'].extend(maybe_xmit)
        if settings['debug']:
            print('TIME TO SEND THIS STUFF')
            print(post_data)

    try:
        response = post_client_data(**{'post_data':post_data})
    except:

        import traceback
        trace_message = traceback.format_exc()
        if settings['debug']:
            print('Error, traceback: \n{}'.format(trace_message))
        return {'status':1, 'message':trace_message}
    else:
        if settings['debug']:
            print('SUCCESS')

        # Now we need to mark entries as sent
        for entry in post_data['data']:
            data_agent_db.set_single_value('send_items', 'last_transmit', current_time, condition="id='{}'".format(entry['id']), queue=True)

        data_agent_db.execute_queue()

    return response
Example #19
def updateowfsdevices(busdevices, myProxy=None, debug=False):
    from cupid import pilib
    from iiutilities import dblib
    from iiutilities import datalib
    from iiutilities import utility

    # get defaults
    defaults = pilib.dirs.dbs.control.read_table('defaults')
    default_dict = {}
    for default_item in defaults:
        default_dict[default_item['valuename']] = default_item['value']

    # get current entries
    previnputs = pilib.dirs.dbs.control.read_table('inputs')

    # Make list of IDs for easy indexing
    previnputids = []
    for input in previnputs:
        previnputids.append(input['id'])

    # Iterate over devices. Determine if values exist for polltime, frequency.
    # If so, update the device. If not, use defaults.
    # Then determine whether we should update value or not (Read temperature)

    for index, device in enumerate(busdevices):
        # print(device.__dict__)
        if device.sensorid in previnputids:
            try:
                newpollfreq = float(previnputs[previnputids.index(
                    device.sensorid)]['pollfreq'])
            except ValueError:
                device.pollfreq = float(default_dict['inputpollfreq'])
            else:
                if newpollfreq >= 0:
                    device.pollfreq = float(previnputs[previnputids.index(
                        device.sensorid)]['pollfreq'])
                else:
                    device.pollfreq = float(default_dict['inputpollfreq'])

            device.ontime = previnputs[previnputids.index(
                device.sensorid)]['ontime']
            device.offtime = previnputs[previnputids.index(
                device.sensorid)]['offtime']
            device.polltime = previnputs[previnputids.index(
                device.sensorid)]['polltime']
            device.value = previnputs[previnputids.index(
                device.sensorid)]['value']
            device.log_options = previnputs[previnputids.index(
                device.sensorid)]['log_options']
        else:
            device.pollfreq = float(default_dict['inputpollfreq'])
            device.ontime = ''
            device.offtime = ''
            device.polltime = ''
            device.value = ''
        """
        We're going to set a name because calling things by their ids is getting
        a bit ridiculous, but we can't have empty name fields if we rely on them
        being there. They need to be unique, so we'll name them by type and increment them

        Not really sure why this is conditional?
        """

        if device.type in ['DS18B20', 'DS1825']:

            # Get name if one exists
            name = dblib.sqlitedatumquery(
                pilib.dirs.dbs.control,
                'select name from ioinfo where id=\'' + device.sensorid + '\'')

            # If doesn't exist, check to see if proposed name exists. If it doesn't, add it.
            # If it does, keep trying.

            if name == '':
                for rangeindex in range(100):
                    # check to see if name exists
                    name = device.type + '-' + str(int(index + 1))
                    # print(name)
                    foundid = dblib.sqlitedatumquery(
                        pilib.dirs.dbs.control,
                        'select id from ioinfo where name=\'' + name + '\'')
                    # print('foundid' + foundid)
                    if foundid:
                        pass
                    else:
                        dblib.sqlitequery(
                            pilib.dirs.dbs.control,
                            dblib.makesqliteinsert(
                                'ioinfo',
                                valuelist=[device.sensorid, name],
                                valuenames=['id', 'name']))
                        break
            device.name = name

            device.time_since_last = datalib.timestringtoseconds(
                datalib.gettimestring()) - datalib.timestringtoseconds(
                    device.polltime, defaulttozero=True)

            # Is it time to read temperature?
            if device.time_since_last > device.pollfreq:
                utility.log(
                    pilib.dirs.logs.io, 'reading temperature [' + device.name +
                    '][' + device.id + ']', 9, pilib.loglevels.io)
                device.readprop('temperature', myProxy)
                device.polltime = datalib.gettimestring()
                device.value = device.temperature.decode('utf-8')
            else:
                utility.log(
                    pilib.dirs.logs.io,
                    'not time to poll',
                    9,
                    pilib.loglevels.io,
                )
                # print('not time to poll')

            device.unit = 'F'

        # We update the device and send them back for other purposes.
        busdevices[index] = device

    return busdevices
Example #20
def application(environ, start_response):
    import cgi
    import json
    import hashlib

    # Set top folder to allow import of modules

    import os, sys, inspect

    top_folder = \
        os.path.split(os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0])))[0]
    if top_folder not in sys.path:
        sys.path.insert(0, top_folder)

    import ii_netstats

    from iiutilities import dblib, datalib
    from iiutilities.utility import newunmangle
    from time import time

    # post_env = environ.copy()
    # post_env['QUERY_STRING'] = ''
    # post = cgi.FieldStorage(
    #     fp=environ['wsgi.input'],
    #     environ=post_env,
    #     keep_blank_values=True
    # )
    # formname = post.getvalue('name')
    #
    # output = {}
    #
    # d = {}
    # for k in post.keys():
    #     # print(k)
    #     d[k] = post.getvalue(k)

    try:
        request_body_size = int(environ.get('CONTENT_LENGTH', 0))
    except ValueError:
        request_body_size = 0

    request_body = environ['wsgi.input'].read(request_body_size)
    post = json.loads(request_body.decode('utf-8'))

    status = '200 OK'
    output = {'data': [], 'message': ''}

    d = post

    wsgiauth = False
    authverified = False

    if wsgiauth:
        # Verify that session login information is legit: the hashed password, combined with the salt
        # and username, matches the hash stored in the database.
        import hashlib

        if 'sessionuser' in d:
            output['message'] += 'Session user is ' + d['sessionuser'] + '. '
        else:
            output['message'] += 'No session user found. '
            d['sessionuser'] = ''

        try:
            condition = "name='" + d['sessionuser'] + "'"
            userdata = dblib.readonedbrow(inventorylib.sysvars.dirs.dbs.safe,
                                          'users',
                                          condition=condition)[0]
        except:
            output[
                'message'] += 'error in user sqlite query for session user "' + d[
                    'sessionuser'] + '". '
            userdata = {'accesskeywords': 'demo', 'admin': False}
        else:
            # Get session hpass to verify credentials
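            # The stored entry is expected to be md5(sha1(username) + salt + hashed password);
            # recompute it here and compare against the database value.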
            hashedpassword = d['sessionhpass']
            hname = hashlib.new('sha1')
            hname.update(d['sessionuser'])
            hashedname = hname.hexdigest()
            hentry = hashlib.new('md5')
            hentry.update(hashedname + ii_netstats.salt + hashedpassword)
            hashedentry = hentry.hexdigest()
            if hashedentry == userdata['password']:
                # successful auth
                output['message'] += 'Password verified. '
                authverified = True
    else:
        output['message'] += 'WSGI authorization not enabled. '

    if authverified or not wsgiauth:
        try:
            action = d['action']
        except KeyError:
            output['message'] = 'no action in request'
        else:
            # Stock functions
            if action == 'getnetstatsdata':
                output['message'] += 'getting netstats keyword found. '
                import datetime
                the_day = datetime.date.today()
                if 'day' in d:
                    # We will pass in a day in format yyyy-mm-dd or keywords, like 'today'
                    import datetime, time
                    today = datetime.date.today()
                    if d['day'] == 'today':
                        pass
                    elif d['day'] == 'prev_day':
                        the_day = today - datetime.timedelta(days=1)
                    elif d['day'] == 'prev_2_day':
                        the_day = today - datetime.timedelta(days=2)
                    elif d['day'] == 'prev_3_day':
                        the_day = today - datetime.timedelta(days=3)
                    elif d['day'] == 'prev_4_day':
                        the_day = today - datetime.timedelta(days=4)

                if the_day == datetime.date.today():
                    db_path = ii_netstats.netstats_dbpath
                else:
                    db_path_root = ii_netstats.netstats_dbpath.split('.db')[0]
                    date_string = '{}-{:02d}-{:02d}'.format(
                        the_day.year, the_day.month, the_day.day)
                    db_path = '{}_{}.db'.format(db_path_root, date_string)
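                # e.g. with a hypothetical base path of /var/www/netstats.db, requesting 'prev_day'
                # on 2017-06-02 selects /var/www/netstats_2017-06-01.db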

                print('** DBPATH: {} '.format(db_path))
                netstats_db = dblib.sqliteDatabase(db_path)

                output['message'] += 'db path {} chosen. '.format(db_path)

                wired_history = netstats_db.read_table('wired')
                if 'dataperiod' in d:
                    output['message'] += 'Limiting returned time to ' + d[
                        'dataperiod'] + '. '
                    # default 6hrs
                    period = 6 * 3600
                    if d['dataperiod'] == '6_hrs':
                        period = 6 * 3600
                    elif d['dataperiod'] == '12_hrs':
                        period = 12 * 3600
                    elif d['dataperiod'] == '24_hrs':
                        period = 24 * 3600
                    elif d['dataperiod'] == '48_hrs':
                        period = 48 * 3600
                    elif d['dataperiod'] == '7_days':
                        period = 7 * 24 * 3600

                    unmodified_length = len(wired_history)

                    # return only data within last period
                    from operator import itemgetter
                    from iiutilities.datalib import timestringtoseconds
                    new_list = sorted(wired_history,
                                      key=itemgetter('time'),
                                      reverse=True)

                    output['message'] += 'Most recent data point: ' + new_list[
                        0]['time'] + '. '
                    new_history = []
                    most_recent_time_in_seconds = timestringtoseconds(
                        new_list[0]['time'])
                    output['message'] += 'Most recent time in seconds ' + str(
                        most_recent_time_in_seconds) + '. '

                    output['message'] += 'Oldest time in seconds ' + str(
                        timestringtoseconds(new_list[-1]['time'])) + '. '
                    output['message'] += 'Span of ' + str(
                        most_recent_time_in_seconds -
                        timestringtoseconds(new_list[-1]['time'])) + '. '
                    output['message'] += 'Period of ' + str(period) + '. '

                    for item in new_list:
                        if most_recent_time_in_seconds - timestringtoseconds(
                                item['time']) < period:
                            new_history.append(item)
                    output['data'] = new_history
                    modified_length = len(new_history)

                    output['message'] += 'Shortened data from ' + str(
                        unmodified_length) + ' to ' + str(modified_length)
                else:
                    output['data'] = wired_history
                try:
                    from urllib2 import urlopen
                    my_ip = urlopen('http://ip.42.pl/raw').read()
                except:
                    my_ip = 'unknown'
                output['host'] = my_ip
            elif action == 'gettraffichistodata':
                output['message'] += 'gettraffic histo keyword found. '
                access_db = dblib.sqliteDatabase(ii_netstats.access_dbpath)

                access_db_tablenames = access_db.get_table_names()
                # output['message'] += 'Tables to search through: ' + str(access_db_tablenames) + '. '

                tables_to_fetch = []
                for tablename in access_db_tablenames:
                    if tablename.find('remotehisto') >= 0 or tablename.find(
                            'metadata') >= 0:
                        tables_to_fetch.append(tablename)
                # output['message'] += 'Fetching tables ' + str(tables_to_fetch) + '. '
                output['data'] = {}
                for table_to_fetch in tables_to_fetch:
                    output['data'][table_to_fetch] = access_db.read_table(
                        table_to_fetch)

            elif action == 'postwirelessdata':
                output['message'] += 'postwirelessdata keyword found. '

                # nothing here yet

    if 'data' in output:
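        # Compute an ETag (md5 of the serialized data). If the client sent a matching
        # 'etag', answer 304 Not Modified and drop the data payload from the response.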
        if output['data']:
            newetag = hashlib.md5(str(output['data'])).hexdigest()
            if 'etag' in d:
                if newetag == d['etag']:
                    status = '304 Not Modified'
                    output['data'] = ''
        else:
            newetag = ''
    else:
        newetag = ''

    if 'datasize' in d:
        output['datasize'] = sys.getsizeof(output['data'])

    output['etag'] = newetag
    try:
        foutput = json.dumps(output, indent=1)
    except:
        import csv
        w = csv.writer(
            open("/usr/lib/iicontrollibs/inventory/dumperr.log", "w"))
        for key, val in output.items():
            w.writerow([key, val])
        # make sure the response below still has something to return
        foutput = json.dumps({'message': 'error dumping output to JSON; see dumperr.log. '})

    response_headers = [('Content-type', 'application/json')]
    response_headers.append(('Etag', newetag))
    start_response(status, response_headers)

    return [foutput]
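
A minimal client-side sketch of how the ETag negotiation above could be exercised. The endpoint URL and the fetch_netstats helper are hypothetical illustrations, not part of the module; the request keys ('action', 'day', 'dataperiod', 'etag') are the ones the handler reads, and re-sending the last etag lets the server answer 304 Not Modified with an empty data payload.

import json
import urllib2

def fetch_netstats(url, day='today', dataperiod='24_hrs', last_etag=None):
    # Build the JSON body the WSGI handler expects.
    request = {'action': 'getnetstatsdata', 'day': day, 'dataperiod': dataperiod}
    if last_etag:
        request['etag'] = last_etag
    req = urllib2.Request(url, json.dumps(request),
                          {'Content-type': 'application/json'})
    try:
        response = urllib2.urlopen(req)
    except urllib2.HTTPError as e:
        if e.code == 304:
            # Nothing changed since last_etag; the cached copy is still current.
            return None
        raise
    return json.loads(response.read())

# Hypothetical usage:
# stats = fetch_netstats('http://localhost/netstats/wsgi', dataperiod='6_hrs')
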
示例#21
0
def application(environ, start_response):
    import cgi
    import json
    import hashlib

    # Set top folder to allow import of modules

    import os, sys, inspect

    top_folder = \
        os.path.split(os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0])))[0]
    if top_folder not in sys.path:
        sys.path.insert(0, top_folder)

    import ii_netstats

    from iiutilities import dblib, datalib
    from iiutilities.utility import newunmangle
    from time import time

    # post_env = environ.copy()
    # post_env['QUERY_STRING'] = ''
    # post = cgi.FieldStorage(
    #     fp=environ['wsgi.input'],
    #     environ=post_env,
    #     keep_blank_values=True
    # )
    # formname = post.getvalue('name')
    #
    # output = {}
    #
    # d = {}
    # for k in post.keys():
    #     # print(k)
    #     d[k] = post.getvalue(k)

    try:
        request_body_size = int(environ.get('CONTENT_LENGTH', 0))
    except ValueError:
        request_body_size = 0

    request_body = environ['wsgi.input'].read(request_body_size)
    post = json.loads(request_body.decode('utf-8'))


    status = '200 OK'
    output = {'data': [], 'message': ''}

    d = post

    wsgiauth = False
    authverified = False

    if wsgiauth:
        # Verify that the session login information is legitimate: the hashed password,
        # combined with the salt and hashed username, must match the hash stored in the database.
        import hashlib

        if 'sessionuser' in d:
            output['message'] += 'Session user is ' + d['sessionuser'] + '. '
        else:
            output['message'] += 'No session user found. '
            d['sessionuser'] = ''

        try:
            condition = "name='" + d['sessionuser'] + "'"
            userdata = dblib.readonedbrow(inventorylib.sysvars.dirs.dbs.safe, 'users', condition=condition)[0]
        except:
            output['message'] += 'error in user sqlite query for session user "' + d['sessionuser'] + '". '
            userdata = {'accesskeywords': 'demo', 'admin': False}
        else:
            # Get session hpass to verify credentials
            hashedpassword = d['sessionhpass']
            hname = hashlib.new('sha1')
            hname.update(d['sessionuser'])
            hashedname = hname.hexdigest()
            hentry = hashlib.new('md5')
            hentry.update(hashedname + ii_netstats.salt + hashedpassword)
            hashedentry = hentry.hexdigest()
            if hashedentry == userdata['password']:
                # successful auth
                output['message'] += 'Password verified. '
                authverified = True
    else:
        output['message'] += 'WSGI authorization not enabled. '

    if authverified or not wsgiauth:
        try:
            action = d['action']
        except KeyError:
            output['message'] = 'no action in request'
        else:
            # Stock functions
            if action == 'getnetstatsdata':
                output['message'] += 'getting netstats keyword found. '
                import datetime
                the_day = datetime.date.today()
                if 'day' in d:
                    # We will pass in a day in format yyyy-mm-dd or keywords, like 'today'
                    import datetime, time
                    today = datetime.date.today()
                    if d['day'] == 'today':
                        pass
                    elif d['day'] == 'prev_day':
                        the_day = today - datetime.timedelta(days=1)
                    elif d['day'] == 'prev_2_day':
                        the_day = today - datetime.timedelta(days=2)
                    elif d['day'] == 'prev_3_day':
                        the_day = today - datetime.timedelta(days=3)
                    elif d['day'] == 'prev_4_day':
                        the_day = today - datetime.timedelta(days=4)

                if the_day == datetime.date.today():
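                    # today's data lives in the live netstats database; archived days
                    # are stored in per-day files named <netstats root>_YYYY-MM-DD.db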
                    db_path = ii_netstats.netstats_dbpath
                else:
                    db_path_root = ii_netstats.netstats_dbpath.split('.db')[0]
                    date_string = '{}-{:02d}-{:02d}'.format(the_day.year, the_day.month, the_day.day)
                    db_path = '{}_{}.db'.format(db_path_root, date_string)

                print('** DBPATH: {} '.format(db_path))
                netstats_db = dblib.sqliteDatabase(db_path)

                output['message'] += 'db path {} chosen. '.format(db_path)

                wired_history = netstats_db.read_table('wired')
                if 'dataperiod' in d:
                    output['message'] += 'Limiting returned time to ' + d['dataperiod'] + '. '
                    # default 6hrs
                    period = 6 * 3600
                    if d['dataperiod'] == '6_hrs':
                        period = 6 * 3600
                    elif d['dataperiod'] == '12_hrs':
                        period = 12 * 3600
                    elif d['dataperiod'] == '24_hrs':
                        period = 24 * 3600
                    elif d['dataperiod'] == '48_hrs':
                        period = 48 * 3600
                    elif d['dataperiod'] == '7_days':
                        period = 7 * 24 * 3600

                    unmodified_length = len(wired_history)

                    # return only data within last period
                    from operator import itemgetter
                    from iiutilities.datalib import timestringtoseconds
                    new_list = sorted(wired_history, key=itemgetter('time'), reverse=True)

                    output['message'] += 'Most recent data point: ' + new_list[0]['time'] + '. '
                    new_history = []
                    most_recent_time_in_seconds = timestringtoseconds(new_list[0]['time'])
                    output['message'] += 'Most recent time in seconds ' + str(most_recent_time_in_seconds) + '. '

                    output['message'] += 'Oldest time in seconds ' + str(timestringtoseconds(new_list[-1]['time'])) + '. '
                    output['message'] += 'Span of ' + str(most_recent_time_in_seconds - timestringtoseconds(new_list[-1]['time'])) + '. '
                    output['message'] += 'Period of ' + str(period) + '. '


                    for item in new_list:
                        if most_recent_time_in_seconds - timestringtoseconds(item['time']) < period:
                            new_history.append(item)
                    output['data'] = new_history
                    modified_length = len(new_history)

                    output['message'] += 'Shortened data from ' + str(unmodified_length) + ' to ' + str(modified_length)
                else:
                    output['data'] = wired_history
                try:
                    from urllib2 import urlopen
                    my_ip = urlopen('http://ip.42.pl/raw').read()
                except:
                    my_ip = 'unknown'
                output['host'] = my_ip
            elif action == 'gettraffichistodata':
                output['message'] += 'gettraffic histo keyword found. '
                access_db = dblib.sqliteDatabase(ii_netstats.access_dbpath)

                access_db_tablenames = access_db.get_table_names()
                # output['message'] += 'Tables to search through: ' + str(access_db_tablenames) + '. '

                tables_to_fetch = []
                for tablename in access_db_tablenames:
                    if tablename.find('remotehisto') >= 0 or tablename.find('metadata') >= 0:
                        tables_to_fetch.append(tablename)
                # output['message'] += 'Fetching tables ' + str(tables_to_fetch) + '. '
                output['data'] = {}
                for table_to_fetch in tables_to_fetch:
                    output['data'][table_to_fetch] = access_db.read_table(table_to_fetch)

            elif action == 'postwirelessdata':
                output['message'] += 'postwirelessdata keyword found. '

                # nothing here yet

    if 'data' in output:
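        # Compute an ETag (md5 of the serialized data). If the client sent a matching
        # 'etag', answer 304 Not Modified and drop the data payload from the response.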
        if output['data']:
            newetag = hashlib.md5(str(output['data'])).hexdigest()
            if 'etag' in d:
                if newetag == d['etag']:
                    status = '304 Not Modified'
                    output['data'] = ''
        else:
            newetag = ''
    else:
        newetag = ''

    if 'datasize' in d:
        output['datasize'] = sys.getsizeof(output['data'])

    output['etag'] = newetag
    try:
        foutput = json.dumps(output, indent=1)
    except:
        import csv
        w = csv.writer(open("/usr/lib/iicontrollibs/inventory/dumperr.log", "w"))
        for key, val in output.items():
            w.writerow([key, val])
        # make sure the response below still has something to return
        foutput = json.dumps({'message': 'error dumping output to JSON; see dumperr.log. '})

    response_headers = [('Content-type', 'application/json')]
    response_headers.append(('Etag', newetag))
    start_response(status, response_headers)

    return [foutput]
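
For quick local testing, the handler above can be mounted on the standard-library WSGI reference server; the host, port, and __main__ guard below are assumptions for illustration, and in deployment the module is presumably served by mod_wsgi or a similar WSGI container instead.

if __name__ == '__main__':
    # Serve the application defined above for manual testing.
    from wsgiref.simple_server import make_server

    httpd = make_server('127.0.0.1', 8080, application)
    print('Serving netstats WSGI application on http://127.0.0.1:8080 ...')
    httpd.serve_forever()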