예제 #1
0
def get_ambient(num=num):
    """Fetch `num` rows from ambient_table; return [output, columns, rows]."""
    tables = psql.setup_tables(psql.get_metadata())
    cols = ['id', 'datetime', 'ambient_temp', 'humidity']
    rendered, raw_rows = get_rows(tables['ambient_table'], cols, num)
    return [rendered, cols, raw_rows]
예제 #2
0
def get_switch(num=num):
    """Fetch `num` rows from switch_table; return [output, columns, rows]."""
    tables = psql.setup_tables(psql.get_metadata())
    cols = ['id', 'datetime', 'switch']
    rendered, raw_rows = get_rows(tables['switch_table'], cols, num)
    return [rendered, cols, raw_rows]
예제 #3
0
def get_inside(num=num):
    """Fetch `num` rows from inside_table; return [output, columns, rows]."""
    tables = psql.setup_tables(psql.get_metadata())
    cols = ['id', 'datetime', 'inside_temp1', 'inside_temp2']
    rendered, raw_rows = get_rows(tables['inside_table'], cols, num)
    return [rendered, cols, raw_rows]
예제 #4
0
def get_zwave(num=num):
    """Fetch `num` rows from zwave_table; return [output, columns, rows]."""
    tables = psql.setup_tables(psql.get_metadata())
    cols = ['id', 'datetime', 'house_Voltage', 'house_Current',
            'house_Power', 'house_Energy']
    rendered, raw_rows = get_rows(tables['zwave_table'], cols, num)
    return [rendered, cols, raw_rows]
예제 #5
0
def get_mfi(num=num):
    """Fetch `num` rows from mfi_table; return [output, columns, rows]."""
    tables = psql.setup_tables(psql.get_metadata())
    cols = ['id', 'datetime',
            'v_rms1', 'v_rms2', 'v_rms3',
            'i_rms1', 'i_rms2', 'i_rms3',
            'pf1', 'pf2', 'pf3',
            'energy_sum1', 'energy_sum2', 'energy_sum3',
            'active_pwr1', 'active_pwr2', 'active_pwr3',
            'relay1', 'relay2', 'relay3']
    rendered, raw_rows = get_rows(tables['mfi_table'], cols, num)
    return [rendered, cols, raw_rows]
예제 #6
0
def get_rows_wrapper():
    """HTTP handler: fetch rows for query args ?table=&columns=&num=.

    Forces 'id' and 'datetime' to be the first two columns (removing any
    duplicates the caller supplied). On any failure, responds 404 with the
    request args and the error text.
    """
    try:
        metadata = psql.get_metadata()
        table_dict = psql.setup_tables(metadata)
        table = request.args.get('table')
        columns = request.args.get('columns').split(',')
        # Pull out caller-supplied copies so the two columns appear once,
        # always in positions 0 and 1.
        if 'id' in columns:
            columns.remove('id')
        if 'datetime' in columns:
            columns.remove('datetime')
        columns.insert(0, 'id')
        columns.insert(1, 'datetime')
        num = int(request.args.get('num'))
        # BUG FIX: the original bound the whole (output, rows) tuple — unlike
        # every sibling wrapper, which unpacks it — and then fell through,
        # returning None on success. Unpack and return the rendered output.
        output, rows = get_rows(table_dict[table], columns, num)
        return output
    except Exception as e:
        out = 'Query Failed.\n'
        out += str(request.args)
        out += str(e)

        return make_response(out, 404)
예제 #7
0
#!/usr/bin/env python2
# Copyright 2016 The Flexbox Authors. All rights reserved.
# Licensed under the open source MIT License, which is in the LICENSE file.
import time
from flexbox import sensors
from flexbox import psql
from datetime import datetime

LOOP_WAIT = 0.25  # seconds between polls of the magnetic switch
HEARTBEAT = 60  # force a DB write at least this often (seconds), even if unchanged

metadata = psql.get_metadata()
table_dict = psql.setup_tables(metadata)
print 'Starting Switch at ' + str(datetime.now())
recorded_output_dict = None  # last row written to the DB; None until first write
time1 = datetime.now()  # timestamp of the last DB write

# Poll the magnetic door switch forever; write a row to switch_table whenever
# the reading changes, or when HEARTBEAT seconds have passed since the last
# write (liveness heartbeat). A None reading is skipped entirely.
while True:
    time.sleep(LOOP_WAIT)
    isOpen = sensors.get_magnetic_switch()
    time_diff = (datetime.now() - time1).seconds
    if isOpen is not None:
        if recorded_output_dict is None or isOpen != recorded_output_dict[
                'switch'] or time_diff > HEARTBEAT:
            output_dict = {'switch': isOpen}
            psql.add_values_to_table(table_dict['switch_table'], output_dict)
            recorded_output_dict = output_dict
            time1 = datetime.now()
예제 #8
0
def dr_local_control(mfi_hostname, mfi_outlet):
    '''
    current_command: The command that has been pulled from the server.

    new_mfi_state: The state to be sent to the mfi. It is typically the opposite of the current_command variable.
    For example, if the command is 1 because a peak shaving event is occuring, the new_mfi_state = 0.
    This relationship breaks down during other constraints, such as:
    -if the fridge has already been off past its required number of hours
    -if the fridge crosses past its upper deadband + X degrees.
    -if the connection is lost to the server.

    last_command: The previous command that the script registered from the server.
    1 indicates a peak shaving event, 0 indicates no peak shaving event.

    (DEPRECATED) last_command_datetime: The time (in utc) of the most recent registered command from the server.
    This is used in addition to the limit_counter to determine whether max_off_hours has been reached.

    (DEPRECATED) last_limit_counter: The time (in seconds) that have so far been counted against the max_off_hours variable.
    This is used to store the time spent during previous events when there are multiple events within one day.

    last_control_source: The source of the most recent control signal for the mfi.
    The two current options are:
    'server' (as in it read the value direct from the server) or
    'required_to_be_off' (as in it stopped participating in the event because it is during a time that it has been
    hardcoded to remain off)
    '''

    current_temp_list = sensors.get_inside_fridge_temps().values()
    current_temp_list = [
        temp for temp in current_temp_list if temp < 50000 and temp > -25000
    ]
    current_temp = float(
        sum(value
            for value in current_temp_list) / len(current_temp_list)) / 1000.0
    control_source = 'server'
    #Signal
    lost_connection_to_server, current_command, new_mfi_state, control_type = get_signal_from_server(
    )

    #Limits based on type of DR signal
    with open('/etc/flexbox/demand_response.yaml') as f:
        dr_properties = yaml.safe_load(f)
    available_hours_list = dr_properties['available_hours_list']
    upper_temp_band = dr_properties['upper_temp_band']

    if control_type == 'peak_shaving':
        upper_temp_plus = dr_properties['upper_temp_band_plus_peak_shaving']
    else:
        upper_temp_plus = 0
    #Previous command information
    metadata = psql.get_metadata()
    table_dict = psql.setup_tables(metadata)

    status = ''
    last_row_dict_mfi = {}
    column_names_mfi = table_dict['mfi_table'].columns.keys()
    last_row_mfi = table_dict['mfi_table'].select().\
        where(table_dict['mfi_table'].c.datetime>datetime.utcnow()-timedelta(minutes=10)).\
        order_by(table_dict['mfi_table'].c.datetime.desc()).execute().fetchone()
    if last_row_mfi:
        for i, column in enumerate(column_names_mfi):
            last_row_dict_mfi[column] = last_row_mfi[i]
        actual_mfi_state = last_row_dict_mfi['relay3']
    else:
        actual_mfi_state = None
        status += 'No MFI state read, will send signal. '
    status += 'Actual MFI state currently is ' + str(actual_mfi_state) + '. '

    column_names = table_dict['demand_response'].columns.keys()
    last_row = table_dict['demand_response'].select().\
        where(cast(table_dict['demand_response'].c.local_date,Date)==datetime.now().date()).\
        order_by(table_dict['demand_response'].c.datetime.desc()).execute().fetchone()

    last_row_peak_shaving = table_dict['demand_response'].select().\
        where(table_dict['demand_response'].c.mfi_state==0).\
        where(table_dict['demand_response'].c.control_type=='peak_shaving').\
        where(table_dict['demand_response'].c.control_source=='server').\
        order_by(table_dict['demand_response'].c.datetime.desc()).execute().fetchone()

    last_row_peak_shaving_dict = {}

    if last_row_peak_shaving:
        for i, column in enumerate(column_names):
            last_row_peak_shaving_dict[column] = last_row_peak_shaving[i]

    last_row_dict = {}
    last_mfi_state = None
    last_control_source = None
    required_fridge_is_off = datetime.now().hour not in available_hours_list
    if last_row:
        for i, column in enumerate(column_names):
            last_row_dict[column] = last_row[i]
        last_command = 1 - last_row_dict['mfi_state']
        last_mfi_state = last_row_dict['mfi_state']
        last_command_datetime = last_row_dict['datetime']
        last_limit_counter = last_row_dict['limit_counter']
        last_control_type = last_row_dict['control_type']
        last_control_source = last_row_dict['control_source']
        limit_counter = (datetime.utcnow() -
                         last_command_datetime).seconds + last_limit_counter
        updated_limit_counter = limit_counter

    if required_fridge_is_off:
        new_mfi_state = 0
        control_source = 'required_to_be_off'
        control_type = 'none'
        status += 'This is not available for DR at this hour. '
        if last_row and last_mfi_state == 1:
            updated_limit_counter = last_limit_counter
        if last_row and last_control_source == 'required_to_be_off':
            updated_limit_counter = last_limit_counter
        elif not last_row:
            updated_limit_counter = 0
        else:
            updated_limit_counter = limit_counter
    elif last_row:

        #Constraints
        upper_temp_band_plus_reached = current_temp > (upper_temp_band +
                                                       upper_temp_plus)
        return_to_upper_temp_band_reached = current_temp < upper_temp_band

        print 'is fridge supposed to be hardcoded off right now?' + str(
            required_fridge_is_off)
        if not lost_connection_to_server:

            print 'Last Signal:' + str(last_command)
            print 'Last limit counter:' + str(last_limit_counter)
            print 'Current limit counter:' + str(limit_counter)
            print 'hours that fridge is available for DR:' + str(
                available_hours_list)
            print 'Current temp is ' + str(
                current_temp) + " with band+limit as " + str(
                    upper_temp_band) + " + " + str(upper_temp_plus)

        if lost_connection_to_server:
            print 'Lost connection at: ' + str(datetime.now())
            print 'Last limit counter:' + str(last_limit_counter)
            control_source = 'lost_connection_to_server'
            try:
                with open("signals_local.json", "rb") as infile:
                    json_signal = json.load(infile)
                current_command, new_mfi_state, control_type = get_signal_from_dict(
                    json_signal)
            except:
                print "Can't read from json file and lost connection to server"
                pass
        elif current_command == 1:
            print 'Current Signal:' + str(current_command)
            if last_command == 1 and upper_temp_band_plus_reached:
                new_mfi_state = 1
                status+='However, this has exceeded the upper temperature limit, '+\
                    'so we are overriding until we cool down to the upper band.'
                control_source = 'max_temp_limit_reached'
            elif last_command == 1 and last_control_source == 'required_to_be_off':
                status += 'This is now available and engaged in a DR event. '
            elif last_command == 1 and last_control_source == 'lost_connection_to_mfi':
                status += 'This DR event is starting because mfi connection returned. '
            elif last_command == 1:
                status += 'This DR event is continuing. '
            elif last_command == 0 and last_control_source in [ 'max_temp_limit_reached',\
                                                                'lost_connection_to_server',\
                                                                'lost_connection_to_mfi']:
                updated_limit_counter = last_limit_counter
                if last_control_source in [
                        'lost_connection_to_server', 'lost_connection_to_mfi'
                ]:
                    status+='The last entry in the database was a lost connection value,'+\
                        ' so we are writing to indicate connection was restored. '
                elif return_to_upper_temp_band_reached:
                    status += 'We are no longer above the upper band, so we are joining the DR event again. '
                else:
                    new_mfi_state = 1
                    control_source = last_control_source
                    status += 'This DR event is being overriden by ' + last_control_source + '. '
            elif last_command == 0:
                updated_limit_counter = last_limit_counter
                status += 'We were given the signal to begin a new DR event. '
        elif current_command == 0:
            if last_command == 1 and last_control_source == 'required_to_be_off':
                status += 'We are now available for DR events again.'
                updated_limit_counter = last_limit_counter
            elif last_command == 1:
                status += 'We were given the signal to stop this DR event. '
            elif last_command == 0 and last_control_source == 'lost_connection_to_server':
                updated_limit_counter = last_limit_counter
                status += 'We are not in a DR event but we have re-established the server connection. '
            elif last_command == 0 and last_control_source == 'lost_connection_to_mfi':
                status += 'We are not in a DR event but we have re-established the mfi connection. '
                updated_limit_counter = last_limit_counter
            elif last_command == 0 and last_control_source == 'max_temp_limit_reached':
                status += 'We are not in a DR event so we no longer need to worry about temperature limits. '
                updated_limit_counter = last_limit_counter
            elif last_command == 0:
                status += 'We are not in a DR event. '
    elif not lost_connection_to_server:
        updated_limit_counter = 0
        status += 'No previous command yet today. '
        '''
        if required_fridge_is_off:
            new_mfi_state = 0
            control_source = 'required_to_be_off'
            control_type = 'none'
            status+='This is not available for DR at this hour. '
        '''
        if current_command == 1:
            status += 'Beginning first DR event. '
            updated_limit_counter = 0
        elif current_command == 0:
            pass
    else:
        updated_limit_counter = 0
        control_source = 'lost_connection_to_server'
        status += 'Has not yet found a connection to server: ' + str(
            datetime.now()) + '. '

    if actual_mfi_state == None or actual_mfi_state != new_mfi_state:
        status += 'Send Command to MFI.'
        mfi_success = mfi.control_mfi(mfi_hostname, new_mfi_state, mfi_outlet)
        print mfi_success
        print 'Last actual mfi state was ' + str(actual_mfi_state)
        print 'Last recorded mfi state was ' + str(last_mfi_state)
        print 'Currently sending mfi state as ' + str(new_mfi_state)
        if not mfi_success:
            status += 'Lost connection to mfi. '
            control_source = 'lost_connection_to_mfi'

    status += 'Adding a row to the database. '
    update_mfi_state_sql(table_dict['demand_response'], new_mfi_state,
                         control_source, control_type, updated_limit_counter)

    print status
예제 #9
0
import socket
import struct
import sys
import random

PCOUNT = 6  # presumably ping count per test run — confirm against usage below
PINTERVAL = 30  # Seconds less than a minute
BINTERVAL = 120  # Minutes
BWSIZE = 375000  # Three Megabits (Mb) in Bytes (B)
# Random-digit payload for the bandwidth test, built once at import time.
BWSEND = ''.join([str(random.randint(0, 9)) for x in range(BWSIZE)])

SERVERNAME = 'yourserverdomain.com'  # placeholder — set per deployment
PORT = 51337

# Database Setup
network_tests = psql.setup_tables(psql.get_metadata())['network_tests']

# TODO
# Use arrow arithmetic and arrow objects instead of seconds and magic numbers
# Add a SIGTERM handler
# xmit: optimize by pooling data together


def write_db(data):
    """Persist one test-result record to the network_tests table via psql."""
    psql.add_values_to_table(network_tests, data)


def get_modem_usage():
    try:
        r = requests.get(
            'http://192.168.8.1/api/monitoring/traffic-statistics')
예제 #10
0
def main():
    """Export the last `past_days` days of each configured table to CSV and
    bundle the dumps (plus config and logs) into a tar archive.

    Python 2 only: relies on `StringIO.StringIO`, `print`, and subscripting
    the result of `zip()`.
    """
    # Backup configuration: which tables, how far back, and where to write.
    with open(os.path.join(YAMLCFGDIR, YAMLCFGFILE), 'r') as f:
        cfg = yaml.safe_load(f)
        past_days = int(cfg['past_days'])
        tables = cfg['tables']
        backup_path = os.path.join(cfg['dev'], cfg['dir'],
                                   socket.gethostname())
        table_settings = cfg['table_settings']
        if not os.path.exists(backup_path):
            os.makedirs(backup_path)

    # Midnight (UTC) today; the export window is [start_date, current_date].
    current_date = datetime(*(time.gmtime()[0:3]))
    start_date = current_date - timedelta(days=past_days)
    metadata = psql.get_metadata()
    table_dict = psql.setup_tables(metadata)

    data = {}
    # NOTE(review): this rebinds `metadata` (the psql metadata above) to a
    # list of report strings — confusing shadowing, but harmless here.
    metadata = []
    earliest_datapoint_date = datetime(3000, 1, 1)
    latest_datapoint_date = datetime(1970, 1, 1)
    for table_name in tables:
        test_date = start_date
        table = table_dict[table_name]
        values = []

        #Look for first id after the requested date
        # Walk forward one day at a time until some day in the window has rows.
        test_values = []
        while test_date <= current_date:
            test_values = table.select().where(
                and_(test_date <= table.c.datetime, table.c.datetime <
                     (test_date + timedelta(days=1)))).execute()
            test_values_cols = test_values.keys()
            test_values = test_values.fetchall()
            if len(test_values):
                break
            test_date += timedelta(days=1)

        #test_values, table min,max limits,
        #write out csv as another fcn table_name, colnames, values
        if len(test_values):
            #pull data starting from the entry with that date
            # Smallest id seen on the first non-empty day (zip()[i] is py2-only).
            start_date_id = min(
                zip(*test_values)[test_values_cols.index('id')])
            count = sqlalchemy.select([
                sqlalchemy.func.count()
            ]).select_from(table).execute().fetchone()[0]
            # Clamp the number of rows pulled between the configured per-day
            # min and max for this table.
            min_pull = max(
                int(table_settings[table_name]['min']) * past_days,
                count - start_date_id + 1)
            limit = min(min_pull,
                        int(table_settings[table_name]['max']) * past_days)
            # NOTE(review): the .where(id >= start_date_id) filter is commented
            # out, so this pulls `limit` rows from the start of the table by
            # id — confirm that is intended.
            values = table.select().order_by(table.c.id).limit(
                limit).execute()  #.where(table.c.id >= start_date_id).
            colnames = values.keys()
            values = values.fetchall()
            first_datapoint_date = min(
                zip(*values)[colnames.index('datetime')])
            last_datapoint_date = max(zip(*values)[colnames.index('datetime')])

            # Write this table's rows to an in-memory CSV buffer.
            csvIO = StringIO.StringIO()
            outCSV = csv.writer(csvIO)
            outCSV.writerow(colnames)
            outCSV.writerows(values)
            data[table_name] = csvIO

            #metadata and logging
            earliest_datapoint_date = min(earliest_datapoint_date,
                                          first_datapoint_date)
            latest_datapoint_date = max(latest_datapoint_date,
                                        last_datapoint_date)
            report = REPORTSTR.format(APPNAM, APPVER, table_name, start_date,
                                      test_date, first_datapoint_date,
                                      start_date_id, last_datapoint_date,
                                      len(values), count)
            metadata.append(report)
            print(report)
            logging.info(report)
        print(table_name)

    # Logging configuration (only log_dir is used for the archive contents).
    with open(LOGYAMLCFGFILE, 'r') as f:
        cfg = yaml.safe_load(f)
        log_name = '-'.join([APPNAM, APPVER, __name__])
        log_dir = cfg['log_dir']

    # Optional output filename from argv, prefixed with a timestamp.
    out_file = None
    if len(sys.argv) > 1:
        out_file = sys.argv[1]
        out_file = str(time.time()) + '_' + out_file
    tar_dump(backup_path, data, earliest_datapoint_date, latest_datapoint_date,
             StringIO.StringIO("\n".join(metadata)), [YAMLCFGDIR, log_dir],
             'V' + APPVER, out_file)