def snmp_agent_start():
    """Starts the SNMP agent in a daemon-style worker thread."""
    from server.snmp import agent

    # The agent runs in a thread rather than a multiprocessing process
    # because dynamic variables are shared between the main process and
    # the agent.
    snmp_agent = agent.SNMPAgent()
    try:
        agent_thread = threading.Thread(target=snmp_agent.engine, args=())
        agent_thread.start()
        # NOTE(review): status key is 'snmp_notify' even though this starts
        # the agent (dispatcher() reports under 'snmp_agent') -- confirm.
        MPQ_STAT.put_nowait([
            'snmp_notify',
            agent_thread.ident
        ])
    except threading.ThreadError:
        log = 'Could not start SNMP agent due to threading error.'
        logger.exception(log)
        MPQ_ACT.put_nowait([
            datetime.now().isoformat(' '),
            'CRITICAL',
            log
        ])
        MPQ_STAT.put_nowait([
            'snmp_notify',
            False
        ])
def i2c_lane_set(self, addr_ln: int, stat_en: bool = True, attempts: int = 3):
    """
    Sets lane for I2C network

    :param addr_ln: int
    :param stat_en: bool
    :param attempts: int

    :return stat_iface: STAT_LVL['op'] or STAT_LVL['crit']
    """
    attempt = 0
    err_iface = False
    stat_iface = STAT_LVL['op']

    # Nothing to do when the interface is already on the requested lane;
    # otherwise retry the lane switch up to `attempts` times.
    if addr_ln != self.addr_ln:
        for attempt in range(1, attempts + 1):
            err_iface = self.obj_janus.i2c_set_lane(addr_ln=addr_ln)
            if not err_iface:
                # Success: remember the active lane and stop retrying
                err_iface = False
                self.addr_ln = addr_ln
                break

            # Normalize the driver's error code to a plain boolean
            err_iface = True
            if stat_en:
                MPQ_STAT.put_nowait(
                    ['base', ['interface', STAT_LVL['op_err']]])

            # Only log warning on last attempt, keeps log clean
            if attempt == attempts:
                log = 'Attempt {0} of {1} to '.format(attempt, attempts) + \
                      'set interface to lane {0} failed.'.format(addr_ln)
                logger.warning(log)
                MPQ_ACT.put_nowait(
                    [datetime.now().isoformat(' '), 'WARNING', log])

    if err_iface:
        stat_iface = STAT_LVL['op_err']
        if stat_en:
            MPQ_STAT.put_nowait(['base', ['interface', stat_iface]])
        log = 'Set lane {0} on four port interface failed.'.format(addr_ln)
        logger.critical(log)
    else:
        log = 'Set lane {0} on four port interface succeeded after {1} attempts.'. \
            format(addr_ln, attempt)
        logger.info(log)

    return stat_iface
def reset(obj_iface: TYPE_INTERFACE, addr_ln: int):
    """
    Resets lane

    :param obj_iface: Interface Object
    :param addr_ln: int

    :return stat_iface: STAT_LVL['op'] or STAT_LVL['crit']
    """
    # This runs in a separate multiprocess, so drain the setup-log queue
    # and apply only the most recent logging-level setting.
    level_map = {
        'DEBUG': logging.DEBUG,
        'INFO': logging.INFO,
        'ERROR': logging.ERROR,
        'WARNING': logging.WARNING,
        'CRITICAL': logging.CRITICAL,
    }
    while not MPQ_SETUP_LOG_RESET.empty():
        level_entry = MPQ_SETUP_LOG_RESET.get()
        if level_entry[0] in level_map:
            logger.setLevel(level_map[level_entry[0]])

    # Pulse the lane GPIO pin: HIGH, hold 50 ms ...
    stat_iface = obj_iface.gpio_write(addr_ln=addr_ln,
                                      data_iface_in=1,
                                      stat_en=False)
    time.sleep(0.050)

    # ... then LOW, hold 50 ms.
    # NOTE(review): an earlier comment claimed a 2 ms LOW hold but the
    # sleep is 0.050 s -- confirm intended duration.
    if not stat_iface:
        stat_iface = obj_iface.gpio_write(addr_ln=addr_ln,
                                          data_iface_in=0,
                                          mode=False,
                                          stat_en=False)
        time.sleep(0.050)

    if stat_iface == STAT_LVL['crit']:
        log = 'Lane {0} network reset can not be completed due to interface errors.'.format(
            addr_ln)
        logger.critical(log)
        MPQ_ACT.put_nowait([datetime.now().isoformat(' '), 'CRITICAL', log])
    else:
        log = 'Lane {0} network reset complete.'.format(addr_ln)
        logger.info(log)
        MPQ_ACT.put_nowait([datetime.now().isoformat(' '), 'DEBUG', log])
        print(log)

    return stat_iface
def comms_module(data_cdb_out: dict, obj_iface: TYPE_INTERFACE = ''):
    """
    Uploads module address and configuration to connected module

    :param data_cdb_out: dict
    :param obj_iface: Interface Object

    :return stat_iface: STAT_LVL['op'] or STAT_LVL['crit']
    """
    triggers_out = []
    stat_iface = STAT_LVL['op']

    # Cycle through module's installed sensors, building one I2C packet
    # holding every sensor's low and high trigger values
    for addr_s in range(0, data_cdb_out['num_sensors']):
        sensor = 'S{0}'.format(addr_s)

        # Floats are packed big-endian, then byte-reversed for the module
        trigger_low = struct.unpack(
            '4B', struct.pack('>f', float(data_cdb_out[sensor]['trig_low'])))[::-1]
        trigger_high = struct.unpack(
            '4B', struct.pack('>f', float(data_cdb_out[sensor]['trig_high'])))[::-1]
        for index in trigger_low:
            triggers_out.append(index)
        for index in trigger_high:
            triggers_out.append(index)

    # Upload trigger values to module in a single I2C write
    if len(triggers_out) > 0:
        stat_iface = obj_iface.i2c_write(
            addr_ln=data_cdb_out['lane_addr'],
            addr_mod=data_cdb_out['mod_addr'],
            addr_mem=MMAP[data_cdb_out['mem_map_ver']]['S0_LTRIG'][0],
            data_iface_in=triggers_out)

        if stat_iface:
            # BUGFIX: the original concatenated '...updated.' + 'due to...'
            # producing "updated.due to interface error." -- separator fixed.
            log = 'Triggers for lane {0} '.format(data_cdb_out['lane_addr']) + \
                  'module {0} could not be updated '.format(data_cdb_out['mod_addr']) + \
                  'due to interface error.'
            logger.warning(log)
            MPQ_ACT.put_nowait([datetime.now().isoformat(' '), 'WARNING', log])
        else:
            log = 'Triggers for lane {0} '.format(data_cdb_out['lane_addr']) + \
                  'module {0} were successfully updated.'.format(data_cdb_out['mod_addr'])
            logger.info(log)
            MPQ_ACT.put_nowait([datetime.now().isoformat(' '), 'INFO', log])

    return stat_iface
def store():
    """
    Stores core time in file

    :return stat_time: STAT_LVL['op'] or STAT_LVL['op_err']
    :return stat_cdb: STAT_LVL['op'] or STAT_LVL['crit']
    """
    # NOTE(review): stat_time is never changed after initialization, even
    # when the CouchDB update fails -- confirm whether it should track
    # stat_cdb. Behavior preserved as-is.
    stat_time = STAT_LVL['op']

    # Persist current epoch time into the CouchDB config/time document
    data_cdb_out, stat_cdb, code_cdb = dbase.cdb_request(
        cdb_cmd='upd_doc',
        cdb_name='config',
        cdb_doc='time',
        data_cdb_in={'time': str(time.time())},
        logfile=logfile
    )
    if stat_cdb:
        log = 'Could not save time due to CouchDB document update error.'
        logger.warning(log)
        MPQ_ACT.put_nowait([datetime.now().isoformat(' '), 'WARNING', log])
        MPQ_STAT.put_nowait(['base', ['couchdb', STAT_LVL['op_err']]])

    log = 'Time storage complete.'
    logger.info(log)
    MPQ_ACT.put_nowait([datetime.now().isoformat(' '), 'INFO', log])

    if not stat_time:
        MPQ_STAT.put_nowait(['base', ['tasks', stat_time]])
def down():
    """
    Determines JanusESS down time

    :return time_down: int
    """
    # Fetch the last stored core time from CouchDB
    data_cdb_out, stat_cdb, code_cdb = dbase.cdb_request(
        cdb_cmd='get_doc',
        cdb_name='config',
        cdb_doc='time',
        logfile=logfile
    )

    time_down = 0
    if not stat_cdb:
        # Guard: only compute downtime when system time is ahead of the
        # stored timestamp (i.e. system clock is valid)
        if float(time.time()) > float(data_cdb_out['time']):
            # Downtime in whole minutes
            time_down = round(
                (int(float(time.time())) - int(float(data_cdb_out['time']))) / 60,
                0
            )
            log = 'JanusESS downtime was {0} minutes'.format(time_down)
            logger.info(log)
            MPQ_ACT.put_nowait([
                datetime.now().isoformat(' '),
                'INFO',
                log
            ])
    else:
        log = 'Could not determine JanusESS down time due to CouchDB document retrieval error.'
        logger.warning(log)
        MPQ_ACT.put_nowait([
            datetime.now().isoformat(' '),
            'WARNING',
            log
        ])

    return time_down
def update(interval: int):
    """
    Produces timed status reports

    :param interval: int
    """
    begin_msg = 'Conducting {0}-hour system status check.'.format(interval)
    logger.info(begin_msg)
    MPQ_ACT.put_nowait([datetime.now().isoformat(' '), 'INFO', begin_msg])

    # Only issue messaging if recent network check shows it is up
    send_mail(
        msg_type='status_dispatch',
        args=[],
    )

    end_msg = '{0}-hour status check completed.'.format(interval)
    logger.info(end_msg)
    MPQ_ACT.put_nowait([datetime.now().isoformat(' '), 'INFO', end_msg])
    MPQ_STAT.put_nowait(['base', ['tasks', STAT_LVL['op']]])
def snmp_notify_start(self):
    """
    Starts SNMP notify

    self.args[0] = host ip
    self.args[1] = host port
    self.args[2] = host community
    """
    from server.snmp import notification

    # The notifier runs in a thread rather than a multiprocessing process
    # because dynamic variables are shared between the main process and
    # the agent.
    notifier = notification.SNMPNotify(
        self.args[0],
        self.args[1],
        self.args[2]
    )
    try:
        notify_thread = threading.Thread(target=notifier.listener, args=())
        notify_thread.start()
        MPQ_STAT.put_nowait([
            'snmp_notify',
            notify_thread.ident
        ])
    except threading.ThreadError:
        log = 'Could not start SNMP notifier due to threading error.'
        logger.exception(log)
        MPQ_ACT.put_nowait([
            datetime.now().isoformat(' '),
            'CRITICAL',
            log
        ])
        MPQ_STAT.put_nowait([
            'snmp_notify',
            False
        ])
def listener(self):
    """
    Listens for notification activity and assigns to appropriate process
    """
    log = 'SNMP notification listener started.'
    logger.info(log)
    print(log)
    MPQ_ACT.put_nowait([datetime.now().isoformat(' '), 'INFO', log])
    MPQ_STAT.put_nowait(['base', ['snmp_notify', STAT_LVL['op']]])

    # Each agent queue maps to the handler that consumes its records;
    # checked in the same fixed order every pass.
    routes = (
        (MPQ_SNMPN2, self.base),
        (MPQ_SNMPN3, self.lane),
        (MPQ_SNMPN4, self.module),
        (MPQ_SNMPN5, self.poll_val),
    )

    # Poll SNMP agent queues for values to update variables until a stop
    # request arrives.
    while True:
        if not MPQ_SNMPN_STOP.empty():
            MPQ_SNMPN_STOP.get()
            break

        for mpq, handler in routes:
            if not mpq.empty():
                handler(mpq_record=mpq.get())

        time.sleep(0.1)

    MPQ_STAT.put_nowait(['snmp_notify', False])
    log = 'SNMP notification listener stopped.'
    logger.info(log)
    print(log)
    MPQ_ACT.put_nowait([datetime.now().isoformat(' '), 'INFO', log])
    MPQ_STAT.put_nowait(['base', ['snmp_notify', STAT_LVL['not_cfg']]])
def dispatcher(self):
    """
    Runs the SNMP agent I/O dispatcher until it exits or raises.

    On a critical error the dispatcher is closed and the exception
    re-raised for the caller to handle.
    """
    # Register an imaginary never-ending job to keep I/O dispatcher running forever
    self.eng_snmp.transportDispatcher.jobStarted(1)

    # Run I/O dispatcher which would receive queries and send responses
    try:
        log = 'SNMP agent dispatcher started.'
        logger.info(log)
        MPQ_ACT.put_nowait([datetime.now().isoformat(' '), 'INFO', log])
        MPQ_STAT.put_nowait(['base', ['snmp_agent', STAT_LVL['op']]])
        self.eng_snmp.transportDispatcher.runDispatcher()
    except queue.Full:
        log = 'SNMP agent dispatcher experienced critical error.'
        logger.critical(log)
        print(log)
        MPQ_ACT.put_nowait(
            [datetime.now().isoformat(' '), 'CRITICAL', log])
        MPQ_STAT.put_nowait(['base', ['snmp_agent', STAT_LVL['crit']]])
        # BUGFIX: was 'tr0ansportDispatcher' -- an AttributeError that
        # would mask the original exception in this error path.
        self.eng_snmp.transportDispatcher.closeDispatcher()
        raise
def poll_stop(self):
    """
    Stops polling

    self.args[0] = addr_ln
    """
    logfile = 'polling'
    logger = logging.getLogger(logfile)

    # Ask the poll dispatcher to immediately stop polling on this lane
    MPQ_POLL_STOP.put_nowait(self.args[0])

    log = 'Lane {0} polling stop request issued.'.format(self.args[0])
    logger.info(log)
    print(log)
    MPQ_ACT.put_nowait([datetime.now().isoformat(' '), 'INFO', log])
def clear():
    """
    Produces timed status reports
    """
    # Drain both multiprocessing queues (at most every 24 hours) so they
    # cannot fill up; looping get() is the only way to empty them.
    log = 'Attempting to clear activity and status queues.'
    logger.debug(log)
    MPQ_ACT.put_nowait([datetime.now().isoformat(' '), 'DEBUG', log])

    for mpq in (MPQ_STAT, MPQ_ACT):
        while not mpq.empty():
            mpq.get()
            time.sleep(0.001)

    log = 'Activity and status queues cleared.'
    logger.info(log)
    MPQ_ACT.put_nowait([datetime.now().isoformat(' '), 'INFO', log])
def notify(self, mib_identity: str, mib_object: dict, notify_msg: dict):
    """
    Dispatches prebuilt mib object

    :param mib_identity: str
    :param mib_object: dict
    :param notify_msg: dict
    """
    log = notify_msg['start']
    logger.debug(log)
    # NOTE(review): queued level is 'INFO' while the logger call is
    # debug() -- confirm intended; preserved as-is.
    MPQ_ACT.put_nowait([datetime.now().isoformat(' '), 'INFO', log])

    # Resolve the notification OID against the JanusESS MIB and send an
    # SNMP inform to the configured host.
    notification = NotificationType(
        ObjectIdentity(
            'JANUSESS-MIB',
            mib_identity + 'Notify').addMibSource(
            '/opt/Janus/ESS/python3/server/snmp/mibs'),
        objects=mib_object)
    error_indication, error_status, error_index, var_binds = next(
        sendNotification(
            SnmpEngine(),
            CommunityData(self.host_community),
            UdpTransportTarget((self.host_ip, self.host_port)),
            ContextData(),
            'inform',
            notification))

    if error_indication:
        log = notify_msg['error'] + str(error_indication)
        logger.warning(log)
        print(log)
        MPQ_ACT.put_nowait([datetime.now().isoformat(' '), 'WARNING', log])
        MPQ_STAT.put_nowait(['base', ['snmp_notify', STAT_LVL['op_err']]])
    else:
        log = notify_msg['end']
        logger.info(log)
        print(log)
        MPQ_ACT.put_nowait([datetime.now().isoformat(' '), 'INFO', log])
        MPQ_STAT.put_nowait(['base', ['snmp_notify', STAT_LVL['op']]])
def mdb_check():
    """
    Blocks until the local MariaDB server accepts connections.

    Retries a connect/close probe every `check_time` seconds, reporting
    status on each pass; exits once the probe succeeds.
    """
    logfile = 'janusess'
    logger = logging.getLogger(logfile)

    check_time = 3.0
    log = 'Checking MariaDB every {0} sec until operational.'.format(
        check_time)
    logger.debug(log)

    # Dead counter removed: the original kept `count = 1; count += count`
    # but never read it.
    while True:
        try:
            # Probe: open and immediately close a connection
            mdb_conn = mysql.connector.connect(
                user='******',
                password='******',
                host='127.0.0.1',
                database='aurora',
                unix_socket='/run/mysqld/mysqld.sock')
            mdb_conn.close()
            stat_mdb = STAT_LVL['op']
            log = 'MariaDB is operational.'
            logger.info(log)
            MPQ_ACT.put_nowait([datetime.now().isoformat(' '), 'INFO', log])
            MPQ_STAT.put_nowait(['base', ['mariadb', stat_mdb]])
        except mysql.connector.Error:
            stat_mdb = STAT_LVL['not_cfg']
            log = 'Local MariaDB server did not respond to check. Trying again.'
            logger.warning(log)
            MPQ_ACT.put_nowait([datetime.now().isoformat(' '), 'WARNING', log])
            MPQ_STAT.put_nowait(['base', ['mariadb', stat_mdb]])

        if stat_mdb < STAT_LVL['not_cfg']:
            break
        time.sleep(check_time)
def poll_clear(self):
    """
    Clears operation polling CouchDB

    self.args[0] = addr_ln
    """
    logfile = 'polling'
    logger = logging.getLogger(logfile)

    log = 'Attempting to clear lane {0} '.format(self.args[0]) +\
          'poll data from polling database.'
    logger.debug(log)
    MPQ_ACT.put_nowait([datetime.now().isoformat(' '), 'DEBUG', log])

    # Drop all InfluxDB series tagged with this lane's channel
    data_idb_in = 'q=DROP SERIES WHERE "chan"=\'{0}\''.format(self.args[0])
    http_resp = requests.post(
        'http://localhost:8086/query?db=JanusESS',
        headers={'Content-type': 'application/x-www-form-urlencoded'},
        data=data_idb_in
    )

    # Report the outcome of the DROP SERIES query
    if http_resp.status_code == 200:
        log = 'Clear lane {0} poll data successful.'.\
            format(self.args[0])
        logger.info(log)
        MPQ_ACT.put_nowait([datetime.now().isoformat(' '), 'INFO', log])
        MPQ_STAT.put_nowait(['base', ['influxdb', STAT_LVL['op']]])
    else:
        log = 'Could not clear lane {0} poll data due '.format(self.args[0]) +\
              'to InfluxDB query error.'
        logger.warning(log)
        MPQ_ACT.put_nowait([datetime.now().isoformat(' '), 'WARNING', log])
        MPQ_STAT.put_nowait(['base', ['influxdb', STAT_LVL['op_err']]])
def poll_start(self):
    """
    Starts polling

    self.args[0] = addr_ln
    self.args[1] = poll status
    self.args[2] = continuous
    self.args[3] = interval
    self.args[4] = count
    """
    logfile = 'polling'
    logger = logging.getLogger(logfile)

    # Hand the full argument list to the poll dispatcher for this lane
    MPQ_POLL_START.put_nowait(self.args)

    log = 'Lane {0} polling start request issued.'.format(self.args[0])
    logger.info(log)
    MPQ_ACT.put_nowait([datetime.now().isoformat(' '), 'INFO', log])
def cdb_check():
    """
    Checks CouchDB if ready to accept transactions

    Blocks, probing the local CouchDB HTTP endpoint every `check_time`
    seconds until a 200 response is received.
    """
    logfile = 'janusess'
    logger = logging.getLogger(logfile)

    check_time = 0.5
    log = 'Checking CouchDB every {0} sec until operational.'.format(
        check_time)
    logger.debug(log)

    # Dead counter removed: the original kept `count = 1; count += count`
    # but never read it.
    while True:

        # Issue CouchDB GET request and process result
        http_resp = requests.get('http://127.0.0.1:5984/')

        # Successful GET request
        if http_resp.status_code == 200:
            log = 'CouchDB is operational.'
            logger.info(log)
            MPQ_ACT.put_nowait([datetime.now().isoformat(' '), 'INFO', log])
            MPQ_STAT.put_nowait(['base', ['couchdb', STAT_LVL['op']]])
            break

        # All GET errors
        else:
            log = 'CouchDB is not operational, failed with http ' +\
                  'response {0}. Making another attempt.'.format(http_resp.status_code)
            logger.warning(log)
            MPQ_ACT.put_nowait([datetime.now().isoformat(' '), 'WARNING', log])
            MPQ_STAT.put_nowait(['base', ['couchdb', STAT_LVL['cfg_err']]])

        time.sleep(check_time)
def effect(
        self,
        led_effect: str,
        mod_uid: str,
        addr_ln: int,
        addr_mod: int,
):
    """
    Applies the named LED effect to a module (looked up in
    self.dict_led_settings)

    :param led_effect: string
    :param mod_uid: string
    :param addr_ln: int
    :param addr_mod: int
    """
    mod_doc, stat_cdb, http_cdb = dbase.cdb_request(
        cdb_cmd='get_doc',
        cdb_name='modconfig',
        cdb_doc=mod_uid,
        logfile=logfile,
        attempts=1)

    if not stat_cdb:
        # Only queue the LED command for modules that are not in a
        # critical state
        if mod_doc['status'] < STAT_LVL['crit']:
            addr_mem = MMAP[mod_doc['mem_map_ver']]['LED_ALL'][0]
            MPQ_CMD4.put([
                mod_uid,
                addr_ln,
                addr_mod,
                addr_mem,
                self.dict_led_settings[led_effect]
            ])
    else:
        # NOTE(review): queued level is 'CRITICAL' while the logger call
        # is warning() -- confirm intended; preserved as-is.
        log = 'Could not complete module LED {0} process due to CouchDB error.'.format(
            led_effect)
        logger.warning(log)
        MPQ_ACT.put_nowait(
            [datetime.now().isoformat(' '), 'CRITICAL', log])
def interrupt_check_flag(self, attempts: int = 2, stat_en: bool = True):
    """
    Checks GPIO interrupt

    :param attempts: int
    :param stat_en: bool

    :return data_iface_out: int (0/1 if STAT_LVL['op'])
    :return stat_iface: STAT_LVL['op'] or STAT_LVL['crit']
    """
    attempt = 0
    err_iface = True
    stat_iface = STAT_LVL['op']
    data_iface_out = None

    # Retry the flag read up to `attempts` times. There is only one
    # interrupt flag for all four lanes, so there is no way to isolate
    # which of the four GPIOs was triggered.
    for attempt in range(1, attempts + 1):
        data_iface_out, err_iface = self.obj_janus.interrupt_check_flag()
        if not err_iface:
            break

        MPQ_STAT.put_nowait(
            ['base', ['interface', STAT_LVL['op_err']]])
        # Only log warning on last attempt, keeps log clean
        if attempt == attempts:
            log = 'Attempt {0} of {1} to '.format(attempt, attempts) + \
                  'check GPIO interrupt flag failed.'
            logger.warning(log)
            MPQ_ACT.put_nowait(
                [datetime.now().isoformat(' '), 'WARNING', log])

    if err_iface:
        log = 'General IO failure to check GPIO interrupt flag.'
        logger.critical(log)
        MPQ_ACT.put_nowait(
            [datetime.now().isoformat(' '), 'CRITICAL', log])
        stat_iface = STAT_LVL['crit']
        print(log)
    else:
        log = 'Successfully checked GPIO interrupt flag after {0} attempts.'.\
            format(attempt)
        logger.info(log)
        MPQ_ACT.put_nowait([datetime.now().isoformat(' '), 'DEBUG', log])

    if stat_en:
        MPQ_STAT.put_nowait(['base', ['interface', stat_iface]])

    return data_iface_out, stat_iface
def interrupt_clear_flag(self, attempts: int = 3, stat_en: bool = True):
    """
    Clears GPIO interrupt

    :param attempts: int
    :param stat_en: bool

    :return stat_iface: STAT_LVL['op'] or STAT_LVL['crit']
    """
    attempt = 0
    err_iface = True
    stat_iface = STAT_LVL['op']

    # Retry the flag clear up to `attempts` times
    for attempt in range(1, attempts + 1):
        err_iface = self.obj_janus.interrupt_clear_flag()
        if not err_iface:
            break

        MPQ_STAT.put_nowait(
            ['base', ['interface', STAT_LVL['op_err']]])
        # Only log warning on last attempt, keeps log clean
        if attempt == attempts:
            log = 'Attempt {0} of {1} to '.format(attempt, attempts) + \
                  'clear GPIO interrupt flag failed.'
            logger.warning(log)
            MPQ_ACT.put_nowait(
                [datetime.now().isoformat(' '), 'WARNING', log])

    if err_iface:
        log = 'General IO failure to clear GPIO interrupt flag.'
        logger.critical(log)
        MPQ_ACT.put_nowait(
            [datetime.now().isoformat(' '), 'CRITICAL', log])
        stat_iface = STAT_LVL['crit']
        print(log)
    else:
        log = 'Successfully cleared GPIO interrupt flag after {0} attempts.'. \
            format(attempt)
        logger.info(log)
        MPQ_ACT.put_nowait([datetime.now().isoformat(' '), 'DEBUG', log])

    if stat_en:
        MPQ_STAT.put_nowait(['base', ['interface', stat_iface]])

    return stat_iface
def setup(self):
    """
    Setups USB connection to interface board
    """
    # Establish a new persistent USB handle with the interface device.
    #
    # Returns False if no error, negative integer for error type.
    err_iface = self.obj_janus.initialize()
    if not err_iface:

        # Get lane call produces error if four-lane interface is not
        # connected -- the only way to programmatically tell single- and
        # four-lane interface boards apart.
        #
        # First return value is 0 through 3 if lane is set, negative
        # integer otherwise. Second return value is False if no error,
        # negative integer for error type.
        addr_ln, err_iface = self.obj_janus.i2c_get_lane()

        if not err_iface:
            # Four-lane board confirmed; remember the active lane
            self.addr_ln = addr_ln
            log = 'Four-lane Janus Interface detected.'
            logger.info(log)
            MPQ_ACT.put_nowait(
                [datetime.now().isoformat(' '), 'INFO', log])
            print(log)
        else:
            # NOTE(review): queued level is 'INFO' for a warning and the
            # magic value 4 marks "not four-lane" -- confirm both.
            self.err_iface = 4
            log = 'Could not detect Four-lane Janus Interface.'
            logger.warning(log)
            MPQ_ACT.put_nowait(
                [datetime.now().isoformat(' '), 'INFO', log])
            print(log)

        # Persist the detected interface type in the CouchDB core document
        data_cdb_out, stat_cdb, http_cdb = dbase.cdb_request(
            cdb_cmd='upd_doc',
            cdb_name='config',
            cdb_doc='core',
            data_cdb_in={'interface': self.err_iface},
            logfile=logfile)
        if stat_cdb:
            log = 'Could not update Janus Interface type in CouchDB.'
            logger.warning(log)
            MPQ_ACT.put_nowait([datetime.now().isoformat(' '), 'WARNING', log])
def get_data(
        obj_iface: TYPE_INTERFACE,
        uid_mod: str,
        addr_ln: int,
        addr_mod: int
):
    """
    Retrieves and publishes module sensor data

    :param obj_iface: Interface Object
    :param uid_mod: str
    :param addr_ln: int
    :param addr_mod: int

    :return stat_poll_data: STAT_LVL value for this poll
    :return uid_mod_i2c_print: printable module id read over I2C
    """
    # Change logging level since this operates in multiprocess
    # Cycle to last entry for most current log setting
    while not MPQ_POLL_LOG_DATA.empty():
        mpq_record = MPQ_POLL_LOG_DATA.get()
        if mpq_record[0] == 'DEBUG':
            logger.setLevel(logging.DEBUG)
        elif mpq_record[0] == 'INFO':
            logger.setLevel(logging.INFO)
        elif mpq_record[0] == 'ERROR':
            logger.setLevel(logging.ERROR)
        elif mpq_record[0] == 'WARNING':
            logger.setLevel(logging.WARNING)
        elif mpq_record[0] == 'CRITICAL':
            logger.setLevel(logging.CRITICAL)

    # time_a/time_b/time_c/time_e below are per-phase timing probes
    # reported via print()
    time_a = time.time()
    log = 'Retrieving lane {0} module {1} id {2} data.'.format(addr_ln, addr_mod, uid_mod)
    logger.info(log)

    stat_poll_data = STAT_LVL['op']
    uid_mod_i2c = ''
    uid_mod_i2c_print = ''

    # Retrieve memory map version of module with I2C address of 0x7F.
    # If module responds, proceed to module setup actions, otherwise
    # mod_last_found flag is set.
    high_mmap = len(MMAP) - 1
    addr_mem = MMAP[high_mmap]['M_CFG_ALL'][0]
    data_len = MMAP[high_mmap]['M_CFG_ALL'][1]
    data0_iface_out, stat0_iface = obj_iface.i2c_read(
        addr_ln=addr_ln,
        addr_mod=addr_mod,
        addr_mem=addr_mem,
        data_len=data_len,
        stat_en=False
    )
    print('RAW POLL DATA: {0}'.format(data0_iface_out))
    print('Lane {0} module {1} get_data i2c config time: {2}'.
          format(addr_ln, addr_mod, round((time.time() - time_a), 3)))

    if stat0_iface:
        # Config read failed: publish op_err for base and module and abort
        log = 'Lane {0} module {1} poll can '.format(addr_ln, addr_mod) + \
              'not be completed due to I2C interface error.'
        logger.critical(log)
        MPQ_ACT.put_nowait([
            datetime.now().isoformat(' '),
            'CRITICAL',
            log
        ])
        print(log)
        MPQ_STAT.put_nowait([
            'base',
            [
                'poll_data',
                STAT_LVL['op_err']
            ]
        ])
        MPQ_STAT.put_nowait([
            'module',
            [
                uid_mod,
                addr_ln,
                addr_mod,
                STAT_LVL['op_err']
            ]
        ])
        stat_poll_data = STAT_LVL['op_err']

    else:
        # Build module id string from I2C data.
        # data0_iface_out[2] selects the module's memory map version;
        # the id bytes are walked in reverse and hex-encoded, each byte
        # zero-padded to two characters.
        mod_uid_end = MMAP[data0_iface_out[2]]['M_UID'][0] + \
            MMAP[data0_iface_out[2]]['M_UID'][1] - 1
        mod_uid_begin = MMAP[data0_iface_out[2]]['M_UID'][0] - 1
        for addr_mem in range(mod_uid_end, mod_uid_begin, -1):
            uidmod_i2c = str(hex(data0_iface_out[addr_mem]))[2:]
            if len(uidmod_i2c) == 1:
                uidmod_i2c = '0' + uidmod_i2c
            uid_mod_i2c += uidmod_i2c

        # Check that module ids match, then proceed with data collection and reporting
        uid_mod_print = ''.join(char for char in uid_mod.strip() if isprint(char))
        uid_mod_i2c_print = ''.join(char for char in uid_mod_i2c.strip() if isprint(char))
        if uid_mod_i2c_print == uid_mod_print:
            time_b = time.time()

            # Get module document from CouchDB modconfig database
            data0_cdb_out, stat0_cdb, http0_cdb = dbase.cdb_request(
                cdb_cmd='get_doc',
                cdb_name='modconfig',
                cdb_doc=uid_mod_print,
                logfile=logfile
            )

            # Get cloud document from CouchDB config database
            data1_cdb_out, stat1_cdb, http1_cdb = dbase.cdb_request(
                cdb_cmd='get_doc',
                cdb_name='config',
                cdb_doc='cloud',
                logfile=logfile
            )

            # Get core document from CouchDB config database
            data2_cdb_out, stat2_cdb, http2_cdb = dbase.cdb_request(
                cdb_cmd='get_doc',
                cdb_name='config',
                cdb_doc='core',
                logfile=logfile
            )
            print(
                'Lane {0} module {1} get_data database time: {2}'.
                format(addr_ln, addr_mod, round((time.time() - time_b), 3)))

            if not stat0_cdb and not stat1_cdb and not stat2_cdb:
                module_stat = STAT_LVL['op']
                poll_data_mod = []
                # Header shared by every sensor record from this module
                poll_head_mod = [
                    uid_mod_i2c_print,
                    data2_cdb_out['customer'],
                    data2_cdb_out['name'],
                    data0_cdb_out['loc'],
                    addr_ln,
                    addr_mod,
                ]

                # Retrieves sensor polling value from module
                time_c = time.time()
                data_iface_out, stat_iface = obj_iface.i2c_read(
                    addr_ln=addr_ln,
                    addr_mod=addr_mod,
                    addr_mem=MMAP[data0_iface_out[2]]['S_ALL_VAL'][0],
                    data_len=MMAP[data0_iface_out[2]]['S_ALL_VAL'][1]
                )
                print('Lane {0} module {1} get_data i2c sensor time: {2}'.
                      format(addr_ln, addr_mod, round((time.time() - time_c), 3)))
                print(data_iface_out)

                if not stat_iface:
                    # Cycle through all sensors installed on the module
                    led_ctl = Control()
                    for addr_s in range(0, int(data0_cdb_out['num_sensors'])):
                        sensor = 'S{0}'.format(addr_s)
                        log = 'Retrieving lane {0} module {1} sensor {2} data.'.\
                            format(addr_ln, addr_mod, addr_s)
                        logger.debug(log)

                        # Initialize polling data packet
                        data_dtg = time.time()
                        poll_data_s = [addr_s]

                        # Convert raw values to floating point number, and add to packet.
                        # Each sensor occupies 4 bytes, reassembled in reverse
                        # order and unpacked as a big-endian float.
                        val_raw = struct.pack(
                            'BBBB',
                            int(data_iface_out[3 + (addr_s * 4)]),
                            int(data_iface_out[2 + (addr_s * 4)]),
                            int(data_iface_out[1 + (addr_s * 4)]),
                            int(data_iface_out[0 + (addr_s * 4)])
                        )
                        val_convert = round(
                            struct.unpack('>f', val_raw)[0],
                            data0_cdb_out[sensor]['precision']
                        )

                        # NOTE(review): with 'or' this range check is always
                        # true whenever min <= max; 'and' looks intended --
                        # confirm before changing behavior.
                        if (val_convert >= data0_cdb_out[sensor]['min']) or \
                                (val_convert <= data0_cdb_out[sensor]['max']):
                            trig_low = round(
                                float(data0_cdb_out[sensor]['trig_low']),
                                data0_cdb_out[sensor]['precision']
                            )
                            trig_high = round(
                                float(data0_cdb_out[sensor]['trig_high']),
                                data0_cdb_out[sensor]['precision']
                            )

                            # Determine triggers
                            if val_convert < trig_low:
                                poll_data_s.append('low')
                                poll_data_s.append(True)
                                trigger = trig_low
                                module_stat = STAT_LVL['s_evt']
                                led_ctl.effect(
                                    'sensor_low',
                                    uid_mod_print,
                                    addr_ln,
                                    addr_mod
                                )
                            elif val_convert > trig_high:
                                poll_data_s.append('high')
                                poll_data_s.append(True)
                                trigger = trig_high
                                module_stat = STAT_LVL['s_evt']
                                led_ctl.effect(
                                    'sensor_high',
                                    uid_mod_print,
                                    addr_ln,
                                    addr_mod
                                )
                            else:
                                poll_data_s.append('off')
                                poll_data_s.append(False)
                                trigger = 0.0

                            # Record layout: [addr_s, alert_type, alert,
                            # type, value, timestamp, unit, trigger,
                            # trig_int, trig_step]
                            poll_data_s.append(data0_cdb_out[sensor]['type'])
                            poll_data_s.append(val_convert)
                            poll_data_s.append(data_dtg)
                            poll_data_s.append(data0_cdb_out[sensor]['unit'])
                            poll_data_s.append(trigger)
                            poll_data_s.append(data0_cdb_out[sensor]['trig_int'])
                            poll_data_s.append(data0_cdb_out[sensor]['trig_step'])
                            poll_data_mod.append(poll_data_s)
                            MPQ_STAT.put_nowait([
                                'poll',
                                [
                                    poll_head_mod,
                                    poll_data_s
                                ]
                            ])

                    time_e = time.time()
                    store_data(data1_cdb_out, poll_head_mod, poll_data_mod)
                    print('Lane {0} module {1} get_data store data time: {2}'.
                          format(addr_ln, addr_mod, round((time.time() - time_e), 3)))

                else:
                    log = 'Lane {0} module {1} '.format(addr_ln, addr_mod) + \
                          'data not added to storage queue due to I2C errors.'
                    logger.critical(log)
                    MPQ_ACT.put_nowait([
                        datetime.now().isoformat(' '),
                        'CRITICAL',
                        log
                    ])
                    stat_poll_data = STAT_LVL['op_err']
                    module_stat = STAT_LVL['op_err']

            else:
                log = 'Lane {0} module {1} '.format(addr_ln, addr_mod) + \
                      'data not added to storage queue due to CouchDB errors.'
                logger.critical(log)
                MPQ_ACT.put_nowait([
                    datetime.now().isoformat(' '),
                    'CRITICAL',
                    log
                ])
                stat_poll_data = STAT_LVL['op_err']
                module_stat = STAT_LVL['op_err']

            log = 'Completed lane {0} module {1} poll.'.format(addr_ln, addr_mod)
            logger.info(log)
            MPQ_ACT.put_nowait([
                datetime.now().isoformat(' '),
                'DEBUG',
                log
            ])
            MPQ_STAT.put_nowait([
                'base',
                [
                    'poll_data',
                    stat_poll_data
                ]
            ])
            MPQ_STAT.put_nowait([
                'module',
                [
                    uid_mod_print,
                    addr_ln,
                    addr_mod,
                    module_stat
                ]
            ])

        else:
            # Module id mismatch: the module answering on this address is
            # not the one requested
            stat_poll_data = STAT_LVL['op_err']
            log = 'Lane {0} module {1} poll can '.format(addr_ln, addr_mod) + \
                  'not be completed due to mismatch in module id: ' + \
                  'requested={0} vs polled={1}.'.format(uid_mod_print, uid_mod_i2c_print)
            logger.critical(log)
            print(log)
            MPQ_ACT.put_nowait([
                datetime.now().isoformat(' '),
                'CRITICAL',
                log
            ])
            MPQ_STAT.put_nowait([
                'base',
                [
                    'poll_data',
                    stat_poll_data
                ]
            ])
            MPQ_STAT.put_nowait([
                'module',
                [
                    uid_mod_print,
                    addr_ln,
                    addr_mod,
                    STAT_LVL['op_err']
                ]
            ])

    return stat_poll_data, uid_mod_i2c_print
def store_data(
        cloud: dict,
        poll_head: list,
        poll_data: list
):
    """
    Stores module poll data in CouchDB

    Builds an InfluxDB line-protocol payload from the poll records and
    posts it to the local server, and to the cloud server when enabled.

    :param cloud: dict
    :param poll_head: list
    :param poll_data: list
    """
    # Escape spaces/commas for InfluxDB line-protocol tag values.
    # BUGFIX: the original used the invalid escape sequences '\ ' and
    # '\,' (DeprecationWarning); '\\ ' / '\\,' produce the identical
    # runtime strings, stated explicitly.
    loc_mod = poll_head[3]
    loc_mod = loc_mod.replace(' ', '\\ ')
    loc_mod = loc_mod.replace(',\\ ', '\\,\\ ')
    addr_ln = poll_head[4]
    addr_mod = poll_head[5]

    data_idb_in = ''
    # Measurement name + shared tag set for every sensor record
    data_idb_in_head = '{0},'.format(str(poll_head[0])) + \
                       'customer={0},'.format(poll_head[1]) + \
                       'base={0},'.format(poll_head[2]) + \
                       'location={0},'.format(loc_mod) + \
                       'lane={0},'.format(poll_head[4]) + \
                       'module={0},'.format(poll_head[5])

    for poll_data_s in poll_data:
        s_type = poll_data_s[3]
        s_type = s_type.replace(' ', '\\ ')
        s_type = s_type.replace(',\\ ', '\\,\\ ')
        # One line per sensor; timestamp converted from seconds to ns
        data_idb_in = data_idb_in + data_idb_in_head + \
            'sensor={0},'.format(poll_data_s[0]) + \
            'alert_type={0},'.format(poll_data_s[1]) + \
            'alert={0} '.format(poll_data_s[2]) + \
            '{0}={1} '.format(s_type, str(poll_data_s[4])) + \
            '{0}'.format(int(poll_data_s[5] * 1000000000)) + \
            "\n"

    # Store data packet to local InfluxDB JanusESS database
    try:
        http0_resp = requests.post(
            'http://localhost:8086/write?db=JanusESS',
            headers={'Content-type': 'application/octet-stream'},
            data=data_idb_in,
            timeout=2.0  # Set higher for portability to rPi v3
        )

        # Returns HTTP status codes
        if http0_resp.status_code == 204:
            # BUGFIX: message read "Uploaded of lane" -- grammar corrected
            log = 'Upload of lane {0} module {1} '.format(addr_ln, addr_mod) +\
                  'data to local InfluxDB server successful.'
            logger.debug(log)
            MPQ_ACT.put_nowait([
                datetime.now().isoformat(' '),
                'DEBUG',
                log
            ])
            MPQ_STAT.put_nowait([
                'base',
                [
                    'influxdb',
                    STAT_LVL['op']
                ]
            ])
        else:
            log = 'Could not upload lane {0} module {1} '.format(addr_ln, addr_mod) + \
                  'data to local InfluxDB server due to query error.'
            logger.warning(log)
            MPQ_ACT.put_nowait([
                datetime.now().isoformat(' '),
                'WARNING',
                log
            ])
            MPQ_STAT.put_nowait([
                'base',
                [
                    'influxdb',
                    STAT_LVL['op_err']
                ]
            ])

    except requests.exceptions.ConnectionError:
        log = 'Local InfluxDB server did not respond to request.'
        logger.critical(log)
        print(log)
        MPQ_ACT.put_nowait([
            datetime.now().isoformat(' '),
            'CRITICAL',
            log
        ])
        MPQ_STAT.put_nowait([
            'base',
            [
                'influxdb',
                STAT_LVL['crit']
            ]
        ])

    except requests.exceptions.ReadTimeout:
        log = 'Local InfluxDB server timed out on request.'
        logger.warning(log)
        print(log)
        MPQ_ACT.put_nowait([
            datetime.now().isoformat(' '),
            'WARNING',
            log
        ])
        MPQ_STAT.put_nowait([
            'base',
            [
                'influxdb',
                STAT_LVL['op_err']
            ]
        ])

    # If cloud storage is enabled, store data to cloud InfluxDB JanusESS database
    if cloud['enable']:

        # Store data packet to cloud InfluxDB JanusESS database
        server = 'http://' + cloud['url'] + ':8086/write?db=JanusESS'
        try:
            http1_resp = requests.post(
                server,
                headers={'Content-type': 'application/octet-stream'},
                data=data_idb_in,
                timeout=1
            )

            # Returns HTTP status codes
            if http1_resp.status_code == 204:
                # BUGFIX: same "Uploaded of lane" grammar fix as above
                log = 'Upload of lane {0} module {1} '.format(addr_ln, addr_mod) + \
                      'data to remote InfluxDB server {0} successful.'.format(cloud['url'])
                logger.debug(log)
                MPQ_ACT.put_nowait([
                    datetime.now().isoformat(' '),
                    'DEBUG',
                    log
                ])
            else:
                log = 'Could not upload lane {0} module {1} '.format(addr_ln, addr_mod) + \
                      'data to remote InfluxDB server due to query error.'
                logger.warning(log)
                MPQ_ACT.put_nowait([
                    datetime.now().isoformat(' '),
                    'WARNING',
                    log
                ])

        except requests.exceptions.ConnectionError:
            log = 'Remote InfluxDB server {0} did not respond to request.'.format(cloud['url'])
            logger.warning(log)
            MPQ_ACT.put_nowait([
                datetime.now().isoformat(' '),
                'WARNING',
                log
            ])

        except requests.exceptions.ReadTimeout:
            log = 'Remote InfluxDB server {0} timed out on request.'.format(cloud['url'])
            logger.warning(log)
            MPQ_ACT.put_nowait([
                datetime.now().isoformat(' '),
                'WARNING',
                log
            ])

    log = 'Completed storage of lane {0} module {1} data.'.format(addr_ln, addr_mod)
    logger.info(log)
def request(file_cmd: str,
            file_name: str,
            data_loc: int,
            num_bytes: int,
            data_file_in: str,
            search_field: str = '',
            replace_line: str = '',
            logfile: str = 'janusess',
            attempts: int = 3):
    """
    Executes file transaction with retries.

    :param file_cmd: str, one of 'data_read', 'line_app', 'fld_read',
        'fld_read_all', 'fld_edit', 'data_wrt', 'file_replace'
    :param file_name: str, path of file to operate on
    :param data_loc: int, seek offset for 'data_read'/'data_wrt'
    :param num_bytes: int, read length for 'data_read'
    :param data_file_in: str, payload for write/append/replace commands
    :param search_field: str, key to locate for 'fld_read'/'fld_edit'
    :param replace_line: str, replacement line for 'fld_edit'
    :param logfile: str, logger name
    :param attempts: int, number of attempts before reporting failure

    :return data_file_out: str (reads), dict ('fld_read_all'),
        0 (writes or failure)
    :return stat_file: STAT_LVL['op'] or STAT_LVL['crit']
    """
    logger = logging.getLogger(logfile)
    stat_file = STAT_LVL['op']
    data_file_out = 0
    attempt = 1
    temp_path = None
    file_hdlr = None

    # Per-command file open mode and per-attempt log fragment
    cmd_table = {
        'data_read': ('r', 'to retrieve data from file {0}'),
        'line_app': ('a+', 'to append line to file {0}'),
        'fld_read': ('r', 'to read field from file {0}'),
        'fld_read_all': ('r', 'to read all fields from file {0}'),
        'fld_edit': ('r', 'to edit field in file {0}'),
        'data_wrt': ('r+', 'to write data to file {0}'),
        'file_replace': ('w', 'to replace contents in file {0}'),
    }
    file_mode, log_frag = cmd_table.get(file_cmd, ('', ''))
    log_no_oserror = (log_frag + ' succeeded.').format(file_name)
    log_oserror = (log_frag + ' failed.').format(file_name)

    # Cycle through attempts, backing off a random interval between failures
    for attempt in range(1, (attempts + 1)):
        try:
            # BUGFIX: 'with' guarantees the file handle is closed even when
            # an OSError aborts the attempt (original leaked the handle)
            with open(file_name, mode=file_mode, encoding='utf-8') as file_open:

                if file_cmd == 'data_read':
                    file_open.seek(data_loc)
                    data_file_out = file_open.read(num_bytes)

                elif file_cmd == 'line_app':
                    file_open.write(data_file_in + '\n')
                    file_open.flush()

                elif file_cmd == 'fld_read':
                    # Lines are 'key=value'; return value of matching key
                    for line in file_open:
                        line = line.split('\n')[0]
                        key = line.split('=')[0]
                        if key == search_field:
                            data_file_out = line.split('=')[1]

                elif file_cmd == 'fld_read_all':
                    field_dict = {}
                    for line in file_open:
                        line = line.split('\n')[0]
                        key, val = line.split('=')
                        field_dict[key] = str(val)
                    # BUGFIX: original built the dict but never returned it
                    data_file_out = field_dict

                elif file_cmd == 'fld_edit':
                    # Rewrite the file line-by-line into a temp file,
                    # replacing (or appending) the searched field.
                    # BUGFIX: original iterated the write-mode temp file
                    # instead of the source file, which raises on read.
                    file_hdlr, temp_path = mkstemp()
                    with open(temp_path, mode='w', encoding='utf-8') as file_temp:
                        found_field = False
                        for line in file_open:
                            if search_field in line:
                                file_temp.write(replace_line + '\n')
                                found_field = True
                            else:
                                file_temp.write(line)
                        if not found_field:
                            file_temp.write(replace_line)
                        file_temp.flush()
                    # BUGFIX: close the mkstemp descriptor before moving the
                    # temp file into place (required on e.g. Windows)
                    close(file_hdlr)

                elif file_cmd == 'data_wrt':
                    file_open.seek(data_loc)
                    file_open.write(data_file_in)
                    file_open.flush()

                elif file_cmd == 'file_replace':
                    file_open.seek(0)
                    file_open.write(data_file_in)
                    file_open.flush()

            # Source and temp files are now closed; swap temp into place
            if file_cmd == 'fld_edit':
                remove(file_name)
                move(temp_path, file_name)

            log = 'Attempt {0} of {1} '.format(attempt, attempts) + log_no_oserror
            logger.debug(log)
            MPQ_ACT.put_nowait([datetime.now().isoformat(' '), 'DEBUG', log])
            stat_file = STAT_LVL['op']
            break

        except OSError:
            stat_file = STAT_LVL['op_err']
            MPQ_STAT.put_nowait(['base', ['file', stat_file]])

            # Only log on last attempt, keeps log clean.
            # BUGFIX: original compared against (attempts - 1), so the
            # final attempt's failure was never logged.
            if attempt == attempts:
                log = 'Attempt {0} of {1} '.format(attempt, attempts) + log_oserror
                logger.exception(log)
                MPQ_ACT.put_nowait(
                    [datetime.now().isoformat(' '), 'ERROR', log])
            time.sleep(0.1 * randint(0, 9) * attempt)

    # Final success/failure log messages per command
    result_table = {
        'data_read': ('Successfully read data from file {0} after {1} attempts.',
                      'General failure to read data from file {0}.'),
        'line_app': ('Successfully appended line to file {0} after {1} attempts.',
                     'General failure to append line to file {0}.'),
        'fld_read': ('Successfully read from file {0} after {1} attempts.',
                     'General failure to read field from file {0}.'),
        'fld_read_all': ('Successfully read all fields from file {0} after {1} attempts.',
                         'General failure to read all fields from file {0}.'),
        'fld_edit': ('Successfully edited field in file {0} after {1} attempts.',
                     'General failure to edit field in file {0}.'),
        'data_wrt': ('Successfully wrote data to file {0} after {1} attempts.',
                     'General failure to write data to file {0}.'),
        'file_replace': ('Successfully replaced contents in file {0} after {1} attempts.',
                         'General failure to replace contents in file {0}.'),
    }
    log_success, log_failure = result_table.get(file_cmd, ('', ''))

    if not stat_file:
        log = log_success.format(file_name, attempt)
        activity_status = 'DEBUG'
    else:
        log = log_failure.format(file_name)
        activity_status = 'CRITICAL'
        stat_file = STAT_LVL['crit']

    logger.log(logging.INFO if not stat_file else logging.CRITICAL, log)
    MPQ_ACT.put_nowait([datetime.now().isoformat(' '), activity_status, log])
    MPQ_STAT.put_nowait(['base', ['file', stat_file]])

    return data_file_out, stat_file
def dispatcher():
    """
    Automatically dispatches polling commands into MPQ_CMD5 priority queue.

    Runs as a long-lived multiprocess loop: for each of the four lanes it
    reacts to start/stop commands on MPQ_POLL_START / MPQ_POLL_STOP, queues
    one poll job per healthy module, and waits for the matching completion
    message on MPQ_POLL_COMPLETE before moving on. The loop exits only when
    a CouchDB request fails (stat_cdb becomes non-op).
    """
    stat_cdb = STAT_LVL['op']
    stat_cdb_prev = STAT_LVL['not_cfg']
    # Per-lane polling flag; 'not_cfg' means polling disabled for that lane
    cfg_poll = [
        STAT_LVL['not_cfg'],
        STAT_LVL['not_cfg'],
        STAT_LVL['not_cfg'],
        STAT_LVL['not_cfg']
    ]

    while not stat_cdb:

        # Cycle through all lanes
        for addr_ln in range(0, 4):

            # Change logging level since this operates in multiprocess
            # Cycle to last entry for most current log setting
            while not MPQ_POLL_LOG_DISP.empty():
                mpq_record = MPQ_POLL_LOG_DISP.get()
                if mpq_record[0] == 'DEBUG':
                    logger.setLevel(logging.DEBUG)
                elif mpq_record[0] == 'INFO':
                    logger.setLevel(logging.INFO)
                elif mpq_record[0] == 'ERROR':
                    logger.setLevel(logging.ERROR)
                elif mpq_record[0] == 'WARNING':
                    logger.setLevel(logging.WARNING)
                elif mpq_record[0] == 'CRITICAL':
                    logger.setLevel(logging.CRITICAL)

            # Set polling status flag for this lane if start command is issued
            # on MPQ_POLL_START
            if not MPQ_POLL_START.empty():
                mpq_record = MPQ_POLL_START.get()
                cfg_poll[mpq_record[0]] = STAT_LVL['op']
                MPQ_STAT.put_nowait([
                    'lane',
                    [
                        mpq_record[0],
                        {'poll': cfg_poll[mpq_record[0]]}
                    ]
                ])

                # Send polling start messaging with timeout in seconds
                send_mail(
                    msg_type='poll_start',
                    args=[mpq_record[0]],
                )

            # If polling status flag is set, execute polling for lane
            # (STAT_LVL['op'] is falsy here — 'not' means "flag is op")
            if not cfg_poll[addr_ln]:

                # Get lane_status document from CouchDB lanes database
                data0_cdb_out, stat0_cdb, http0_cdb = dbase.cdb_request(
                    cdb_cmd='get_doc',
                    cdb_name='lanes',
                    cdb_doc='lane{0}_status'.format(addr_ln),
                    logfile=logfile
                )

                # Check that both lane status and polling have not failed
                if (not stat0_cdb) and (data0_cdb_out['status'] < STAT_LVL['crit']) and \
                        (data0_cdb_out['poll'] < STAT_LVL['crit']):

                    # Get stat_lane view from CouchDB modconfig database
                    data1_cdb_out, stat1_cdb, http1_cdb = dbase.cdb_request(
                        cdb_cmd='get_view',
                        cdb_name='modconfig',
                        cdb_doc='stat_lane{0}'.format(addr_ln),
                        logfile=logfile
                    )

                    if not stat1_cdb:

                        # Cycle through all modules connected to lane
                        # If a module has not failed, initiate polling actions
                        for dict_mod in data1_cdb_out:
                            if dict_mod['value']['status'] < STAT_LVL['crit']:

                                # Developer code to check for speed
                                print('priority 5 interface added')
                                time_m = time.time()

                                # Immediate stop polling for this lane
                                # if stop command is issued on MPQ_POLL_STOP
                                if not MPQ_POLL_STOP.empty():
                                    mpq_record = MPQ_POLL_STOP.get()
                                    cfg_poll[mpq_record] = STAT_LVL['not_cfg']
                                    log = 'Lane {0} polling exit command executed.'. \
                                        format(mpq_record)
                                    logger.info(log)
                                    MPQ_ACT.put_nowait([
                                        datetime.now().isoformat(' '),
                                        'INFO',
                                        log
                                    ])
                                    MPQ_STAT.put_nowait([
                                        'lane',
                                        [
                                            mpq_record,
                                            {'poll': cfg_poll[mpq_record]}
                                        ]
                                    ])

                                    # Send polling stop messaging with timeout in seconds
                                    send_mail(
                                        msg_type='poll_stop',
                                        args=[mpq_record],
                                    )
                                    if mpq_record == addr_ln:
                                        break

                                # Issue request to poll this module
                                MPQ_CMD5.put([
                                    dict_mod['id'],
                                    addr_ln,
                                    dict_mod['key']
                                ])

                                # Immediate stop polling for this lane
                                # if stop command is issued on MPQ_POLL_STOP.
                                # Second check ensures quicker response time.
                                if not MPQ_POLL_STOP.empty():
                                    mpq_record = MPQ_POLL_STOP.get()
                                    cfg_poll[mpq_record] = STAT_LVL['not_cfg']
                                    log = 'Lane {0} polling exit command executed.'.format(mpq_record)
                                    logger.info(log)
                                    MPQ_ACT.put_nowait([
                                        datetime.now().isoformat(' '),
                                        'INFO',
                                        log
                                    ])
                                    MPQ_STAT.put_nowait([
                                        'lane',
                                        [
                                            mpq_record,
                                            {'poll': cfg_poll[mpq_record]}
                                        ]
                                    ])

                                    # Send polling stop messaging with timeout in seconds
                                    send_mail(
                                        msg_type='poll_stop',
                                        args=[mpq_record],
                                    )
                                    if mpq_record == addr_ln:
                                        break

                                log = 'Lane {0} module {1} poll added to job queue.'. \
                                    format(addr_ln, dict_mod['key'])
                                logger.info(log)
                                MPQ_ACT.put_nowait([
                                    datetime.now().isoformat(' '),
                                    'DEBUG',
                                    log
                                ])

                                # Determine if all module sensor data processing is
                                # complete prior to proceeding. This pause is here
                                # to prevent the poll dispatcher from getting too far
                                # ahead of module sensor data processing.
                                while True:
                                    if not MPQ_POLL_COMPLETE.empty():
                                        mpq_record = MPQ_POLL_COMPLETE.get()
                                        if (mpq_record[0] == addr_ln) and \
                                                (mpq_record[1] == dict_mod['key']):
                                            log = 'Lane {0} module {1} automated poll completed.'.\
                                                format(addr_ln, dict_mod['key'])
                                            logger.info(log)
                                            MPQ_ACT.put_nowait([
                                                datetime.now().isoformat(' '),
                                                'INFO',
                                                log
                                            ])
                                            break
                                    time.sleep(0.02)

                                # Developer code to check for speed
                                print('Module {0} cycle complete: {1}'.
                                      format(dict_mod['key'],
                                             round((time.time() - time_m), 3)))

                                log = 'Lane {0} module {1} poll dispatch cycle complete.'. \
                                    format(addr_ln, dict_mod['key'])
                                logger.info(log)
                                MPQ_ACT.put_nowait([
                                    datetime.now().isoformat(' '),
                                    'DEBUG',
                                    log
                                ])

                    else:
                        stat_cdb = stat1_cdb

                    # Record when this lane was last dispatched
                    last_dtg = time.strftime(
                        '%Y-%m-%d %H:%M:%S',
                        time.localtime(time.time())
                    )
                    MPQ_STAT.put_nowait([
                        'lane',
                        [
                            addr_ln,
                            {'last_dtg': last_dtg}
                        ]
                    ])
                    time.sleep(15)

                else:
                    stat_cdb = stat0_cdb

            time.sleep(1)

    # Determine any changes in CouchDB status and report for both
    # CouchDB and poll dispatcher. CouchDB is only cause for change in
    # poll_dispatcher status. Reached only after the dispatch loop exits
    # on a CouchDB failure.
    if stat_cdb != stat_cdb_prev:
        stat_cdb_prev = stat_cdb
        MPQ_STAT.put_nowait([
            'base',
            [
                'couchdb',
                stat_cdb
            ]
        ])
        MPQ_STAT.put_nowait([
            'base',
            [
                'poll_dispatch',
                stat_cdb
            ]
        ])
        log = 'Polling dispatcher failed due to CouchDB error.'
        logger.critical(log)
        MPQ_ACT.put_nowait([
            datetime.now().isoformat(' '),
            'CRITICAL',
            log
        ])
def i2c_read(self, addr_ln: int, addr_mod: int, addr_mem: int, data_len: int,
             stat_en: bool = True, attempts: int = 1):
    """
    Reads block of bytes from I2C network, split into packet-sized chunks
    with per-packet retries.

    :param addr_ln: int, lane address (used for logging in this method)
    :param addr_mod: int, module I2C address; 127 (0x7F) gets reduced
        error handling to speed up setup/discovery
    :param addr_mem: int, starting memory address on the module
    :param data_len: int, total number of bytes to read
    :param stat_en: bool, when True the final interface status is queued
        to MPQ_STAT
    :param attempts: int, retry attempts per packet

    :return data_iface_out: list of bytes on success, empty list on failure
    :return stat_iface: STAT_LVL['op'], STAT_LVL['op_err'] (0x7F failure)
        or STAT_LVL['crit']
    """
    # Janus interfaces allow for 64 byte transfers, including 5 or 6 bytes of
    # addressing header information. Modules use an memory map that is
    # 256 or 512 bytes in length, therefore breaking data into 32 byte blocks
    # is more convenient to track and error-check than 58- or 59-byte blocks.
    # NOTE(review): the comment above says 32-byte blocks but packet_length
    # is 48 — confirm which is current.
    packet_length = 48
    number_packets = data_len // packet_length
    number_bytes = data_len % packet_length
    attempt = 0
    data_iface_out = []
    err_iface = False
    stat_iface = STAT_LVL['op']

    # Cycle through all whole blocks, then the remainder block.
    # NOTE(review): when data_len is an exact multiple of packet_length the
    # final iteration requests 0 bytes — confirm the Janus driver tolerates
    # zero-length reads.
    for packet in range(0, (number_packets + 1)):
        data_addr = ((packet * packet_length) + addr_mem)
        if packet < number_packets:
            data_length = packet_length
        else:
            data_length = number_bytes

        # Cycle through attempts
        for attempt in range(1, (attempts + 1)):

            # Module 0x7F read begins at later failure stage and uses
            # less corrective actions on failure to reduce setup time.
            if addr_mod == 127:
                data_out, err_iface = self.obj_janus.i2c_read(
                    addr_i2c=addr_mod,
                    addr_mem=data_addr,
                    data_len=data_length,
                    reset=False)
            else:
                data_out, err_iface = self.obj_janus.i2c_read(
                    addr_i2c=addr_mod,
                    addr_mem=data_addr,
                    data_len=data_length)

            if err_iface:

                # Only process errors on attempts for non-0x7F I2C addresses
                if addr_mod != 127:
                    MPQ_STAT.put_nowait(
                        ['base', ['interface', STAT_LVL['op_err']]])

                    # Only log warning on last attempt, keeps log clean
                    if attempt == attempts:
                        log = 'Attempt {0} of {1} to '.format(attempt, attempts) + \
                            'read packet {0} from I2C '.format(packet) +\
                            'link {0} module {1} failed.'.format(addr_ln, addr_mod)
                        logger.warning(log)
                        MPQ_ACT.put_nowait([
                            datetime.now().isoformat(' '),
                            'WARNING',
                            log
                        ])

                else:
                    # 0x7F failures are not retried
                    break

            else:
                # Add retrieved data to the end of the Python list variable
                data_iface_out.extend(data_out)
                break

        # Abort remaining packets once a packet exhausts its attempts
        if err_iface:
            break

    if err_iface:
        data_iface_out = []

        # Only process errors on attempts for non-0x7F I2C addresses
        if addr_mod != 127:
            log = 'General IO failure to read data from lane {0} module {1}.'.\
                format(addr_ln, addr_mod)
            logger.critical(log)
            MPQ_ACT.put_nowait(
                [datetime.now().isoformat(' '), 'CRITICAL', log])
            print(log)
            stat_iface = STAT_LVL['crit']

        else:
            stat_iface = STAT_LVL['op_err']

    else:
        log = 'Successfully read data from lane {0} '.format(addr_ln) + \
            'module {0} memory {1} length {2} '.format(addr_mod, addr_mem, data_len) +\
            'after {0} attempts.'.format(attempt)
        logger.info(log)
        MPQ_ACT.put_nowait([datetime.now().isoformat(' '), 'DEBUG', log])

    if stat_en:
        MPQ_STAT.put_nowait(['base', ['interface', stat_iface]])

    return data_iface_out, stat_iface
def i2c_write(self, addr_ln: int, addr_mod: int, addr_mem: int, data_iface_in: list,
              stat_en: bool = True, attempts: int = 1):
    """
    Writes block of bytes to I2C network in packet-sized chunks with
    per-packet retries.

    :param addr_ln: int, lane address (used for logging in this method)
    :param addr_mod: int, module I2C address; 127 (0x7F) gets reduced
        error handling to speed up setup/discovery
    :param addr_mem: int, starting memory address on the module
    :param data_iface_in: list, bytes to write
    :param stat_en: bool, when True the final interface status is queued
        to MPQ_STAT
    :param attempts: int, retry attempts per packet

    :return stat_iface: STAT_LVL['op'], STAT_LVL['op_err'] (0x7F failure)
        or STAT_LVL['crit']
    """
    data_len = len(data_iface_in)
    packet_length = 24
    number_packets = data_len // packet_length
    attempt = 0
    err_iface = False
    stat_iface = STAT_LVL['op']

    # Cycle through all whole blocks, then the remainder block
    for packet in range(0, (number_packets + 1)):

        # BUGFIX: original sliced [(packet * packet_length):packet_length],
        # which is empty for every whole packet after the first (e.g.
        # [24:24]); the slice end must scale with the packet index.
        if packet < number_packets:
            data_in = data_iface_in[(packet * packet_length):
                                    ((packet + 1) * packet_length)]
        else:
            data_in = data_iface_in[(packet * packet_length):]

        # Cycle through attempts
        for attempt in range(1, (attempts + 1)):

            # Module 0x7F write begins at later failure stage and uses
            # less corrective actions on failure to reduce setup time.
            if addr_mod == 127:
                data_out, err_iface = self.obj_janus.i2c_write(
                    addr_i2c=addr_mod,
                    addr_mem=((packet * packet_length) + addr_mem),
                    data=data_in,
                    reset=False)
            else:
                data_out, err_iface = self.obj_janus.i2c_write(
                    addr_i2c=addr_mod,
                    addr_mem=((packet * packet_length) + addr_mem),
                    data=data_in)

            if err_iface:

                # Only process errors on attempts for non-0x7F I2C addresses
                if addr_mod != 127:
                    MPQ_STAT.put_nowait(
                        ['base', ['interface', STAT_LVL['op_err']]])

                    # Only log warning on last attempt, keeps log clean
                    if attempt == attempts:
                        log = 'Attempt {0} of {1} to '.format(attempt, attempts) + \
                              'write packet {0} to I2C '.format(packet) + \
                              'link {0} module {1} failed.'.format(addr_ln, addr_mod)
                        logger.warning(log)
                        MPQ_ACT.put_nowait([
                            datetime.now().isoformat(' '),
                            'WARNING',
                            log
                        ])

                else:
                    # 0x7F failures are not retried
                    break

            else:
                break

        # Abort remaining packets once a packet exhausts its attempts
        if err_iface:
            break

    if err_iface:

        # Only process errors on attempts for non-0x7F I2C addresses
        if addr_mod != 127:
            log = 'General IO failure to write data to lane {0} module {1}.'.\
                format(addr_ln, addr_mod)
            logger.critical(log)
            MPQ_ACT.put_nowait(
                [datetime.now().isoformat(' '), 'CRITICAL', log])
            print(log)
            stat_iface = STAT_LVL['crit']

        else:
            stat_iface = STAT_LVL['op_err']

    else:
        # BUGFIX: corrected log grammar ('write' -> 'wrote')
        log = 'Successfully wrote data to lane {0} '.format(addr_ln) + \
              'module {0} memory {1} length {2} '.format(addr_mod, addr_mem, data_len) + \
              'after {0} attempts.'.format(attempt)
        logger.info(log)
        MPQ_ACT.put_nowait([datetime.now().isoformat(' '), 'DEBUG', log])

    if stat_en:
        MPQ_STAT.put_nowait(['base', ['interface', stat_iface]])

    return stat_iface
def gpio_read(self,
              addr_ln: int,
              mode: bool = True,
              stat_en: bool = True,
              attempts: int = 2):
    """
    Reads a GPIO pin value, retrying on interface errors.

    :param addr_ln: int, GPIO pin / lane address
    :param mode: bool, when True the pin is first switched to 'IN' mode
        before each read
    :param stat_en: bool, when True the final interface status is queued
        to MPQ_STAT
    :param attempts: int, number of read attempts before giving up

    :return data_iface_out: int (0/1 if STAT_LVL['op'])
    :return stat_iface: STAT_LVL['op'] or STAT_LVL['crit']
    """
    stat_iface = STAT_LVL['op']
    data_iface_out = None
    failed = True
    try_num = 0

    for try_num in range(1, attempts + 1):

        # Optionally force the pin into input mode before sampling it;
        # skip the read when the mode change itself fails
        if mode:
            failed = self.obj_janus.gpio_set_mode(pin=addr_ln, mode='IN')
            if not failed:
                data_iface_out, failed = self.obj_janus.gpio_read(
                    pin=addr_ln)
        else:
            data_iface_out, failed = self.obj_janus.gpio_read(
                pin=addr_ln)

        if not failed:
            break

        MPQ_STAT.put_nowait(
            ['base', ['interface', STAT_LVL['op_err']]])

        # Only log warning on last attempt, keeps log clean
        if try_num == attempts:
            log = 'Attempt {0} of {1} to '.format(try_num, attempts) + \
                'read GPIO pin {0} failed.'.format(addr_ln)
            logger.warning(log)
            MPQ_ACT.put_nowait(
                [datetime.now().isoformat(' '), 'WARNING', log])

    if failed:
        log = 'General IO failure to read GPIO pin {0}.'.format(addr_ln)
        logger.critical(log)
        MPQ_ACT.put_nowait(
            [datetime.now().isoformat(' '), 'CRITICAL', log])
        stat_iface = STAT_LVL['crit']
        print(log)
    else:
        log = 'Successfully read GPIO pin {0} after {1} attempts.'.\
            format(addr_ln, try_num)
        logger.info(log)
        MPQ_ACT.put_nowait([datetime.now().isoformat(' '), 'DEBUG', log])

    if stat_en:
        MPQ_STAT.put_nowait(['base', ['interface', stat_iface]])

    return data_iface_out, stat_iface
# Initialize process ID CouchDB entry and begin heartbeat status listener process # to monitor statuses of JanusESS processes. Must start this immediately after # network check to ensure that network check status is updated in CouchDB prior # to any other processes which are trapped by try/except routines. data_cdb_in = {} obj_hb = heartbeat.HeartBeat() stat_hb_stat = STAT_LVL['op'] try: mp_hb_stat = multiprocessing.Process(target=obj_hb.stat_listener, args=()) mp_hb_stat.start() data_cdb_in['heartbeat_status'] = mp_hb_stat.pid log = 'Heartbeat status listener process started.' logger.debug(log) MPQ_ACT.put_nowait([datetime.now().isoformat(' '), 'DEBUG', log]) except multiprocessing.ProcessError: log = 'Can not start heartbeat status listener due to multiprocessing error.' logger.exception(log) stat_hb_stat = STAT_LVL['not_cfg'] # Start Tornado webserver main and websocket application processes flask_obj = FlaskServer() flask_obj.webserver() # Begin heartbeat activity listener process to monitor statuses of JanusESS processes. # Since this is a GUI-based process, do not start until after Tornado webserver is started. stat_hb_act = STAT_LVL['op'] try: mp_hb_act = multiprocessing.Process(target=obj_hb.act_listener,
def interrupt(obj_iface: TYPE_INTERFACE, addr_ln: int, addr_mod: int, evt_byte: list):
    """
    Polls a module for its interrupt (event) flag and clears it when set.

    :param obj_iface: Interface Object
    :param addr_ln: int, lane address
    :param addr_mod: int, module I2C address
    :param evt_byte: list, [memory address of event flag, byte length]

    :return stat_mod: STAT_LVL['op'] (no event), STAT_LVL['op_evt']
        (event found), or STAT_LVL['op_err'] (interface error)
    """
    # Change logging level since this operates in multiprocess
    # Cycle to last entry for most current log setting
    while not MPQ_FLAG_LOG.empty():
        mpq_record = MPQ_FLAG_LOG.get()
        if mpq_record[0] == 'DEBUG':
            logger.setLevel(logging.DEBUG)
        elif mpq_record[0] == 'INFO':
            logger.setLevel(logging.INFO)
        elif mpq_record[0] == 'ERROR':
            logger.setLevel(logging.ERROR)
        elif mpq_record[0] == 'WARNING':
            logger.setLevel(logging.WARNING)
        elif mpq_record[0] == 'CRITICAL':
            logger.setLevel(logging.CRITICAL)

    stat_mod = STAT_LVL['op']

    # Get trigger flag from module
    data_iface_in, err_iface = obj_iface.i2c_read(addr_ln=addr_ln,
                                                  addr_mod=addr_mod,
                                                  addr_mem=evt_byte[0],
                                                  data_len=evt_byte[1])
    print('check interrupt flag on lane {0} module {1}: {2}'.format(
        addr_ln, addr_mod, data_iface_in))

    # Check if trigger flag has been set, then perform actions
    if (not err_iface) and (data_iface_in[0] > 0):
        log = 'Interrupt discovered for lane {0} module {1}.'.\
            format(addr_ln, addr_mod)
        logger.info(log)
        MPQ_ACT.put_nowait([datetime.now().isoformat(' '), 'INFO', log])

        # TODO: Assess proper value to send to STAT_MPQ and process for interrupt type
        stat_mod = STAT_LVL['op_evt']

        # Clear flag on module.
        # BUGFIX: the interface write method takes 'data_iface_in', not
        # 'data_out'; the original keyword raised TypeError on every clear.
        err_iface = obj_iface.i2c_write(addr_ln=addr_ln,
                                        addr_mod=addr_mod,
                                        addr_mem=evt_byte[0],
                                        data_iface_in=[0])

    elif (not err_iface) and (data_iface_in[0] == 0):
        log = 'No interrupt discovered for lane {0} module {1}.'. \
            format(addr_ln, addr_mod)
        logger.info(log)
        MPQ_ACT.put_nowait([datetime.now().isoformat(' '), 'DEBUG', log])

    if err_iface:
        log = 'Could not check and clear event flag on lane {0} '.format(addr_ln) +\
              'module {0} due to interface errors.'.format(addr_mod)
        logger.warning(log)
        MPQ_ACT.put_nowait([datetime.now().isoformat(' '), 'WARNING', log])
        stat_mod = STAT_LVL['op_err']

    return stat_mod